if_dc.c revision 119498
1/* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 35 * series chips and several workalikes including the following: 36 * 37 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 38 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 39 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 40 * ASIX Electronics AX88140A (www.asix.com.tw) 41 * ASIX Electronics AX88141 (www.asix.com.tw) 42 * ADMtek AL981 (www.admtek.com.tw) 43 * ADMtek AN985 (www.admtek.com.tw) 44 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 45 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 46 * Accton EN1217 (www.accton.com) 47 * Xircom X3201 (www.xircom.com) 48 * Abocom FE2500 49 * Conexant LANfinity (www.conexant.com) 50 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) 51 * 52 * Datasheets for the 21143 are available at developer.intel.com. 53 * Datasheets for the clone parts can be found at their respective sites. 54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 55 * The PNIC II is essentially a Macronix 98715A chip; the only difference 56 * worth noting is that its multicast hash table is only 128 bits wide 57 * instead of 512. 58 * 59 * Written by Bill Paul <wpaul@ee.columbia.edu> 60 * Electrical Engineering Department 61 * Columbia University, New York City 62 */ 63 64/* 65 * The Intel 21143 is the successor to the DEC 21140. It is basically 66 * the same as the 21140 but with a few new features. The 21143 supports 67 * three kinds of media attachments: 68 * 69 * o MII port, for 10Mbps and 100Mbps support and NWAY 70 * autonegotiation provided by an external PHY. 71 * o SYM port, for symbol mode 100Mbps support. 72 * o 10baseT port. 73 * o AUI/BNC port. 74 * 75 * The 100Mbps SYM port and 10baseT port can be used together in 76 * combination with the internal NWAY support to create a 10/100 77 * autosensing configuration. 
78 * 79 * Note that not all tulip workalikes are handled in this driver: we only 80 * deal with those which are relatively well behaved. The Winbond is 81 * handled separately due to its different register offsets and the 82 * special handling needed for its various bugs. The PNIC is handled 83 * here, but I'm not thrilled about it. 84 * 85 * All of the workalike chips use some form of MII transceiver support 86 * with the exception of the Macronix chips, which also have a SYM port. 87 * The ASIX AX88140A is also documented to have a SYM port, but all 88 * the cards I've seen use an MII transceiver, probably because the 89 * AX88140A doesn't support internal NWAY. 90 */ 91 92#include <sys/cdefs.h> 93__FBSDID("$FreeBSD: head/sys/dev/dc/if_dc.c 119498 2003-08-27 08:13:34Z mbr $"); 94 95#include <sys/param.h> 96#include <sys/endian.h> 97#include <sys/systm.h> 98#include <sys/sockio.h> 99#include <sys/mbuf.h> 100#include <sys/malloc.h> 101#include <sys/kernel.h> 102#include <sys/socket.h> 103#include <sys/sysctl.h> 104 105#include <net/if.h> 106#include <net/if_arp.h> 107#include <net/ethernet.h> 108#include <net/if_dl.h> 109#include <net/if_media.h> 110#include <net/if_types.h> 111#include <net/if_vlan_var.h> 112 113#include <net/bpf.h> 114 115#include <machine/bus_pio.h> 116#include <machine/bus_memio.h> 117#include <machine/bus.h> 118#include <machine/resource.h> 119#include <sys/bus.h> 120#include <sys/rman.h> 121 122#include <dev/mii/mii.h> 123#include <dev/mii/miivar.h> 124 125#include <dev/pci/pcireg.h> 126#include <dev/pci/pcivar.h> 127 128#define DC_USEIOSPACE 129#ifdef __alpha__ 130#define SRM_MEDIA 131#endif 132 133#include <pci/if_dcreg.h> 134 135MODULE_DEPEND(dc, pci, 1, 1, 1); 136MODULE_DEPEND(dc, ether, 1, 1, 1); 137MODULE_DEPEND(dc, miibus, 1, 1, 1); 138 139/* "controller miibus0" required. See GENERIC if you get errors here. */ 140#include "miibus_if.h" 141 142/* 143 * Various supported device vendors/types and their names. 
144 */ 145static struct dc_type dc_devs[] = { 146 { DC_VENDORID_DEC, DC_DEVICEID_21143, 147 "Intel 21143 10/100BaseTX" }, 148 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 149 "Davicom DM9009 10/100BaseTX" }, 150 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 151 "Davicom DM9100 10/100BaseTX" }, 152 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 153 "Davicom DM9102 10/100BaseTX" }, 154 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 155 "Davicom DM9102A 10/100BaseTX" }, 156 { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 157 "ADMtek AL981 10/100BaseTX" }, 158 { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 159 "ADMtek AN985 10/100BaseTX" }, 160 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, 161 "ADMtek ADM9511 10/100BaseTX" }, 162 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, 163 "ADMtek ADM9513 10/100BaseTX" }, 164 { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, 165 "Netgear FA511 10/100BaseTX" }, 166 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 167 "ASIX AX88140A 10/100BaseTX" }, 168 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 169 "ASIX AX88141 10/100BaseTX" }, 170 { DC_VENDORID_MX, DC_DEVICEID_98713, 171 "Macronix 98713 10/100BaseTX" }, 172 { DC_VENDORID_MX, DC_DEVICEID_98713, 173 "Macronix 98713A 10/100BaseTX" }, 174 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 175 "Compex RL100-TX 10/100BaseTX" }, 176 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 177 "Compex RL100-TX 10/100BaseTX" }, 178 { DC_VENDORID_MX, DC_DEVICEID_987x5, 179 "Macronix 98715/98715A 10/100BaseTX" }, 180 { DC_VENDORID_MX, DC_DEVICEID_987x5, 181 "Macronix 98715AEC-C 10/100BaseTX" }, 182 { DC_VENDORID_MX, DC_DEVICEID_987x5, 183 "Macronix 98725 10/100BaseTX" }, 184 { DC_VENDORID_MX, DC_DEVICEID_98727, 185 "Macronix 98727/98732 10/100BaseTX" }, 186 { DC_VENDORID_LO, DC_DEVICEID_82C115, 187 "LC82C115 PNIC II 10/100BaseTX" }, 188 { DC_VENDORID_LO, DC_DEVICEID_82C168, 189 "82c168 PNIC 10/100BaseTX" }, 190 { DC_VENDORID_LO, DC_DEVICEID_82C168, 191 "82c169 PNIC 10/100BaseTX" }, 192 { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 193 "Accton EN1217 10/100BaseTX" }, 
194 { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 195 "Accton EN2242 MiniPCI 10/100BaseTX" }, 196 { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 197 "Xircom X3201 10/100BaseTX" }, 198 { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, 199 "Abocom FE2500 10/100BaseTX" }, 200 { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 201 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 202 { DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX, 203 "Hawking CB102 CardBus 10/100" }, 204 { DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T, 205 "PlaneX FNW-3602-T CardBus 10/100" }, 206 { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, 207 "3Com OfficeConnect 10/100B" }, 208 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120, 209 "Microsoft MN-120 CardBus 10/100" }, 210 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130, 211 "Microsoft MN-130 10/100" }, 212 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130_FAKE, 213 "Microsoft MN-130 10/100" }, 214 { 0, 0, NULL } 215}; 216 217static int dc_probe (device_t); 218static int dc_attach (device_t); 219static int dc_detach (device_t); 220static int dc_suspend (device_t); 221static int dc_resume (device_t); 222#ifndef BURN_BRIDGES 223static void dc_acpi (device_t); 224#endif 225static struct dc_type *dc_devtype (device_t); 226static int dc_newbuf (struct dc_softc *, int, int); 227static int dc_encap (struct dc_softc *, struct mbuf *); 228static void dc_pnic_rx_bug_war (struct dc_softc *, int); 229static int dc_rx_resync (struct dc_softc *); 230static void dc_rxeof (struct dc_softc *); 231static void dc_txeof (struct dc_softc *); 232static void dc_tick (void *); 233static void dc_tx_underrun (struct dc_softc *); 234static void dc_intr (void *); 235static void dc_start (struct ifnet *); 236static int dc_ioctl (struct ifnet *, u_long, caddr_t); 237static void dc_init (void *); 238static void dc_stop (struct dc_softc *); 239static void dc_watchdog (struct ifnet *); 240static void dc_shutdown (device_t); 241static int dc_ifmedia_upd (struct ifnet *); 242static void dc_ifmedia_sts (struct ifnet *, struct 
ifmediareq *); 243 244static void dc_delay (struct dc_softc *); 245static void dc_eeprom_idle (struct dc_softc *); 246static void dc_eeprom_putbyte (struct dc_softc *, int); 247static void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 248static void dc_eeprom_getword_pnic 249 (struct dc_softc *, int, u_int16_t *); 250static void dc_eeprom_getword_xircom 251 (struct dc_softc *, int, u_int16_t *); 252static void dc_eeprom_width (struct dc_softc *); 253static void dc_read_eeprom (struct dc_softc *, caddr_t, int, int, int); 254 255static void dc_mii_writebit (struct dc_softc *, int); 256static int dc_mii_readbit (struct dc_softc *); 257static void dc_mii_sync (struct dc_softc *); 258static void dc_mii_send (struct dc_softc *, u_int32_t, int); 259static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 260static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 261static int dc_miibus_readreg (device_t, int, int); 262static int dc_miibus_writereg (device_t, int, int, int); 263static void dc_miibus_statchg (device_t); 264static void dc_miibus_mediainit (device_t); 265 266static void dc_setcfg (struct dc_softc *, int); 267static u_int32_t dc_crc_le (struct dc_softc *, caddr_t); 268static u_int32_t dc_crc_be (caddr_t); 269static void dc_setfilt_21143 (struct dc_softc *); 270static void dc_setfilt_asix (struct dc_softc *); 271static void dc_setfilt_admtek (struct dc_softc *); 272static void dc_setfilt_xircom (struct dc_softc *); 273 274static void dc_setfilt (struct dc_softc *); 275 276static void dc_reset (struct dc_softc *); 277static int dc_list_rx_init (struct dc_softc *); 278static int dc_list_tx_init (struct dc_softc *); 279 280static void dc_read_srom (struct dc_softc *, int); 281static void dc_parse_21143_srom (struct dc_softc *); 282static void dc_decode_leaf_sia (struct dc_softc *, struct dc_eblock_sia *); 283static void dc_decode_leaf_mii (struct dc_softc *, struct dc_eblock_mii *); 284static void dc_decode_leaf_sym (struct 
dc_softc *, struct dc_eblock_sym *); 285static void dc_apply_fixup (struct dc_softc *, int); 286 287static void dc_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t, 288 int); 289static void dc_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t, 290 int); 291 292#ifdef DC_USEIOSPACE 293#define DC_RES SYS_RES_IOPORT 294#define DC_RID DC_PCI_CFBIO 295#else 296#define DC_RES SYS_RES_MEMORY 297#define DC_RID DC_PCI_CFBMA 298#endif 299 300static device_method_t dc_methods[] = { 301 /* Device interface */ 302 DEVMETHOD(device_probe, dc_probe), 303 DEVMETHOD(device_attach, dc_attach), 304 DEVMETHOD(device_detach, dc_detach), 305 DEVMETHOD(device_suspend, dc_suspend), 306 DEVMETHOD(device_resume, dc_resume), 307 DEVMETHOD(device_shutdown, dc_shutdown), 308 309 /* bus interface */ 310 DEVMETHOD(bus_print_child, bus_generic_print_child), 311 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 312 313 /* MII interface */ 314 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 315 DEVMETHOD(miibus_writereg, dc_miibus_writereg), 316 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 317 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 318 319 { 0, 0 } 320}; 321 322static driver_t dc_driver = { 323 "dc", 324 dc_methods, 325 sizeof(struct dc_softc) 326}; 327 328static devclass_t dc_devclass; 329#ifdef __i386__ 330static int dc_quick = 1; 331SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0, 332 "do not mdevget in dc driver"); 333#endif 334 335DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0); 336DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); 337DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 338 339#define DC_SETBIT(sc, reg, x) \ 340 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 341 342#define DC_CLRBIT(sc, reg, x) \ 343 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 344 345#define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 346#define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 347 348#define IS_MPSAFE 0 349 350static void 
351dc_delay(struct dc_softc *sc) 352{ 353 int idx; 354 355 for (idx = (300 / 33) + 1; idx > 0; idx--) 356 CSR_READ_4(sc, DC_BUSCTL); 357} 358 359static void 360dc_eeprom_width(struct dc_softc *sc) 361{ 362 int i; 363 364 /* Force EEPROM to idle state. */ 365 dc_eeprom_idle(sc); 366 367 /* Enter EEPROM access mode. */ 368 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 369 dc_delay(sc); 370 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 371 dc_delay(sc); 372 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 373 dc_delay(sc); 374 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 375 dc_delay(sc); 376 377 for (i = 3; i--;) { 378 if (6 & (1 << i)) 379 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 380 else 381 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 382 dc_delay(sc); 383 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 384 dc_delay(sc); 385 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 386 dc_delay(sc); 387 } 388 389 for (i = 1; i <= 12; i++) { 390 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 391 dc_delay(sc); 392 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 393 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 394 dc_delay(sc); 395 break; 396 } 397 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 398 dc_delay(sc); 399 } 400 401 /* Turn off EEPROM access mode. */ 402 dc_eeprom_idle(sc); 403 404 if (i < 4 || i > 12) 405 sc->dc_romwidth = 6; 406 else 407 sc->dc_romwidth = i; 408 409 /* Enter EEPROM access mode. */ 410 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 411 dc_delay(sc); 412 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 413 dc_delay(sc); 414 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 415 dc_delay(sc); 416 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 417 dc_delay(sc); 418 419 /* Turn off EEPROM access mode. 
*/ 420 dc_eeprom_idle(sc); 421} 422 423static void 424dc_eeprom_idle(struct dc_softc *sc) 425{ 426 int i; 427 428 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 429 dc_delay(sc); 430 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 431 dc_delay(sc); 432 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 433 dc_delay(sc); 434 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 435 dc_delay(sc); 436 437 for (i = 0; i < 25; i++) { 438 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 439 dc_delay(sc); 440 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 441 dc_delay(sc); 442 } 443 444 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 445 dc_delay(sc); 446 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 447 dc_delay(sc); 448 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 449} 450 451/* 452 * Send a read command and address to the EEPROM, check for ACK. 453 */ 454static void 455dc_eeprom_putbyte(struct dc_softc *sc, int addr) 456{ 457 int d, i; 458 459 d = DC_EECMD_READ >> 6; 460 for (i = 3; i--; ) { 461 if (d & (1 << i)) 462 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 463 else 464 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 465 dc_delay(sc); 466 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 467 dc_delay(sc); 468 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 469 dc_delay(sc); 470 } 471 472 /* 473 * Feed in each bit and strobe the clock. 474 */ 475 for (i = sc->dc_romwidth; i--;) { 476 if (addr & (1 << i)) { 477 SIO_SET(DC_SIO_EE_DATAIN); 478 } else { 479 SIO_CLR(DC_SIO_EE_DATAIN); 480 } 481 dc_delay(sc); 482 SIO_SET(DC_SIO_EE_CLK); 483 dc_delay(sc); 484 SIO_CLR(DC_SIO_EE_CLK); 485 dc_delay(sc); 486 } 487} 488 489/* 490 * Read a word of data stored in the EEPROM at address 'addr.' 491 * The PNIC 82c168/82c169 has its own non-standard way to read 492 * the EEPROM. 
493 */ 494static void 495dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 496{ 497 int i; 498 u_int32_t r; 499 500 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); 501 502 for (i = 0; i < DC_TIMEOUT; i++) { 503 DELAY(1); 504 r = CSR_READ_4(sc, DC_SIO); 505 if (!(r & DC_PN_SIOCTL_BUSY)) { 506 *dest = (u_int16_t)(r & 0xFFFF); 507 return; 508 } 509 } 510} 511 512/* 513 * Read a word of data stored in the EEPROM at address 'addr.' 514 * The Xircom X3201 has its own non-standard way to read 515 * the EEPROM, too. 516 */ 517static void 518dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 519{ 520 521 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 522 523 addr *= 2; 524 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 525 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; 526 addr += 1; 527 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 528 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; 529 530 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 531} 532 533/* 534 * Read a word of data stored in the EEPROM at address 'addr.' 535 */ 536static void 537dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 538{ 539 int i; 540 u_int16_t word = 0; 541 542 /* Force EEPROM to idle state. */ 543 dc_eeprom_idle(sc); 544 545 /* Enter EEPROM access mode. */ 546 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 547 dc_delay(sc); 548 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 549 dc_delay(sc); 550 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 551 dc_delay(sc); 552 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 553 dc_delay(sc); 554 555 /* 556 * Send address of word we want to read. 557 */ 558 dc_eeprom_putbyte(sc, addr); 559 560 /* 561 * Start reading bits from EEPROM. 562 */ 563 for (i = 0x8000; i; i >>= 1) { 564 SIO_SET(DC_SIO_EE_CLK); 565 dc_delay(sc); 566 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 567 word |= i; 568 dc_delay(sc); 569 SIO_CLR(DC_SIO_EE_CLK); 570 dc_delay(sc); 571 } 572 573 /* Turn off EEPROM access mode. 
*/ 574 dc_eeprom_idle(sc); 575 576 *dest = word; 577} 578 579/* 580 * Read a sequence of words from the EEPROM. 581 */ 582static void 583dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap) 584{ 585 int i; 586 u_int16_t word = 0, *ptr; 587 588 for (i = 0; i < cnt; i++) { 589 if (DC_IS_PNIC(sc)) 590 dc_eeprom_getword_pnic(sc, off + i, &word); 591 else if (DC_IS_XIRCOM(sc)) 592 dc_eeprom_getword_xircom(sc, off + i, &word); 593 else 594 dc_eeprom_getword(sc, off + i, &word); 595 ptr = (u_int16_t *)(dest + (i * 2)); 596 if (swap) 597 *ptr = ntohs(word); 598 else 599 *ptr = word; 600 } 601} 602 603/* 604 * The following two routines are taken from the Macronix 98713 605 * Application Notes pp.19-21. 606 */ 607/* 608 * Write a bit to the MII bus. 609 */ 610static void 611dc_mii_writebit(struct dc_softc *sc, int bit) 612{ 613 614 if (bit) 615 CSR_WRITE_4(sc, DC_SIO, 616 DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT); 617 else 618 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 619 620 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 621 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 622} 623 624/* 625 * Read a bit from the MII bus. 626 */ 627static int 628dc_mii_readbit(struct dc_softc *sc) 629{ 630 631 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR); 632 CSR_READ_4(sc, DC_SIO); 633 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 634 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 635 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 636 return (1); 637 638 return (0); 639} 640 641/* 642 * Sync the PHYs by setting data bit and strobing the clock 32 times. 643 */ 644static void 645dc_mii_sync(struct dc_softc *sc) 646{ 647 int i; 648 649 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 650 651 for (i = 0; i < 32; i++) 652 dc_mii_writebit(sc, 1); 653} 654 655/* 656 * Clock a series of bits through the MII. 
657 */ 658static void 659dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 660{ 661 int i; 662 663 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 664 dc_mii_writebit(sc, bits & i); 665} 666 667/* 668 * Read an PHY register through the MII. 669 */ 670static int 671dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 672{ 673 int i, ack; 674 675 DC_LOCK(sc); 676 677 /* 678 * Set up frame for RX. 679 */ 680 frame->mii_stdelim = DC_MII_STARTDELIM; 681 frame->mii_opcode = DC_MII_READOP; 682 frame->mii_turnaround = 0; 683 frame->mii_data = 0; 684 685 /* 686 * Sync the PHYs. 687 */ 688 dc_mii_sync(sc); 689 690 /* 691 * Send command/address info. 692 */ 693 dc_mii_send(sc, frame->mii_stdelim, 2); 694 dc_mii_send(sc, frame->mii_opcode, 2); 695 dc_mii_send(sc, frame->mii_phyaddr, 5); 696 dc_mii_send(sc, frame->mii_regaddr, 5); 697 698#ifdef notdef 699 /* Idle bit */ 700 dc_mii_writebit(sc, 1); 701 dc_mii_writebit(sc, 0); 702#endif 703 704 /* Check for ack. */ 705 ack = dc_mii_readbit(sc); 706 707 /* 708 * Now try reading data bits. If the ack failed, we still 709 * need to clock through 16 cycles to keep the PHY(s) in sync. 710 */ 711 if (ack) { 712 for (i = 0; i < 16; i++) 713 dc_mii_readbit(sc); 714 goto fail; 715 } 716 717 for (i = 0x8000; i; i >>= 1) { 718 if (!ack) { 719 if (dc_mii_readbit(sc)) 720 frame->mii_data |= i; 721 } 722 } 723 724fail: 725 726 dc_mii_writebit(sc, 0); 727 dc_mii_writebit(sc, 0); 728 729 DC_UNLOCK(sc); 730 731 if (ack) 732 return (1); 733 return (0); 734} 735 736/* 737 * Write to a PHY register through the MII. 738 */ 739static int 740dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 741{ 742 743 DC_LOCK(sc); 744 /* 745 * Set up frame for TX. 746 */ 747 748 frame->mii_stdelim = DC_MII_STARTDELIM; 749 frame->mii_opcode = DC_MII_WRITEOP; 750 frame->mii_turnaround = DC_MII_TURNAROUND; 751 752 /* 753 * Sync the PHYs. 
754 */ 755 dc_mii_sync(sc); 756 757 dc_mii_send(sc, frame->mii_stdelim, 2); 758 dc_mii_send(sc, frame->mii_opcode, 2); 759 dc_mii_send(sc, frame->mii_phyaddr, 5); 760 dc_mii_send(sc, frame->mii_regaddr, 5); 761 dc_mii_send(sc, frame->mii_turnaround, 2); 762 dc_mii_send(sc, frame->mii_data, 16); 763 764 /* Idle bit. */ 765 dc_mii_writebit(sc, 0); 766 dc_mii_writebit(sc, 0); 767 768 DC_UNLOCK(sc); 769 770 return (0); 771} 772 773static int 774dc_miibus_readreg(device_t dev, int phy, int reg) 775{ 776 struct dc_mii_frame frame; 777 struct dc_softc *sc; 778 int i, rval, phy_reg = 0; 779 780 sc = device_get_softc(dev); 781 bzero(&frame, sizeof(frame)); 782 783 /* 784 * Note: both the AL981 and AN985 have internal PHYs, 785 * however the AL981 provides direct access to the PHY 786 * registers while the AN985 uses a serial MII interface. 787 * The AN985's MII interface is also buggy in that you 788 * can read from any MII address (0 to 31), but only address 1 789 * behaves normally. To deal with both cases, we pretend 790 * that the PHY is at MII address 1. 791 */ 792 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 793 return (0); 794 795 /* 796 * Note: the ukphy probes of the RS7112 report a PHY at 797 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 798 * so we only respond to correct one. 799 */ 800 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 801 return (0); 802 803 if (sc->dc_pmode != DC_PMODE_MII) { 804 if (phy == (MII_NPHY - 1)) { 805 switch (reg) { 806 case MII_BMSR: 807 /* 808 * Fake something to make the probe 809 * code think there's a PHY here. 
810 */ 811 return (BMSR_MEDIAMASK); 812 break; 813 case MII_PHYIDR1: 814 if (DC_IS_PNIC(sc)) 815 return (DC_VENDORID_LO); 816 return (DC_VENDORID_DEC); 817 break; 818 case MII_PHYIDR2: 819 if (DC_IS_PNIC(sc)) 820 return (DC_DEVICEID_82C168); 821 return (DC_DEVICEID_21143); 822 break; 823 default: 824 return (0); 825 break; 826 } 827 } else 828 return (0); 829 } 830 831 if (DC_IS_PNIC(sc)) { 832 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 833 (phy << 23) | (reg << 18)); 834 for (i = 0; i < DC_TIMEOUT; i++) { 835 DELAY(1); 836 rval = CSR_READ_4(sc, DC_PN_MII); 837 if (!(rval & DC_PN_MII_BUSY)) { 838 rval &= 0xFFFF; 839 return (rval == 0xFFFF ? 0 : rval); 840 } 841 } 842 return (0); 843 } 844 845 if (DC_IS_COMET(sc)) { 846 switch (reg) { 847 case MII_BMCR: 848 phy_reg = DC_AL_BMCR; 849 break; 850 case MII_BMSR: 851 phy_reg = DC_AL_BMSR; 852 break; 853 case MII_PHYIDR1: 854 phy_reg = DC_AL_VENID; 855 break; 856 case MII_PHYIDR2: 857 phy_reg = DC_AL_DEVID; 858 break; 859 case MII_ANAR: 860 phy_reg = DC_AL_ANAR; 861 break; 862 case MII_ANLPAR: 863 phy_reg = DC_AL_LPAR; 864 break; 865 case MII_ANER: 866 phy_reg = DC_AL_ANER; 867 break; 868 default: 869 printf("dc%d: phy_read: bad phy register %x\n", 870 sc->dc_unit, reg); 871 return (0); 872 break; 873 } 874 875 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 876 877 if (rval == 0xFFFF) 878 return (0); 879 return (rval); 880 } 881 882 frame.mii_phyaddr = phy; 883 frame.mii_regaddr = reg; 884 if (sc->dc_type == DC_TYPE_98713) { 885 phy_reg = CSR_READ_4(sc, DC_NETCFG); 886 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 887 } 888 dc_mii_readreg(sc, &frame); 889 if (sc->dc_type == DC_TYPE_98713) 890 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 891 892 return (frame.mii_data); 893} 894 895static int 896dc_miibus_writereg(device_t dev, int phy, int reg, int data) 897{ 898 struct dc_softc *sc; 899 struct dc_mii_frame frame; 900 int i, phy_reg = 0; 901 902 sc = device_get_softc(dev); 903 bzero(&frame, sizeof(frame)); 904 
905 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 906 return (0); 907 908 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 909 return (0); 910 911 if (DC_IS_PNIC(sc)) { 912 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 913 (phy << 23) | (reg << 10) | data); 914 for (i = 0; i < DC_TIMEOUT; i++) { 915 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 916 break; 917 } 918 return (0); 919 } 920 921 if (DC_IS_COMET(sc)) { 922 switch (reg) { 923 case MII_BMCR: 924 phy_reg = DC_AL_BMCR; 925 break; 926 case MII_BMSR: 927 phy_reg = DC_AL_BMSR; 928 break; 929 case MII_PHYIDR1: 930 phy_reg = DC_AL_VENID; 931 break; 932 case MII_PHYIDR2: 933 phy_reg = DC_AL_DEVID; 934 break; 935 case MII_ANAR: 936 phy_reg = DC_AL_ANAR; 937 break; 938 case MII_ANLPAR: 939 phy_reg = DC_AL_LPAR; 940 break; 941 case MII_ANER: 942 phy_reg = DC_AL_ANER; 943 break; 944 default: 945 printf("dc%d: phy_write: bad phy register %x\n", 946 sc->dc_unit, reg); 947 return (0); 948 break; 949 } 950 951 CSR_WRITE_4(sc, phy_reg, data); 952 return (0); 953 } 954 955 frame.mii_phyaddr = phy; 956 frame.mii_regaddr = reg; 957 frame.mii_data = data; 958 959 if (sc->dc_type == DC_TYPE_98713) { 960 phy_reg = CSR_READ_4(sc, DC_NETCFG); 961 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 962 } 963 dc_mii_writereg(sc, &frame); 964 if (sc->dc_type == DC_TYPE_98713) 965 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 966 967 return (0); 968} 969 970static void 971dc_miibus_statchg(device_t dev) 972{ 973 struct dc_softc *sc; 974 struct mii_data *mii; 975 struct ifmedia *ifm; 976 977 sc = device_get_softc(dev); 978 if (DC_IS_ADMTEK(sc)) 979 return; 980 981 mii = device_get_softc(sc->dc_miibus); 982 ifm = &mii->mii_media; 983 if (DC_IS_DAVICOM(sc) && 984 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 985 dc_setcfg(sc, ifm->ifm_media); 986 sc->dc_if_media = ifm->ifm_media; 987 } else { 988 dc_setcfg(sc, mii->mii_media_active); 989 sc->dc_if_media = mii->mii_media_active; 990 } 991} 992 993/* 994 * Special support 
for DM9102A cards with HomePNA PHYs. Note: 995 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 996 * to be impossible to talk to the management interface of the DM9801 997 * PHY (its MDIO pin is not connected to anything). Consequently, 998 * the driver has to just 'know' about the additional mode and deal 999 * with it itself. *sigh* 1000 */ 1001static void 1002dc_miibus_mediainit(device_t dev) 1003{ 1004 struct dc_softc *sc; 1005 struct mii_data *mii; 1006 struct ifmedia *ifm; 1007 int rev; 1008 1009 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1010 1011 sc = device_get_softc(dev); 1012 mii = device_get_softc(sc->dc_miibus); 1013 ifm = &mii->mii_media; 1014 1015 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1016 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 1017} 1018 1019#define DC_POLY 0xEDB88320 1020#define DC_BITS_512 9 1021#define DC_BITS_128 7 1022#define DC_BITS_64 6 1023 1024static u_int32_t 1025dc_crc_le(struct dc_softc *sc, caddr_t addr) 1026{ 1027 u_int32_t idx, bit, data, crc; 1028 1029 /* Compute CRC for the address value. */ 1030 crc = 0xFFFFFFFF; /* initial value */ 1031 1032 for (idx = 0; idx < 6; idx++) { 1033 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 1034 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); 1035 } 1036 1037 /* 1038 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1039 * chips is only 128 bits wide. 1040 */ 1041 if (sc->dc_flags & DC_128BIT_HASH) 1042 return (crc & ((1 << DC_BITS_128) - 1)); 1043 1044 /* The hash table on the MX98715BEC is only 64 bits wide. 
*/ 1045 if (sc->dc_flags & DC_64BIT_HASH) 1046 return (crc & ((1 << DC_BITS_64) - 1)); 1047 1048 /* Xircom's hash filtering table is different (read: weird) */ 1049 /* Xircom uses the LEAST significant bits */ 1050 if (DC_IS_XIRCOM(sc)) { 1051 if ((crc & 0x180) == 0x180) 1052 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); 1053 else 1054 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + 1055 (12 << 4)); 1056 } 1057 1058 return (crc & ((1 << DC_BITS_512) - 1)); 1059} 1060 1061/* 1062 * Calculate CRC of a multicast group address, return the lower 6 bits. 1063 */ 1064static u_int32_t 1065dc_crc_be(caddr_t addr) 1066{ 1067 u_int32_t crc, carry; 1068 int i, j; 1069 u_int8_t c; 1070 1071 /* Compute CRC for the address value. */ 1072 crc = 0xFFFFFFFF; /* initial value */ 1073 1074 for (i = 0; i < 6; i++) { 1075 c = *(addr + i); 1076 for (j = 0; j < 8; j++) { 1077 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 1078 crc <<= 1; 1079 c >>= 1; 1080 if (carry) 1081 crc = (crc ^ 0x04c11db6) | carry; 1082 } 1083 } 1084 1085 /* Return the filter bit position. */ 1086 return ((crc >> 26) & 0x0000003F); 1087} 1088 1089/* 1090 * 21143-style RX filter setup routine. Filter programming is done by 1091 * downloading a special setup frame into the TX engine. 21143, Macronix, 1092 * PNIC, PNIC II and Davicom chips are programmed this way. 1093 * 1094 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1095 * address (our node address) and a 512-bit hash filter for multicast 1096 * frames. We also sneak the broadcast address into the hash filter since 1097 * we need that too. 
 */
/*
 * Set the receive filter on 21143-like parts (Intel 21143, Macronix,
 * PNIC, PNIC II, Davicom, Conexant).  These chips are programmed by
 * posting a special "setup frame" on the transmit ring carrying the
 * multicast hash table and the station address.
 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	/* Consume one TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer in the TX chain so the TX completion
	 * path can tell this slot holds a setup frame, not an mbuf.
	 */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Fold each link-level multicast address into the hash table. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Hash in the broadcast address too, if enabled. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address (words 39-41 of the setup buffer). */
	sp[39] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	sp[40] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	sp[41] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Hand the descriptor to the chip and kick the transmitter. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}

/*
 * Set the receive filter on ADMtek parts, which expose the station
 * address and a 64-bit multicast hash table directly via CSRs rather
 * than requiring a setup frame.
 */
static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address. */
	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Centaur parts use the little-endian CRC, others big-endian. */
		if (DC_IS_CENTAUR(sc))
			h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}

/*
 * Set the receive filter on ASIX parts.  These use an indirect
 * register window: an index register (DC_AX_FILTIDX) selects which
 * filter word the data register (DC_AX_FILTDATA) accesses.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address. */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}

/*
 * Set the receive filter on the Xircom X3201.  Like the 21143 this
 * uses a setup frame on the TX ring, but the TX/RX engines are
 * stopped around the update, the MAC address lives at the start of
 * the setup buffer (words 0-2), and only a short 1ms settle delay
 * is used.
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	ifp = &sc->arpcom.ac_if;
	/* Idle the transmitter and receiver while we reprogram. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	/* Consume one TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Mark this slot as holding the setup buffer, not an mbuf. */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Fold each link-level multicast address into the hash table. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address (words 0-2 of the setup buffer). */
	sp[0] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	sp[1] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	sp[2] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Restart the TX/RX engines and post the setup frame. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}

/*
 * Dispatch to the filter-programming routine appropriate for this
 * chip family.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0, watchdogreg;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If either DMA engine is running, stop both and wait for them
	 * to reach an idle state before touching the mode bits; restart
	 * them at the end.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
				/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* SYM (symbol) mode 100Mbps. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX | IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Native 10baseT via the SIA. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Cycle the SIA through reset while loading
				 * the 10BT control register; autoneg is then
				 * explicitly disabled (media was forced).
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T | IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart the DMA engines if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}

/*
 * Issue a software reset and put the chip registers into a known
 * quiescent state (interrupts masked, bus and network config cleared).
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Poll for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * For these chip families, deassert the reset bit manually
	 * after a delay; i is zeroed so the timeout warning below is
	 * suppressed for this path.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and clear bus/network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}

/*
 * Match this device's PCI vendor/device ID (and, where it matters,
 * PCI revision) against the dc_devs table.  The t++ bumps rely on
 * the table ordering: the entry for a newer chip stepping directly
 * follows the entry matched by vendor/device ID.  Returns NULL if
 * the device is not one of ours.
 */
static struct dc_type *
dc_devtype(device_t dev)
{
	struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			/*
			 * The Microsoft MN-130 has a device ID of 0x0002,
			 * which happens to be the same as the PNIC 82c168.
			 * To keep dc_attach() from getting confused, we
			 * pretend its ID is something different.
			 * XXX: ideally, dc_attach() should be checking
			 * vendorid+deviceid together to avoid such
			 * collisions.
			 */
			if (t->dc_vid == DC_VENDORID_MICROSOFT &&
			    t->dc_did == DC_DEVICEID_MSMN130)
				t++;
			return (t);
		}
		t++;
	}

	return (NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs.
The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	struct dc_type *t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return (0);
	}

	return (ENXIO);
}

#ifndef BURN_BRIDGES
/*
 * Power management: if the chip was left in a low-power state, bring
 * it back to D0 and restore the PCI config registers (BARs, interrupt
 * line) that are lost across the power-state transition.
 */
static void
dc_acpi(device_t dev)
{
	int unit;
	u_int32_t iobase, membase, irq;

	unit = device_get_unit(dev);

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		/* Reset the power state. */
		printf("dc%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}
}
#endif

/*
 * Replay the SROM-described GPIO programming sequence for the given
 * media type: first the reset sequence, then the general-purpose
 * sequence, each written as 16-bit values into the high half of the
 * watchdog register.  Does nothing if no media info entry matches.
 */
static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	/* Find the media info entry for the requested media, if any. */
	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}
}

/*
 * Decode an SROM SIA media block into a dc_mediainfo entry and
 * prepend it to the softc's media list; selects SIA port mode.
 * NOTE(review): the M_NOWAIT allocation result is not checked and is
 * dereferenced immediately below.
 */
static void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (l->dc_sia_code == DC_SIA_CODE_10BT)
		m->dc_media = IFM_10_T;

	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
		m->dc_media = IFM_10_T | IFM_FDX;

	if (l->dc_sia_code == DC_SIA_CODE_10B2)
		m->dc_media = IFM_10_2;

	if (l->dc_sia_code == DC_SIA_CODE_10B5)
		m->dc_media = IFM_10_5;

	/* GPIO sequence is the two control words embedded in the block. */
	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;
}

/*
 * Decode an SROM SYM (symbol mode) media block into a dc_mediainfo
 * entry and prepend it to the media list; selects SYM port mode.
 * NOTE(review): the M_NOWAIT allocation result is not checked.
 */
static void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX | IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;
}

/*
 * Decode an SROM MII media block: the GP sequence follows the block
 * header, and the reset sequence (preceded by its length byte)
 * follows the GP sequence.  The pointers recorded here point into
 * the cached SROM image.
 * NOTE(review): the M_NOWAIT allocation result is not checked.
 */
static void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	struct dc_mediainfo *m;
	u_int8_t *p;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;
}

/*
 * Cache the entire SROM contents in the softc.  'bits' is the EEPROM
 * address width; the device holds 2^bits 16-bit words.
 * NOTE(review): the M_NOWAIT allocation result is not checked before
 * being passed to dc_read_eeprom().
 */
static void
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

/*
 * Walk the 21143 SROM info leaf and record the media blocks it
 * describes.  The leaf is scanned twice: first to learn whether an
 * MII block exists, then to decode the blocks -- SIA and SYM blocks
 * are used only when no MII block is available.
 */
static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the info leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		/* Advance by the block length (low 7 bits) plus header. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}

/*
 * busdma load callback: store the single segment's bus address into
 * the u_int32_t pointed to by 'arg'.
 */
static void
dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(device_t dev)
{
	int tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t revision;
	int unit, error = 0, rid, mac_offset;
	int i;
	u_int8_t *mac;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		printf("dc%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		printf("dc%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF;

	/* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
	if (sc->dc_info->dc_did != DC_DEVICEID_82C168 &&
	    sc->dc_info->dc_did != DC_DEVICEID_X3201)
		dc_eeprom_width(sc);

	/*
	 * Per-family setup: chip type, TX strategy flags, port mode,
	 * and (where needed) a cached copy of the SROM.
	 */
	switch (sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_DM9009:
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_ADM9511:
	case DC_DEVICEID_ADM9513:
	case DC_DEVICEID_FA511:
	case DC_DEVICEID_FE2500:
	case DC_DEVICEID_EN2242:
	case DC_DEVICEID_HAWKING_PN672TX:
	case DC_DEVICEID_3CSOHOB:
	case DC_DEVICEID_MSMN120:
	case DC_DEVICEID_MSMN130_FAKE:	/* XXX avoid collision with PNIC*/
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		/* NOTE(review): M_NOWAIT allocation result is not checked. */
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_X3201:
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
		 */
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_RS7112:
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	default:
		printf("dc%d: unknown device: %x\n", sc->dc_unit,
		    sc->dc_info->dc_did);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_read_config(dev,
		    DC_PCI_CFLT, 4) & 0xFF;

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc))
		dc_parse_21143_srom(sc);
	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		/* These parts store the address at an indirect offset. */
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN985:
		bcopy(sc->dc_srom + DC_AL_EE_NODEADDR, &eaddr,
		    ETHER_ADDR_LEN);
		/*
		 * NOTE(review): this EEPROM read overwrites the address
		 * just copied from the cached SROM above -- confirm which
		 * source is intended to win.
		 */
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_AL_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
		    ETHER_ADDR_LEN);
		break;
	case DC_TYPE_XIRCOM:
		/* The MAC comes from the CIS. */
		mac = pci_get_ether(dev);
		if (!mac) {
			device_printf(dev, "No station address in CIS!\n");
			error = ENXIO;
			goto fail;
		}
		bcopy(mac, eaddr, ETHER_ADDR_LEN);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * A 21143 or clone chip was detected. Inform the world.
	 */
	printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->dc_unit = unit;
	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1,
	    sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag);
	if (error) {
		printf("dc%d: failed to allocate busdma tag\n", unit);
		error = ENXIO;
		goto fail;
	}
	error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap);
	if (error) {
		printf("dc%d: failed to allocate DMA safe memory\n", unit);
		error = ENXIO;
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata,
	    sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("dc%d: cannot get address of the descriptors\n", unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Allocate a busdma tag and DMA safe memory for the multicast
	 * setup frame.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1,
	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag);
	if (error) {
		printf("dc%d: failed to allocate busdma tag\n", unit);
		error = ENXIO;
		goto fail;
	}
	error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
	    BUS_DMA_NOWAIT, &sc->dc_smap);
	if (error) {
		printf("dc%d: failed to allocate DMA safe memory\n", unit);
		error = ENXIO;
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
	    DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("dc%d: cannot get address of the descriptors\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * DC_TX_LIST_CNT,
	    DC_TX_LIST_CNT, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag);
	if (error) {
		printf("dc%d: failed to allocate busdma tag\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Create the TX/RX busdma maps. */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_mtag, 0,
		    &sc->dc_cdata.dc_tx_map[i]);
		if (error) {
			printf("dc%d: failed to init TX ring\n", unit);
			error = ENXIO;
			goto fail;
		}
	}
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_mtag, 0,
		    &sc->dc_cdata.dc_rx_map[i]);
		if (error) {
			printf("dc%d: failed to init RX ring\n", unit);
			error = ENXIO;
			goto fail;
		}
	}
	error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap);
	if (error) {
		printf("dc%d: failed to init RX ring\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Initialize the ifnet structure and driver entry points. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "dc";
	/* XXX: bleah, MTU gets overwritten in ether_ifattach() */
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
	ifp->if_watchdog = dc_watchdog;
	ifp->if_init = dc_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1;

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	error = mii_phy_probe(dev, &sc->dc_miibus,
	    dc_ifmedia_upd, dc_ifmedia_sts);

	if (error && DC_IS_INTEL(sc)) {
		/* No MII PHY found: fall back to SIA/SYM with NWAY. */
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		mii_phy_probe(dev, &sc->dc_miibus,
		    dc_ifmedia_upd, dc_ifmedia_sts);
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
			sc->dc_flags |= DC_TULIP_LEDS;
		error = 0;
	}

	if (error) {
		printf("dc%d: MII without any PHY!\n", sc->dc_unit);
		goto fail;
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	callout_init(&sc->dc_stat_ch, IS_MPSAFE ? CALLOUT_MPSAFE : 0);

#ifdef SRM_MEDIA
	sc->dc_srm_media = 0;

	/* Remember the SRM console media setting */
	if (DC_IS_INTEL(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
		switch ((command >> 8) & 0xff) {
		case 3:
			sc->dc_srm_media = IFM_10_T;
			break;
		case 4:
			sc->dc_srm_media = IFM_10_T | IFM_FDX;
			break;
		case 5:
			sc->dc_srm_media = IFM_100_TX;
			break;
		case 6:
			sc->dc_srm_media = IFM_100_TX | IFM_FDX;
			break;
		}
		if (sc->dc_srm_media)
			sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER;
	}
#endif

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET |
	    (IS_MPSAFE ? INTR_MPSAFE : 0),
	    dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		printf("dc%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* dc_detach() only frees what was actually allocated. */
	if (error)
		dc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
2349 */ 2350static int 2351dc_detach(device_t dev) 2352{ 2353 struct dc_softc *sc; 2354 struct ifnet *ifp; 2355 struct dc_mediainfo *m; 2356 int i; 2357 2358 sc = device_get_softc(dev); 2359 KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); 2360 DC_LOCK(sc); 2361 2362 ifp = &sc->arpcom.ac_if; 2363 2364 /* These should only be active if attach succeeded */ 2365 if (device_is_attached(dev)) { 2366 dc_stop(sc); 2367 ether_ifdetach(ifp); 2368 } 2369 if (sc->dc_miibus) 2370 device_delete_child(dev, sc->dc_miibus); 2371 bus_generic_detach(dev); 2372 2373 if (sc->dc_intrhand) 2374 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2375 if (sc->dc_irq) 2376 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2377 if (sc->dc_res) 2378 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2379 2380 if (sc->dc_cdata.dc_sbuf != NULL) 2381 bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); 2382 if (sc->dc_ldata != NULL) 2383 bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); 2384 for (i = 0; i < DC_TX_LIST_CNT; i++) 2385 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]); 2386 for (i = 0; i < DC_RX_LIST_CNT; i++) 2387 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); 2388 bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); 2389 if (sc->dc_stag) 2390 bus_dma_tag_destroy(sc->dc_stag); 2391 if (sc->dc_mtag) 2392 bus_dma_tag_destroy(sc->dc_mtag); 2393 if (sc->dc_ltag) 2394 bus_dma_tag_destroy(sc->dc_ltag); 2395 2396 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2397 2398 while (sc->dc_mi != NULL) { 2399 m = sc->dc_mi->dc_next; 2400 free(sc->dc_mi, M_DEVBUF); 2401 sc->dc_mi = m; 2402 } 2403 free(sc->dc_srom, M_DEVBUF); 2404 2405 DC_UNLOCK(sc); 2406 mtx_destroy(&sc->dc_mtx); 2407 2408 return (0); 2409} 2410 2411/* 2412 * Initialize the transmit descriptors. 
2413 */ 2414static int 2415dc_list_tx_init(struct dc_softc *sc) 2416{ 2417 struct dc_chain_data *cd; 2418 struct dc_list_data *ld; 2419 int i, nexti; 2420 2421 cd = &sc->dc_cdata; 2422 ld = sc->dc_ldata; 2423 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2424 if (i == DC_TX_LIST_CNT - 1) 2425 nexti = 0; 2426 else 2427 nexti = i + 1; 2428 ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti)); 2429 cd->dc_tx_chain[i] = NULL; 2430 ld->dc_tx_list[i].dc_data = 0; 2431 ld->dc_tx_list[i].dc_ctl = 0; 2432 } 2433 2434 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2435 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2436 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2437 return (0); 2438} 2439 2440 2441/* 2442 * Initialize the RX descriptors and allocate mbufs for them. Note that 2443 * we arrange the descriptors in a closed ring, so that the last descriptor 2444 * points back to the first. 2445 */ 2446static int 2447dc_list_rx_init(struct dc_softc *sc) 2448{ 2449 struct dc_chain_data *cd; 2450 struct dc_list_data *ld; 2451 int i, nexti; 2452 2453 cd = &sc->dc_cdata; 2454 ld = sc->dc_ldata; 2455 2456 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2457 if (dc_newbuf(sc, i, 1) != 0) 2458 return (ENOBUFS); 2459 if (i == DC_RX_LIST_CNT - 1) 2460 nexti = 0; 2461 else 2462 nexti = i + 1; 2463 ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti)); 2464 } 2465 2466 cd->dc_rx_prod = 0; 2467 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2468 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2469 return (0); 2470} 2471 2472static void 2473dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error) 2474 void *arg; 2475 bus_dma_segment_t *segs; 2476 int nseg; 2477 bus_size_t mapsize; 2478 int error; 2479{ 2480 struct dc_softc *sc; 2481 struct dc_desc *c; 2482 2483 sc = arg; 2484 c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur]; 2485 if (error) { 2486 sc->dc_cdata.dc_rx_err = error; 2487 return; 2488 } 2489 2490 KASSERT(nseg == 1, ("wrong number of segments, should be 1")); 2491 sc->dc_cdata.dc_rx_err = 0; 
2492 c->dc_data = htole32(segs->ds_addr); 2493} 2494 2495/* 2496 * Initialize an RX descriptor and attach an MBUF cluster. 2497 */ 2498static int 2499dc_newbuf(struct dc_softc *sc, int i, int alloc) 2500{ 2501 struct mbuf *m_new; 2502 bus_dmamap_t tmp; 2503 int error; 2504 2505 if (alloc) { 2506 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2507 if (m_new == NULL) 2508 return (ENOBUFS); 2509 } else { 2510 m_new = sc->dc_cdata.dc_rx_chain[i]; 2511 m_new->m_data = m_new->m_ext.ext_buf; 2512 } 2513 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2514 m_adj(m_new, sizeof(u_int64_t)); 2515 2516 /* 2517 * If this is a PNIC chip, zero the buffer. This is part 2518 * of the workaround for the receive bug in the 82c168 and 2519 * 82c169 chips. 2520 */ 2521 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2522 bzero(mtod(m_new, char *), m_new->m_len); 2523 2524 /* No need to remap the mbuf if we're reusing it. */ 2525 if (alloc) { 2526 sc->dc_cdata.dc_rx_cur = i; 2527 error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap, 2528 m_new, dc_dma_map_rxbuf, sc, 0); 2529 if (error) { 2530 m_freem(m_new); 2531 return (error); 2532 } 2533 if (sc->dc_cdata.dc_rx_err != 0) { 2534 m_freem(m_new); 2535 return (sc->dc_cdata.dc_rx_err); 2536 } 2537 bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); 2538 tmp = sc->dc_cdata.dc_rx_map[i]; 2539 sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap; 2540 sc->dc_sparemap = tmp; 2541 sc->dc_cdata.dc_rx_chain[i] = m_new; 2542 } 2543 2544 sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN); 2545 sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN); 2546 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2547 BUS_DMASYNC_PREREAD); 2548 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2549 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2550 return (0); 2551} 2552 2553/* 2554 * Grrrrr. 2555 * The PNIC chip has a terrible bug in it that manifests itself during 2556 * periods of heavy activity. 
The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 * descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 * total data upload.
 *
 * - We know the size of the desired received frame because it will be
 * provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 * This means that we know that the buffer contents should be all
 * zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 * ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 * backwards until we encounter the first non-zero byte of data.
 * This is the end of the received frame. We know we will encounter
 * some data at the end of the frame because the CRC will always be
 * there, so even if the sender transmits a packet of all zeros,
 * we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 * that value from the current pointer location. This brings us
 * to the start of the actual received packet.
2597 * 2598 * - We copy this into an mbuf and pass it on, along with the actual 2599 * frame length. 2600 * 2601 * The performance hit is tremendous, but it beats dropping frames all 2602 * the time. 2603 */ 2604 2605#define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG) 2606static void 2607dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2608{ 2609 struct dc_desc *cur_rx; 2610 struct dc_desc *c = NULL; 2611 struct mbuf *m = NULL; 2612 unsigned char *ptr; 2613 int i, total_len; 2614 u_int32_t rxstat = 0; 2615 2616 i = sc->dc_pnic_rx_bug_save; 2617 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2618 ptr = sc->dc_pnic_rx_buf; 2619 bzero(ptr, DC_RXLEN * 5); 2620 2621 /* Copy all the bytes from the bogus buffers. */ 2622 while (1) { 2623 c = &sc->dc_ldata->dc_rx_list[i]; 2624 rxstat = le32toh(c->dc_status); 2625 m = sc->dc_cdata.dc_rx_chain[i]; 2626 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2627 ptr += DC_RXLEN; 2628 /* If this is the last buffer, break out. */ 2629 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2630 break; 2631 dc_newbuf(sc, i, 0); 2632 DC_INC(i, DC_RX_LIST_CNT); 2633 } 2634 2635 /* Find the length of the actual receive frame. */ 2636 total_len = DC_RXBYTES(rxstat); 2637 2638 /* Scan backwards until we hit a non-zero byte. */ 2639 while (*ptr == 0x00) 2640 ptr--; 2641 2642 /* Round off. */ 2643 if ((uintptr_t)(ptr) & 0x3) 2644 ptr -= 1; 2645 2646 /* Now find the start of the frame. */ 2647 ptr -= total_len; 2648 if (ptr < sc->dc_pnic_rx_buf) 2649 ptr = sc->dc_pnic_rx_buf; 2650 2651 /* 2652 * Now copy the salvaged frame to the last mbuf and fake up 2653 * the status word to make it look like a successful 2654 * frame reception. 
2655 */ 2656 dc_newbuf(sc, i, 0); 2657 bcopy(ptr, mtod(m, char *), total_len); 2658 cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG); 2659} 2660 2661/* 2662 * This routine searches the RX ring for dirty descriptors in the 2663 * event that the rxeof routine falls out of sync with the chip's 2664 * current descriptor pointer. This may happen sometimes as a result 2665 * of a "no RX buffer available" condition that happens when the chip 2666 * consumes all of the RX buffers before the driver has a chance to 2667 * process the RX ring. This routine may need to be called more than 2668 * once to bring the driver back in sync with the chip, however we 2669 * should still be getting RX DONE interrupts to drive the search 2670 * for new packets in the RX ring, so we should catch up eventually. 2671 */ 2672static int 2673dc_rx_resync(struct dc_softc *sc) 2674{ 2675 struct dc_desc *cur_rx; 2676 int i, pos; 2677 2678 pos = sc->dc_cdata.dc_rx_prod; 2679 2680 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2681 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2682 if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN)) 2683 break; 2684 DC_INC(pos, DC_RX_LIST_CNT); 2685 } 2686 2687 /* If the ring really is empty, then just return. */ 2688 if (i == DC_RX_LIST_CNT) 2689 return (0); 2690 2691 /* We've fallen behing the chip: catch it. */ 2692 sc->dc_cdata.dc_rx_prod = pos; 2693 2694 return (EAGAIN); 2695} 2696 2697/* 2698 * A frame has been uploaded: pass the resulting mbuf chain up to 2699 * the higher level protocols. 
2700 */ 2701static void 2702dc_rxeof(struct dc_softc *sc) 2703{ 2704 struct mbuf *m; 2705 struct ifnet *ifp; 2706 struct dc_desc *cur_rx; 2707 int i, total_len = 0; 2708 u_int32_t rxstat; 2709 2710 ifp = &sc->arpcom.ac_if; 2711 i = sc->dc_cdata.dc_rx_prod; 2712 2713 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2714 while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & 2715 DC_RXSTAT_OWN)) { 2716#ifdef DEVICE_POLLING 2717 if (ifp->if_flags & IFF_POLLING) { 2718 if (sc->rxcycles <= 0) 2719 break; 2720 sc->rxcycles--; 2721 } 2722#endif 2723 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2724 rxstat = le32toh(cur_rx->dc_status); 2725 m = sc->dc_cdata.dc_rx_chain[i]; 2726 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2727 BUS_DMASYNC_POSTREAD); 2728 total_len = DC_RXBYTES(rxstat); 2729 2730 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2731 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2732 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2733 sc->dc_pnic_rx_bug_save = i; 2734 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2735 DC_INC(i, DC_RX_LIST_CNT); 2736 continue; 2737 } 2738 dc_pnic_rx_bug_war(sc, i); 2739 rxstat = le32toh(cur_rx->dc_status); 2740 total_len = DC_RXBYTES(rxstat); 2741 } 2742 } 2743 2744 /* 2745 * If an error occurs, update stats, clear the 2746 * status word and leave the mbuf cluster in place: 2747 * it should simply get re-used next time this descriptor 2748 * comes up in the ring. However, don't report long 2749 * frames as errors since they could be vlans. 
2750 */ 2751 if ((rxstat & DC_RXSTAT_RXERR)) { 2752 if (!(rxstat & DC_RXSTAT_GIANT) || 2753 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2754 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2755 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2756 ifp->if_ierrors++; 2757 if (rxstat & DC_RXSTAT_COLLSEEN) 2758 ifp->if_collisions++; 2759 dc_newbuf(sc, i, 0); 2760 if (rxstat & DC_RXSTAT_CRCERR) { 2761 DC_INC(i, DC_RX_LIST_CNT); 2762 continue; 2763 } else { 2764 dc_init(sc); 2765 return; 2766 } 2767 } 2768 } 2769 2770 /* No errors; receive the packet. */ 2771 total_len -= ETHER_CRC_LEN; 2772#ifdef __i386__ 2773 /* 2774 * On the x86 we do not have alignment problems, so try to 2775 * allocate a new buffer for the receive ring, and pass up 2776 * the one where the packet is already, saving the expensive 2777 * copy done in m_devget(). 2778 * If we are on an architecture with alignment problems, or 2779 * if the allocation fails, then use m_devget and leave the 2780 * existing buffer in the receive ring. 2781 */ 2782 if (dc_quick && dc_newbuf(sc, i, 1) == 0) { 2783 m->m_pkthdr.rcvif = ifp; 2784 m->m_pkthdr.len = m->m_len = total_len; 2785 DC_INC(i, DC_RX_LIST_CNT); 2786 } else 2787#endif 2788 { 2789 struct mbuf *m0; 2790 2791 m0 = m_devget(mtod(m, char *), total_len, 2792 ETHER_ALIGN, ifp, NULL); 2793 dc_newbuf(sc, i, 0); 2794 DC_INC(i, DC_RX_LIST_CNT); 2795 if (m0 == NULL) { 2796 ifp->if_ierrors++; 2797 continue; 2798 } 2799 m = m0; 2800 } 2801 2802 ifp->if_ipackets++; 2803 (*ifp->if_input)(ifp, m); 2804 } 2805 2806 sc->dc_cdata.dc_rx_prod = i; 2807} 2808 2809/* 2810 * A frame was downloaded to the chip. It's safe for us to clean up 2811 * the list buffers. 2812 */ 2813 2814static void 2815dc_txeof(struct dc_softc *sc) 2816{ 2817 struct dc_desc *cur_tx = NULL; 2818 struct ifnet *ifp; 2819 int idx; 2820 u_int32_t ctl, txstat; 2821 2822 ifp = &sc->arpcom.ac_if; 2823 2824 /* 2825 * Go through our tx list and free mbufs for those 2826 * frames that have been transmitted. 
2827 */ 2828 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2829 idx = sc->dc_cdata.dc_tx_cons; 2830 while (idx != sc->dc_cdata.dc_tx_prod) { 2831 2832 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2833 txstat = le32toh(cur_tx->dc_status); 2834 ctl = le32toh(cur_tx->dc_ctl); 2835 2836 if (txstat & DC_TXSTAT_OWN) 2837 break; 2838 2839 if (!(ctl & DC_TXCTL_FIRSTFRAG) || ctl & DC_TXCTL_SETUP) { 2840 if (ctl & DC_TXCTL_SETUP) { 2841 /* 2842 * Yes, the PNIC is so brain damaged 2843 * that it will sometimes generate a TX 2844 * underrun error while DMAing the RX 2845 * filter setup frame. If we detect this, 2846 * we have to send the setup frame again, 2847 * or else the filter won't be programmed 2848 * correctly. 2849 */ 2850 if (DC_IS_PNIC(sc)) { 2851 if (txstat & DC_TXSTAT_ERRSUM) 2852 dc_setfilt(sc); 2853 } 2854 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2855 } 2856 sc->dc_cdata.dc_tx_cnt--; 2857 DC_INC(idx, DC_TX_LIST_CNT); 2858 continue; 2859 } 2860 2861 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2862 /* 2863 * XXX: Why does my Xircom taunt me so? 2864 * For some reason it likes setting the CARRLOST flag 2865 * even when the carrier is there. wtf?!? 2866 * Who knows, but Conexant chips have the 2867 * same problem. Maybe they took lessons 2868 * from Xircom. 
2869 */ 2870 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2871 sc->dc_pmode == DC_PMODE_MII && 2872 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2873 DC_TXSTAT_NOCARRIER))) 2874 txstat &= ~DC_TXSTAT_ERRSUM; 2875 } else { 2876 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2877 sc->dc_pmode == DC_PMODE_MII && 2878 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2879 DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST))) 2880 txstat &= ~DC_TXSTAT_ERRSUM; 2881 } 2882 2883 if (txstat & DC_TXSTAT_ERRSUM) { 2884 ifp->if_oerrors++; 2885 if (txstat & DC_TXSTAT_EXCESSCOLL) 2886 ifp->if_collisions++; 2887 if (txstat & DC_TXSTAT_LATECOLL) 2888 ifp->if_collisions++; 2889 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2890 dc_init(sc); 2891 return; 2892 } 2893 } 2894 2895 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2896 2897 ifp->if_opackets++; 2898 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2899 bus_dmamap_sync(sc->dc_mtag, 2900 sc->dc_cdata.dc_tx_map[idx], 2901 BUS_DMASYNC_POSTWRITE); 2902 bus_dmamap_unload(sc->dc_mtag, 2903 sc->dc_cdata.dc_tx_map[idx]); 2904 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2905 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2906 } 2907 2908 sc->dc_cdata.dc_tx_cnt--; 2909 DC_INC(idx, DC_TX_LIST_CNT); 2910 } 2911 2912 if (idx != sc->dc_cdata.dc_tx_cons) { 2913 /* Some buffers have been freed. */ 2914 sc->dc_cdata.dc_tx_cons = idx; 2915 ifp->if_flags &= ~IFF_OACTIVE; 2916 } 2917 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2918} 2919 2920static void 2921dc_tick(void *xsc) 2922{ 2923 struct dc_softc *sc; 2924 struct mii_data *mii; 2925 struct ifnet *ifp; 2926 u_int32_t r; 2927 2928 sc = xsc; 2929 DC_LOCK(sc); 2930 ifp = &sc->arpcom.ac_if; 2931 mii = device_get_softc(sc->dc_miibus); 2932 2933 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2934 if (sc->dc_flags & DC_21143_NWAY) { 2935 r = CSR_READ_4(sc, DC_10BTSTAT); 2936 if (IFM_SUBTYPE(mii->mii_media_active) == 2937 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2938 sc->dc_link = 0; 2939 mii_mediachg(mii); 2940 } 2941 if (IFM_SUBTYPE(mii->mii_media_active) == 2942 IFM_10_T && (r & DC_TSTAT_LS10)) { 2943 sc->dc_link = 0; 2944 mii_mediachg(mii); 2945 } 2946 if (sc->dc_link == 0) 2947 mii_tick(mii); 2948 } else { 2949 r = CSR_READ_4(sc, DC_ISR); 2950 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2951 sc->dc_cdata.dc_tx_cnt == 0) { 2952 mii_tick(mii); 2953 if (!(mii->mii_media_status & IFM_ACTIVE)) 2954 sc->dc_link = 0; 2955 } 2956 } 2957 } else 2958 mii_tick(mii); 2959 2960 /* 2961 * When the init routine completes, we expect to be able to send 2962 * packets right away, and in fact the network code will send a 2963 * gratuitous ARP the moment the init routine marks the interface 2964 * as running. However, even though the MAC may have been initialized, 2965 * there may be a delay of a few seconds before the PHY completes 2966 * autonegotiation and the link is brought up. Any transmissions 2967 * made during that delay will be lost. Dealing with this is tricky: 2968 * we can't just pause in the init routine while waiting for the 2969 * PHY to come ready since that would bring the whole system to 2970 * a screeching halt for several seconds. 2971 * 2972 * What we do here is prevent the TX start routine from sending 2973 * any packets until a link has been established. After the 2974 * interface has been initialized, the tick routine will poll 2975 * the state of the PHY until the IFM_ACTIVE flag is set. 
Until 2976 * that time, packets will stay in the send queue, and once the 2977 * link comes up, they will be flushed out to the wire. 2978 */ 2979 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2980 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2981 sc->dc_link++; 2982 if (ifp->if_snd.ifq_head != NULL) 2983 dc_start(ifp); 2984 } 2985 2986 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2987 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 2988 else 2989 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 2990 2991 DC_UNLOCK(sc); 2992} 2993 2994/* 2995 * A transmit underrun has occurred. Back off the transmit threshold, 2996 * or switch to store and forward mode if we have to. 2997 */ 2998static void 2999dc_tx_underrun(struct dc_softc *sc) 3000{ 3001 u_int32_t isr; 3002 int i; 3003 3004 if (DC_IS_DAVICOM(sc)) 3005 dc_init(sc); 3006 3007 if (DC_IS_INTEL(sc)) { 3008 /* 3009 * The real 21143 requires that the transmitter be idle 3010 * in order to change the transmit threshold or store 3011 * and forward state. 
3012 */ 3013 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3014 3015 for (i = 0; i < DC_TIMEOUT; i++) { 3016 isr = CSR_READ_4(sc, DC_ISR); 3017 if (isr & DC_ISR_TX_IDLE) 3018 break; 3019 DELAY(10); 3020 } 3021 if (i == DC_TIMEOUT) { 3022 printf("dc%d: failed to force tx to idle state\n", 3023 sc->dc_unit); 3024 dc_init(sc); 3025 } 3026 } 3027 3028 printf("dc%d: TX underrun -- ", sc->dc_unit); 3029 sc->dc_txthresh += DC_TXTHRESH_INC; 3030 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3031 printf("using store and forward mode\n"); 3032 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3033 } else { 3034 printf("increasing TX threshold\n"); 3035 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3036 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3037 } 3038 3039 if (DC_IS_INTEL(sc)) 3040 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3041} 3042 3043#ifdef DEVICE_POLLING 3044static poll_handler_t dc_poll; 3045 3046static void 3047dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3048{ 3049 struct dc_softc *sc = ifp->if_softc; 3050 3051 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 3052 /* Re-enable interrupts. 
*/ 3053 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3054 return; 3055 } 3056 sc->rxcycles = count; 3057 dc_rxeof(sc); 3058 dc_txeof(sc); 3059 if (ifp->if_snd.ifq_head != NULL && !(ifp->if_flags & IFF_OACTIVE)) 3060 dc_start(ifp); 3061 3062 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 3063 u_int32_t status; 3064 3065 status = CSR_READ_4(sc, DC_ISR); 3066 status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF | 3067 DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN | 3068 DC_ISR_BUS_ERR); 3069 if (!status) 3070 return; 3071 /* ack what we have */ 3072 CSR_WRITE_4(sc, DC_ISR, status); 3073 3074 if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) { 3075 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 3076 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 3077 3078 if (dc_rx_resync(sc)) 3079 dc_rxeof(sc); 3080 } 3081 /* restart transmit unit if necessary */ 3082 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 3083 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3084 3085 if (status & DC_ISR_TX_UNDERRUN) 3086 dc_tx_underrun(sc); 3087 3088 if (status & DC_ISR_BUS_ERR) { 3089 printf("dc_poll: dc%d bus error\n", sc->dc_unit); 3090 dc_reset(sc); 3091 dc_init(sc); 3092 } 3093 } 3094} 3095#endif /* DEVICE_POLLING */ 3096 3097static void 3098dc_intr(void *arg) 3099{ 3100 struct dc_softc *sc; 3101 struct ifnet *ifp; 3102 u_int32_t status; 3103 3104 sc = arg; 3105 3106 if (sc->suspended) 3107 return; 3108 3109 if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 3110 return; 3111 3112 DC_LOCK(sc); 3113 ifp = &sc->arpcom.ac_if; 3114#ifdef DEVICE_POLLING 3115 if (ifp->if_flags & IFF_POLLING) 3116 goto done; 3117 if (ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */ 3118 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3119 goto done; 3120 } 3121#endif 3122 3123 /* Suppress unwanted interrupts */ 3124 if (!(ifp->if_flags & IFF_UP)) { 3125 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 3126 dc_stop(sc); 3127 DC_UNLOCK(sc); 3128 return; 3129 } 3130 3131 /* Disable 
interrupts. */ 3132 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3133 3134 while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) 3135 && status != 0xFFFFFFFF) { 3136 3137 CSR_WRITE_4(sc, DC_ISR, status); 3138 3139 if (status & DC_ISR_RX_OK) { 3140 int curpkts; 3141 curpkts = ifp->if_ipackets; 3142 dc_rxeof(sc); 3143 if (curpkts == ifp->if_ipackets) { 3144 while (dc_rx_resync(sc)) 3145 dc_rxeof(sc); 3146 } 3147 } 3148 3149 if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF)) 3150 dc_txeof(sc); 3151 3152 if (status & DC_ISR_TX_IDLE) { 3153 dc_txeof(sc); 3154 if (sc->dc_cdata.dc_tx_cnt) { 3155 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3156 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3157 } 3158 } 3159 3160 if (status & DC_ISR_TX_UNDERRUN) 3161 dc_tx_underrun(sc); 3162 3163 if ((status & DC_ISR_RX_WATDOGTIMEO) 3164 || (status & DC_ISR_RX_NOBUF)) { 3165 int curpkts; 3166 curpkts = ifp->if_ipackets; 3167 dc_rxeof(sc); 3168 if (curpkts == ifp->if_ipackets) { 3169 while (dc_rx_resync(sc)) 3170 dc_rxeof(sc); 3171 } 3172 } 3173 3174 if (status & DC_ISR_BUS_ERR) { 3175 dc_reset(sc); 3176 dc_init(sc); 3177 } 3178 } 3179 3180 /* Re-enable interrupts. 
*/ 3181 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3182 3183 if (ifp->if_snd.ifq_head != NULL) 3184 dc_start(ifp); 3185 3186#ifdef DEVICE_POLLING 3187done: 3188#endif 3189 3190 DC_UNLOCK(sc); 3191} 3192 3193static void 3194dc_dma_map_txbuf(arg, segs, nseg, mapsize, error) 3195 void *arg; 3196 bus_dma_segment_t *segs; 3197 int nseg; 3198 bus_size_t mapsize; 3199 int error; 3200{ 3201 struct dc_softc *sc; 3202 struct dc_desc *f; 3203 int cur, first, frag, i; 3204 3205 sc = arg; 3206 if (error) { 3207 sc->dc_cdata.dc_tx_err = error; 3208 return; 3209 } 3210 3211 first = cur = frag = sc->dc_cdata.dc_tx_prod; 3212 for (i = 0; i < nseg; i++) { 3213 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 3214 (frag == (DC_TX_LIST_CNT - 1)) && 3215 (first != sc->dc_cdata.dc_tx_first)) { 3216 bus_dmamap_unload(sc->dc_mtag, 3217 sc->dc_cdata.dc_tx_map[first]); 3218 sc->dc_cdata.dc_tx_err = ENOBUFS; 3219 return; 3220 } 3221 3222 f = &sc->dc_ldata->dc_tx_list[frag]; 3223 f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len); 3224 if (i == 0) { 3225 f->dc_status = 0; 3226 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 3227 } else 3228 f->dc_status = htole32(DC_TXSTAT_OWN); 3229 f->dc_data = htole32(segs[i].ds_addr); 3230 cur = frag; 3231 DC_INC(frag, DC_TX_LIST_CNT); 3232 } 3233 3234 sc->dc_cdata.dc_tx_err = 0; 3235 sc->dc_cdata.dc_tx_prod = frag; 3236 sc->dc_cdata.dc_tx_cnt += nseg; 3237 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 3238 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3239 sc->dc_ldata->dc_tx_list[first].dc_ctl |= 3240 htole32(DC_TXCTL_FINT); 3241 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3242 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3243 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3244 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3245 sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN); 3246} 3247 3248/* 3249 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3250 * pointers to 
the fragment pointers. 3251 */ 3252static int 3253dc_encap(struct dc_softc *sc, struct mbuf *m_head) 3254{ 3255 struct mbuf *m; 3256 int error, idx, chainlen = 0; 3257 3258 /* 3259 * If there's no way we can send any packets, return now. 3260 */ 3261 if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt < 6) 3262 return (ENOBUFS); 3263 3264 /* 3265 * Count the number of frags in this chain to see if 3266 * we need to m_defrag. Since the descriptor list is shared 3267 * by all packets, we'll m_defrag long chains so that they 3268 * do not use up the entire list, even if they would fit. 3269 */ 3270 for (m = m_head; m != NULL; m = m->m_next) 3271 chainlen++; 3272 3273 if ((chainlen > DC_TX_LIST_CNT / 4) || 3274 ((DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt)) < 6)) { 3275 m = m_defrag(m_head, M_DONTWAIT); 3276 if (m == NULL) 3277 return (ENOBUFS); 3278 m_head = m; 3279 } 3280 3281 /* 3282 * Start packing the mbufs in this chain into 3283 * the fragment pointers. Stop when we run out 3284 * of fragments or hit the end of the mbuf chain. 3285 */ 3286 idx = sc->dc_cdata.dc_tx_prod; 3287 error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], 3288 m_head, dc_dma_map_txbuf, sc, 0); 3289 if (error) 3290 return (error); 3291 if (sc->dc_cdata.dc_tx_err != 0) 3292 return (sc->dc_cdata.dc_tx_err); 3293 sc->dc_cdata.dc_tx_chain[idx] = m_head; 3294 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], 3295 BUS_DMASYNC_PREWRITE); 3296 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 3297 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3298 return (0); 3299} 3300 3301/* 3302 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3303 * to the mbuf data regions directly in the transmit lists. We also save a 3304 * copy of the pointers since the transmit list fragment pointers are 3305 * physical addresses. 
 */

static void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL, *m;
	int idx;

	sc = ifp->if_softc;

	DC_LOCK(sc);

	/*
	 * With no link yet, hold off until at least 10 packets have
	 * accumulated; NOTE(review): presumably this batches traffic
	 * until autonegotiation settles -- confirm intent.
	 */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10) {
		DC_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		DC_UNLOCK(sc);
		return;
	}

	/* Remember where this burst of packets begins in the ring. */
	idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	/* Fill descriptors until the ring is full or the queue drains. */
	while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Chips flagged DC_TX_COALESCE (or DC_TX_ALIGN) need the
		 * frame in a single contiguous buffer, so defragment
		 * multi-mbuf chains first.
		 */
		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
		    sc->dc_flags & DC_TX_ALIGN)) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (m == NULL) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			} else {
				m_head = m;
			}
		}

		/*
		 * NOTE(review): dc_encap() may internally m_defrag() and
		 * replace the chain without updating our m_head, so the
		 * IF_PREPEND() below (and the later BPF_MTAP()) can act on
		 * a stale mbuf pointer.  Fixed upstream by passing
		 * struct mbuf ** to dc_encap().
		 */
		if (dc_encap(sc, m_head)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		idx = sc->dc_cdata.dc_tx_prod;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

		/* Some chips can only queue one outstanding TX frame. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	/* Transmit: kick the TX demand-poll register unless the chip is
	 * run in TX-poll mode (where it polls the ring on its own). */
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	DC_UNLOCK(sc);
}

/*
 * Bring the chip out of reset and program it from scratch: bus
 * configuration, TX threshold, magic CSR16 values, descriptor rings,
 * interrupts, RX filter and media.  xsc points at the softc.
 */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	DC_LOCK(sc);

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/*
	 * Select store-and-forward, or cut-through with the configured
	 * TX threshold; an over-limit threshold forces store-and-forward.
	 */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/* Start from the minimum TX threshold; dc_tick/TX-underrun
	 * handling can raise it later.  NOTE(review): confirm where
	 * dc_txthresh is bumped -- not visible in this chunk. */
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("dc%d: initialization failed: no "
		    "memory for rx buffers\n", sc->dc_unit);
		dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	/* Ack any stale interrupt status. */
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/* NWAY-capable chips get a 10x faster tick, presumably to
		 * track autonegotiation more closely. */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

#ifdef SRM_MEDIA
	/* Apply a one-shot media override recorded in dc_srm_media,
	 * then clear it. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
	DC_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
dc_ifmedia_upd(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_mediachg(mii);
	ifm = &mii->mii_media;

	/*
	 * HomePNA on Davicom parts is programmed directly via
	 * dc_setcfg(); everything else clears dc_link so the link
	 * state is re-detected.
	 */
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
		dc_setcfg(sc, ifm->ifm_media);
	else
		sc->dc_link = 0;

	return (0);
}

/*
 * Report current media status.
 */
static void
dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	/*
	 * For HomePNA on Davicom chips, report the configured media
	 * directly rather than the MII status.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
			ifmr->ifm_active = ifm->ifm_media;
			ifmr->ifm_status = 0;
			return;
		}
	}
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls: interface flags, multicast membership and
 * media changes; everything else goes to ether_ioctl().
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	DC_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Only reprogram the RX filter when PROMISC or
			 * ALLMULTI actually changed. */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);

			if (ifp->if_flags & IFF_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				/* Coming up: reset the TX threshold so
				 * dc_init() starts from scratch --
				 * NOTE(review): confirm against dc_init()'s
				 * threshold handling. */
				sc->dc_txthresh = 0;
				dc_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		dc_setfilt(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* A manual media change cancels any saved override. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DC_UNLOCK(sc);

	return (error);
}

/*
 * TX watchdog: the chip failed to complete a transmission within the
 * window armed in dc_start(), so reset and reinitialize it, then
 * requeue anything still waiting to go out.
 */
static void
dc_watchdog(struct ifnet *ifp)
{
	struct dc_softc *sc;

	sc = ifp->if_softc;

	DC_LOCK(sc);

	ifp->if_oerrors++;
	printf("dc%d: watchdog timeout\n", sc->dc_unit);

	dc_stop(sc);
	dc_reset(sc);
	dc_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		dc_start(ifp);

	DC_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	u_int32_t ctl;

	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	ld = sc->dc_ldata;
	cd = &sc->dc_cdata;

	callout_stop(&sc->dc_stat_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif

	/* Halt the DMA engines, mask interrupts and clear the ring
	 * base addresses. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			/*
			 * Setup-frame descriptors and non-first fragments
			 * of a chain don't own an mbuf of their own: just
			 * clear the pointer.  Only the first fragment of a
			 * real packet gets its DMA map unloaded and its
			 * mbuf freed.
			 */
			if ((ctl & DC_TXCTL_SETUP) ||
			    !(ctl & DC_TXCTL_FIRSTFRAG)) {
				cd->dc_tx_chain[i] = NULL;
				continue;
			}
			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
			m_freem(cd->dc_tx_chain[i]);
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));

	DC_UNLOCK(sc);
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
dc_suspend(device_t dev)
{
	struct dc_softc *sc;
	int i, s;

	s = splimp();

	sc = device_get_softc(dev);

	dc_stop(sc);

	/* Save the five BARs plus the expansion ROM (BIOS) address,
	 * interrupt line, cache line size and latency timer. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	splx(s);
	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
dc_resume(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	int i, s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;
#ifndef BURN_BRIDGES
	/* NOTE(review): presumably restores the chip's power state
	 * before config space is touched -- see dc_acpi(). */
	dc_acpi(dev);
#endif
	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, DC_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		dc_init(sc);

	sc->suspended = 0;

	splx(s);
	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
3832 */ 3833static void 3834dc_shutdown(device_t dev) 3835{ 3836 struct dc_softc *sc; 3837 3838 sc = device_get_softc(dev); 3839 3840 dc_stop(sc); 3841} 3842