if_vr.c revision 148654
1/*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 148654 2005-08-03 00:18:35Z rwatson $"); 35 36/* 37 * VIA Rhine fast ethernet PCI NIC driver 38 * 39 * Supports various network adapters based on the VIA Rhine 40 * and Rhine II PCI controllers, including the D-Link DFE530TX. 41 * Datasheets are available at http://www.via.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48/* 49 * The VIA Rhine controllers are similar in some respects to the 50 * the DEC tulip chips, except less complicated. The controller 51 * uses an MII bus and an external physical layer interface. The 52 * receiver has a one entry perfect filter and a 64-bit hash table 53 * multicast filter. Transmit and receive descriptors are similar 54 * to the tulip. 55 * 56 * The Rhine has a serious flaw in its transmit DMA mechanism: 57 * transmit buffers must be longword aligned. Unfortunately, 58 * FreeBSD doesn't guarantee that mbufs will be filled in starting 59 * at longword boundaries, so we have to do a buffer copy before 60 * transmission. 
61 */ 62 63#include <sys/param.h> 64#include <sys/systm.h> 65#include <sys/sockio.h> 66#include <sys/mbuf.h> 67#include <sys/malloc.h> 68#include <sys/kernel.h> 69#include <sys/module.h> 70#include <sys/socket.h> 71 72#include <net/if.h> 73#include <net/if_arp.h> 74#include <net/ethernet.h> 75#include <net/if_dl.h> 76#include <net/if_media.h> 77#include <net/if_types.h> 78 79#include <net/bpf.h> 80 81#include <vm/vm.h> /* for vtophys */ 82#include <vm/pmap.h> /* for vtophys */ 83#include <machine/bus.h> 84#include <machine/resource.h> 85#include <sys/bus.h> 86#include <sys/rman.h> 87 88#include <dev/mii/mii.h> 89#include <dev/mii/miivar.h> 90 91#include <dev/pci/pcireg.h> 92#include <dev/pci/pcivar.h> 93 94#define VR_USEIOSPACE 95 96#include <pci/if_vrreg.h> 97 98MODULE_DEPEND(vr, pci, 1, 1, 1); 99MODULE_DEPEND(vr, ether, 1, 1, 1); 100MODULE_DEPEND(vr, miibus, 1, 1, 1); 101 102/* "controller miibus0" required. See GENERIC if you get errors here. */ 103#include "miibus_if.h" 104 105#undef VR_USESWSHIFT 106 107/* 108 * Various supported device vendors/types and their names. 
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
		"VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
		"VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
		"VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
		"Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
		"Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }		/* end-of-table sentinel */
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
		struct mbuf *);
static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf * );

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static void vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Software bit-bang MII access; only compiled when VR_USESWSHIFT is set. */
#ifdef VR_USESWSHIFT
static void vr_mii_sync(struct vr_softc *);
static void vr_mii_send(struct vr_softc *, uint32_t, int);
#endif
static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);

/* Select I/O-port vs. memory-mapped register access (see VR_USEIOSPACE). */
#ifdef VR_USEIOSPACE
#define VR_RES			SYS_RES_IOPORT
#define VR_RID			VR_PCI_LOIO
#else
#define VR_RES			SYS_RES_MEMORY
#define VR_RID			VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for 1/2/4-byte CSRs. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MII command register (bit-bang control lines). */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~(x))

#ifdef VR_USESWSHIFT
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(struct vr_softc *sc)
{
	register int i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Shift the low 'cnt' bits out MSB first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
#endif

/*
 * Read an PHY register through the MII.
 */
static int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Turn on data xmit. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Send command/address info. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack (PHY pulls DATAOUT low on ack, so nonzero = no ack). */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}
#else
{
	int i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Poll for completion; the chip clears READ_ENB when done. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
	/* NOTE(review): data is read even if the poll above timed out. */
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	return (0);
}
#endif


/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Set up frame for TX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/* Turn on data output. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	return (0);
}
#else
{
	int i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/* Poll for completion; the chip clears WRITE_ENB when done. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	return (0);
}
#endif

/*
 * miibus read-register method.  On the VT6102 (Apollo) only PHY 1 is
 * valid; reads of any other address return 0.
 */
static int
vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
{
	struct vr_mii_frame frame;
	struct vr_softc *sc = device_get_softc(dev);

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
		if (phy != 1) {
			frame.mii_data = 0;
			goto out;
		}
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

out:
	return (frame.mii_data);
}

/*
 * miibus write-register method; writes to non-PHY-1 addresses on the
 * VT6102 are silently dropped (see vr_miibus_readreg).
 */
static int
vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
{
	struct vr_mii_frame frame;
	struct vr_softc *sc = device_get_softc(dev);

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
		if (phy != 1)
			return (0);
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);

	return (0);
}

/* miibus link-state change callback: push new media into the chip. */
static void
vr_miibus_statchg(device_t dev)
{
	struct mii_data *mii;
	struct vr_softc *sc = device_get_softc(dev);

	mii = device_get_softc(sc->vr_miibus);
	vr_setcfg(sc, mii->mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp = sc->vr_ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	uint8_t rxfilt;
	int mcnt = 0;

	VR_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: set all 64 hash bits. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zero out all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 bits of the big-endian CRC select the hash bit. */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
static void
vr_setcfg(struct vr_softc *sc, int media)
{
	int restart = 0;

	VR_LOCK_ASSERT(sc);

	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

/* Soft-reset the chip, falling back to the force-reset command on
 * newer (>= VT3065 A) parts when the normal reset does not complete. */
static void
vr_reset(struct vr_softc *sc)
{
	register int i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during detach w/o lock. */

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("vr%d: reset never completed!\n", sc->vr_unit);
		else {
			/* Use newer force reset command */
			printf("vr%d: Using force reset command.\n",
			    sc->vr_unit);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	struct vr_type *t = vr_devs;

	while (t->vr_name != NULL) {
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did)) {
			device_set_desc(dev, t->vr_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(dev)
	device_t dev;
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	struct vr_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

	rid = VR_RID;
	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);

	if (sc->vr_res == NULL) {
		printf("vr%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vr_btag = rman_get_bustag(sc->vr_res);
	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);

	/* Allocate interrupt */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		printf("vr%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_write_config(dev, VR_PCI_MODE,
	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	sc->vr_unit = unit;

	/* Descriptor lists are DMA'd by the chip via vtophys(), so they
	 * must be physically contiguous and page aligned. */
	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->vr_ldata == NULL) {
		printf("vr%d: no memory for list buffers!\n", unit);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->vr_ldata, sizeof(struct vr_list_data));

	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("vr%d: can not if_alloc()\n", unit);
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		printf("vr%d: MII without any phy!\n", sc->vr_unit);
		error = ENXIO;
		goto fail;
	}

	callout_handle_init(&sc->vr_stat_ch);

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	sc->suspended = 0;

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		printf("vr%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		if_free(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

	VR_LOCK(sc);

	/* Tell vr_intr() to ignore any further interrupts. */
	sc->suspended = 1;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vr_stop(sc);
		VR_UNLOCK(sc);		/* XXX: Avoid recursive acquire. */
		ether_ifdetach(ifp);
		if_free(ifp);
		VR_LOCK(sc);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	VR_UNLOCK(sc);
	mtx_destroy(&sc->vr_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	/* Link the software TX descriptors into a circular list; the last
	 * entry points back at the first. */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
				&cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
				&cd->vr_tx_chain[i + 1];
	}
	/* Start with an empty ring: consumer == producer. */
	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	VR_LOCK_ASSERT(sc);

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		cd->vr_rx_chain[i].vr_ptr =
			(struct vr_desc *)&ld->vr_rx_list[i];
		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
			return (ENOBUFS);
		/* Close the ring in both the software chain and the
		 * hardware next-descriptor (physical address) links. */
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
				&cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
				vtophys(&ld->vr_rx_list[0]);
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
				&cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
				vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
903 */ 904static int 905vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m) 906{ 907 struct mbuf *m_new = NULL; 908 909 if (m == NULL) { 910 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 911 if (m_new == NULL) 912 return (ENOBUFS); 913 914 MCLGET(m_new, M_DONTWAIT); 915 if (!(m_new->m_flags & M_EXT)) { 916 m_freem(m_new); 917 return (ENOBUFS); 918 } 919 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 920 } else { 921 m_new = m; 922 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 923 m_new->m_data = m_new->m_ext.ext_buf; 924 } 925 926 m_adj(m_new, sizeof(uint64_t)); 927 928 c->vr_mbuf = m_new; 929 c->vr_ptr->vr_status = VR_RXSTAT; 930 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); 931 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; 932 933 return (0); 934} 935 936/* 937 * A frame has been uploaded: pass the resulting mbuf chain up to 938 * the higher level protocols. 939 */ 940static void 941vr_rxeof(struct vr_softc *sc) 942{ 943 struct mbuf *m, *m0; 944 struct ifnet *ifp; 945 struct vr_chain_onefrag *cur_rx; 946 int total_len = 0; 947 uint32_t rxstat; 948 949 VR_LOCK_ASSERT(sc); 950 ifp = sc->vr_ifp; 951 952 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & 953 VR_RXSTAT_OWN)) { 954#ifdef DEVICE_POLLING 955 if (ifp->if_flags & IFF_POLLING) { 956 if (sc->rxcycles <= 0) 957 break; 958 sc->rxcycles--; 959 } 960#endif /* DEVICE_POLLING */ 961 m0 = NULL; 962 cur_rx = sc->vr_cdata.vr_rx_head; 963 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; 964 m = cur_rx->vr_mbuf; 965 966 /* 967 * If an error occurs, update stats, clear the 968 * status word and leave the mbuf cluster in place: 969 * it should simply get re-used next time this descriptor 970 * comes up in the ring. 
971 */ 972 if (rxstat & VR_RXSTAT_RXERR) { 973 ifp->if_ierrors++; 974 printf("vr%d: rx error (%02x):", sc->vr_unit, 975 rxstat & 0x000000ff); 976 if (rxstat & VR_RXSTAT_CRCERR) 977 printf(" crc error"); 978 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 979 printf(" frame alignment error\n"); 980 if (rxstat & VR_RXSTAT_FIFOOFLOW) 981 printf(" FIFO overflow"); 982 if (rxstat & VR_RXSTAT_GIANT) 983 printf(" received giant packet"); 984 if (rxstat & VR_RXSTAT_RUNT) 985 printf(" received runt packet"); 986 if (rxstat & VR_RXSTAT_BUSERR) 987 printf(" system bus error"); 988 if (rxstat & VR_RXSTAT_BUFFERR) 989 printf("rx buffer error"); 990 printf("\n"); 991 vr_newbuf(sc, cur_rx, m); 992 continue; 993 } 994 995 /* No errors; receive the packet. */ 996 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); 997 998 /* 999 * XXX The VIA Rhine chip includes the CRC with every 1000 * received frame, and there's no way to turn this 1001 * behavior off (at least, I can't find anything in 1002 * the manual that explains how to do it) so we have 1003 * to trim off the CRC manually. 
1004 */ 1005 total_len -= ETHER_CRC_LEN; 1006 1007 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, 1008 NULL); 1009 vr_newbuf(sc, cur_rx, m); 1010 if (m0 == NULL) { 1011 ifp->if_ierrors++; 1012 continue; 1013 } 1014 m = m0; 1015 1016 ifp->if_ipackets++; 1017 VR_UNLOCK(sc); 1018 (*ifp->if_input)(ifp, m); 1019 VR_LOCK(sc); 1020 } 1021} 1022 1023static void 1024vr_rxeoc(struct vr_softc *sc) 1025{ 1026 struct ifnet *ifp = sc->vr_ifp; 1027 int i; 1028 1029 VR_LOCK_ASSERT(sc); 1030 1031 ifp->if_ierrors++; 1032 1033 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1034 DELAY(10000); 1035 1036 /* Wait for receiver to stop */ 1037 for (i = 0x400; 1038 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 1039 i--) { 1040 ; 1041 } 1042 1043 if (!i) { 1044 printf("vr%d: rx shutdown error!\n", sc->vr_unit); 1045 sc->vr_flags |= VR_F_RESTART; 1046 return; 1047 } 1048 1049 vr_rxeof(sc); 1050 1051 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1052 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1053 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 1054} 1055 1056/* 1057 * A frame was downloaded to the chip. It's safe for us to clean up 1058 * the list buffers. 1059 */ 1060static void 1061vr_txeof(struct vr_softc *sc) 1062{ 1063 struct vr_chain *cur_tx; 1064 struct ifnet *ifp = sc->vr_ifp; 1065 1066 VR_LOCK_ASSERT(sc); 1067 1068 /* 1069 * Go through our tx list and free mbufs for those 1070 * frames that have been transmitted. 
1071 */ 1072 cur_tx = sc->vr_cdata.vr_tx_cons; 1073 while (cur_tx->vr_mbuf != NULL) { 1074 uint32_t txstat; 1075 int i; 1076 1077 txstat = cur_tx->vr_ptr->vr_status; 1078 1079 if ((txstat & VR_TXSTAT_ABRT) || 1080 (txstat & VR_TXSTAT_UDF)) { 1081 for (i = 0x400; 1082 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 1083 i--) 1084 ; /* Wait for chip to shutdown */ 1085 if (!i) { 1086 printf("vr%d: tx shutdown timeout\n", 1087 sc->vr_unit); 1088 sc->vr_flags |= VR_F_RESTART; 1089 break; 1090 } 1091 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1092 CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr)); 1093 break; 1094 } 1095 1096 if (txstat & VR_TXSTAT_OWN) 1097 break; 1098 1099 if (txstat & VR_TXSTAT_ERRSUM) { 1100 ifp->if_oerrors++; 1101 if (txstat & VR_TXSTAT_DEFER) 1102 ifp->if_collisions++; 1103 if (txstat & VR_TXSTAT_LATECOLL) 1104 ifp->if_collisions++; 1105 } 1106 1107 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1108 1109 ifp->if_opackets++; 1110 m_freem(cur_tx->vr_mbuf); 1111 cur_tx->vr_mbuf = NULL; 1112 ifp->if_flags &= ~IFF_OACTIVE; 1113 1114 cur_tx = cur_tx->vr_nextdesc; 1115 } 1116 sc->vr_cdata.vr_tx_cons = cur_tx; 1117 if (cur_tx->vr_mbuf == NULL) 1118 ifp->if_timer = 0; 1119} 1120 1121static void 1122vr_tick(void *xsc) 1123{ 1124 struct vr_softc *sc = xsc; 1125 struct mii_data *mii; 1126 1127 VR_LOCK(sc); 1128 1129 if (sc->vr_flags & VR_F_RESTART) { 1130 printf("vr%d: restarting\n", sc->vr_unit); 1131 vr_stop(sc); 1132 vr_reset(sc); 1133 vr_init_locked(sc); 1134 sc->vr_flags &= ~VR_F_RESTART; 1135 } 1136 1137 mii = device_get_softc(sc->vr_miibus); 1138 mii_tick(mii); 1139 sc->vr_stat_ch = timeout(vr_tick, sc, hz); 1140 1141 VR_UNLOCK(sc); 1142} 1143 1144#ifdef DEVICE_POLLING 1145static poll_handler_t vr_poll; 1146static poll_handler_t vr_poll_locked; 1147 1148static void 1149vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1150{ 1151 struct vr_softc *sc = ifp->if_softc; 1152 1153 VR_LOCK(sc); 1154 vr_poll_locked(ifp, cmd, count); 1155 
VR_UNLOCK(sc); 1156} 1157 1158static void 1159vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1160{ 1161 struct vr_softc *sc = ifp->if_softc; 1162 1163 VR_LOCK_ASSERT(sc); 1164 1165 if (!(ifp->if_capenable & IFCAP_POLLING)) { 1166 ether_poll_deregister(ifp); 1167 cmd = POLL_DEREGISTER; 1168 } 1169 1170 if (cmd == POLL_DEREGISTER) { 1171 /* Final call, enable interrupts. */ 1172 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1173 return; 1174 } 1175 1176 sc->rxcycles = count; 1177 vr_rxeof(sc); 1178 vr_txeof(sc); 1179 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1180 vr_start_locked(ifp); 1181 1182 if (cmd == POLL_AND_CHECK_STATUS) { 1183 uint16_t status; 1184 1185 /* Also check status register. */ 1186 status = CSR_READ_2(sc, VR_ISR); 1187 if (status) 1188 CSR_WRITE_2(sc, VR_ISR, status); 1189 1190 if ((status & VR_INTRS) == 0) 1191 return; 1192 1193 if (status & VR_ISR_RX_DROPPED) { 1194 printf("vr%d: rx packet lost\n", sc->vr_unit); 1195 ifp->if_ierrors++; 1196 } 1197 1198 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1199 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) { 1200 printf("vr%d: receive error (%04x)", 1201 sc->vr_unit, status); 1202 if (status & VR_ISR_RX_NOBUF) 1203 printf(" no buffers"); 1204 if (status & VR_ISR_RX_OFLOW) 1205 printf(" overflow"); 1206 if (status & VR_ISR_RX_DROPPED) 1207 printf(" packet lost"); 1208 printf("\n"); 1209 vr_rxeoc(sc); 1210 } 1211 1212 if ((status & VR_ISR_BUSERR) || 1213 (status & VR_ISR_TX_UNDERRUN)) { 1214 vr_reset(sc); 1215 vr_init_locked(sc); 1216 return; 1217 } 1218 1219 if ((status & VR_ISR_UDFI) || 1220 (status & VR_ISR_TX_ABRT2) || 1221 (status & VR_ISR_TX_ABRT)) { 1222 ifp->if_oerrors++; 1223 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1224 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); 1225 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); 1226 } 1227 } 1228 } 1229} 1230#endif /* DEVICE_POLLING */ 1231 1232static void 1233vr_intr(void *arg) 1234{ 1235 struct vr_softc *sc = arg; 1236 struct 
ifnet *ifp = sc->vr_ifp; 1237 uint16_t status; 1238 1239 VR_LOCK(sc); 1240 1241 if (sc->suspended) { 1242 /* 1243 * Forcibly disable interrupts. 1244 * XXX: Mobile VIA based platforms may need 1245 * interrupt re-enable on resume. 1246 */ 1247 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1248 goto done_locked; 1249 } 1250 1251#ifdef DEVICE_POLLING 1252 if (ifp->if_flags & IFF_POLLING) 1253 goto done_locked; 1254 1255 if ((ifp->if_capenable & IFCAP_POLLING) && 1256 ether_poll_register(vr_poll, ifp)) { 1257 /* OK, disable interrupts. */ 1258 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1259 vr_poll_locked(ifp, 0, 1); 1260 goto done_locked; 1261 } 1262#endif /* DEVICE_POLLING */ 1263 1264 /* Suppress unwanted interrupts. */ 1265 if (!(ifp->if_flags & IFF_UP)) { 1266 vr_stop(sc); 1267 goto done_locked; 1268 } 1269 1270 /* Disable interrupts. */ 1271 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1272 1273 for (;;) { 1274 status = CSR_READ_2(sc, VR_ISR); 1275 if (status) 1276 CSR_WRITE_2(sc, VR_ISR, status); 1277 1278 if ((status & VR_INTRS) == 0) 1279 break; 1280 1281 if (status & VR_ISR_RX_OK) 1282 vr_rxeof(sc); 1283 1284 if (status & VR_ISR_RX_DROPPED) { 1285 printf("vr%d: rx packet lost\n", sc->vr_unit); 1286 ifp->if_ierrors++; 1287 } 1288 1289 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1290 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) { 1291 printf("vr%d: receive error (%04x)", 1292 sc->vr_unit, status); 1293 if (status & VR_ISR_RX_NOBUF) 1294 printf(" no buffers"); 1295 if (status & VR_ISR_RX_OFLOW) 1296 printf(" overflow"); 1297 if (status & VR_ISR_RX_DROPPED) 1298 printf(" packet lost"); 1299 printf("\n"); 1300 vr_rxeoc(sc); 1301 } 1302 1303 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { 1304 vr_reset(sc); 1305 vr_init_locked(sc); 1306 break; 1307 } 1308 1309 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || 1310 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { 1311 vr_txeof(sc); 1312 if ((status & VR_ISR_UDFI) || 1313 (status & 
VR_ISR_TX_ABRT2) || 1314 (status & VR_ISR_TX_ABRT)) { 1315 ifp->if_oerrors++; 1316 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1317 VR_SETBIT16(sc, VR_COMMAND, 1318 VR_CMD_TX_ON); 1319 VR_SETBIT16(sc, VR_COMMAND, 1320 VR_CMD_TX_GO); 1321 } 1322 } 1323 } 1324 } 1325 1326 /* Re-enable interrupts. */ 1327 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1328 1329 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1330 vr_start_locked(ifp); 1331 1332done_locked: 1333 VR_UNLOCK(sc); 1334} 1335 1336/* 1337 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1338 * pointers to the fragment pointers. 1339 */ 1340static int 1341vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head) 1342{ 1343 struct vr_desc *f = NULL; 1344 struct mbuf *m; 1345 1346 VR_LOCK_ASSERT(sc); 1347 /* 1348 * The VIA Rhine wants packet buffers to be longword 1349 * aligned, but very often our mbufs aren't. Rather than 1350 * waste time trying to decide when to copy and when not 1351 * to copy, just do it all the time. 1352 */ 1353 m = m_defrag(m_head, M_DONTWAIT); 1354 if (m == NULL) 1355 return (1); 1356 1357 /* 1358 * The Rhine chip doesn't auto-pad, so we have to make 1359 * sure to pad short frames out to the minimum frame length 1360 * ourselves. 1361 */ 1362 if (m->m_len < VR_MIN_FRAMELEN) { 1363 m->m_pkthdr.len += VR_MIN_FRAMELEN - m->m_len; 1364 m->m_len = m->m_pkthdr.len; 1365 } 1366 1367 c->vr_mbuf = m; 1368 f = c->vr_ptr; 1369 f->vr_data = vtophys(mtod(m, caddr_t)); 1370 f->vr_ctl = m->m_len; 1371 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; 1372 f->vr_status = 0; 1373 f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; 1374 f->vr_next = vtophys(c->vr_nextdesc->vr_ptr); 1375 1376 return (0); 1377} 1378 1379/* 1380 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1381 * to the mbuf data regions directly in the transmit lists. We also save a 1382 * copy of the pointers since the transmit list fragment pointers are 1383 * physical addresses. 
1384 */ 1385 1386static void 1387vr_start(struct ifnet *ifp) 1388{ 1389 struct vr_softc *sc = ifp->if_softc; 1390 1391 VR_LOCK(sc); 1392 vr_start_locked(ifp); 1393 VR_UNLOCK(sc); 1394} 1395 1396static void 1397vr_start_locked(struct ifnet *ifp) 1398{ 1399 struct vr_softc *sc = ifp->if_softc; 1400 struct mbuf *m_head; 1401 struct vr_chain *cur_tx; 1402 1403 if (ifp->if_flags & IFF_OACTIVE) 1404 return; 1405 1406 cur_tx = sc->vr_cdata.vr_tx_prod; 1407 while (cur_tx->vr_mbuf == NULL) { 1408 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1409 if (m_head == NULL) 1410 break; 1411 1412 /* Pack the data into the descriptor. */ 1413 if (vr_encap(sc, cur_tx, m_head)) { 1414 /* Rollback, send what we were able to encap. */ 1415 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1416 break; 1417 } 1418 1419 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1420 1421 /* 1422 * If there's a BPF listener, bounce a copy of this frame 1423 * to him. 1424 */ 1425 BPF_MTAP(ifp, cur_tx->vr_mbuf); 1426 1427 cur_tx = cur_tx->vr_nextdesc; 1428 } 1429 if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) { 1430 sc->vr_cdata.vr_tx_prod = cur_tx; 1431 1432 /* Tell the chip to start transmitting. */ 1433 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO); 1434 1435 /* Set a timeout in case the chip goes out to lunch. */ 1436 ifp->if_timer = 5; 1437 1438 if (cur_tx->vr_mbuf != NULL) 1439 ifp->if_flags |= IFF_OACTIVE; 1440 } 1441} 1442 1443static void 1444vr_init(void *xsc) 1445{ 1446 struct vr_softc *sc = xsc; 1447 1448 VR_LOCK(sc); 1449 vr_init_locked(sc); 1450 VR_UNLOCK(sc); 1451} 1452 1453static void 1454vr_init_locked(struct vr_softc *sc) 1455{ 1456 struct ifnet *ifp = sc->vr_ifp; 1457 struct mii_data *mii; 1458 int i; 1459 1460 VR_LOCK_ASSERT(sc); 1461 1462 mii = device_get_softc(sc->vr_miibus); 1463 1464 /* Cancel pending I/O and free all RX/TX buffers. */ 1465 vr_stop(sc); 1466 vr_reset(sc); 1467 1468 /* Set our station address. 
*/ 1469 for (i = 0; i < ETHER_ADDR_LEN; i++) 1470 CSR_WRITE_1(sc, VR_PAR0 + i, IFP2ENADDR(sc->vr_ifp)[i]); 1471 1472 /* Set DMA size. */ 1473 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 1474 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 1475 1476 /* 1477 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 1478 * so we must set both. 1479 */ 1480 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1481 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1482 1483 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1484 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1485 1486 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1487 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1488 1489 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1490 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1491 1492 /* Init circular RX list. */ 1493 if (vr_list_rx_init(sc) == ENOBUFS) { 1494 printf( 1495"vr%d: initialization failed: no memory for rx buffers\n", sc->vr_unit); 1496 vr_stop(sc); 1497 return; 1498 } 1499 1500 /* Init tx descriptors. */ 1501 vr_list_tx_init(sc); 1502 1503 /* If we want promiscuous mode, set the allframes bit. */ 1504 if (ifp->if_flags & IFF_PROMISC) 1505 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1506 else 1507 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1508 1509 /* Set capture broadcast bit to capture broadcast frames. */ 1510 if (ifp->if_flags & IFF_BROADCAST) 1511 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1512 else 1513 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1514 1515 /* 1516 * Program the multicast filter, if necessary. 1517 */ 1518 vr_setmulti(sc); 1519 1520 /* 1521 * Load the address of the RX list. 1522 */ 1523 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1524 1525 /* Enable receiver and transmitter. 
*/ 1526 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1527 VR_CMD_TX_ON|VR_CMD_RX_ON| 1528 VR_CMD_RX_GO); 1529 1530 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); 1531 1532 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1533#ifdef DEVICE_POLLING 1534 /* 1535 * Disable interrupts if we are polling. 1536 */ 1537 if (ifp->if_flags & IFF_POLLING) 1538 CSR_WRITE_2(sc, VR_IMR, 0); 1539 else 1540#endif /* DEVICE_POLLING */ 1541 /* 1542 * Enable interrupts. 1543 */ 1544 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1545 1546 mii_mediachg(mii); 1547 1548 ifp->if_flags |= IFF_RUNNING; 1549 ifp->if_flags &= ~IFF_OACTIVE; 1550 1551 sc->vr_stat_ch = timeout(vr_tick, sc, hz); 1552} 1553 1554/* 1555 * Set media options. 1556 */ 1557static int 1558vr_ifmedia_upd(struct ifnet *ifp) 1559{ 1560 struct vr_softc *sc = ifp->if_softc; 1561 1562 if (ifp->if_flags & IFF_UP) 1563 vr_init(sc); 1564 1565 return (0); 1566} 1567 1568/* 1569 * Report current media status. 1570 */ 1571static void 1572vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1573{ 1574 struct vr_softc *sc = ifp->if_softc; 1575 struct mii_data *mii; 1576 1577 mii = device_get_softc(sc->vr_miibus); 1578 VR_LOCK(sc); 1579 mii_pollstat(mii); 1580 VR_UNLOCK(sc); 1581 ifmr->ifm_active = mii->mii_media_active; 1582 ifmr->ifm_status = mii->mii_media_status; 1583} 1584 1585static int 1586vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1587{ 1588 struct vr_softc *sc = ifp->if_softc; 1589 struct ifreq *ifr = (struct ifreq *) data; 1590 struct mii_data *mii; 1591 int error = 0; 1592 1593 switch (command) { 1594 case SIOCSIFFLAGS: 1595 VR_LOCK(sc); 1596 if (ifp->if_flags & IFF_UP) { 1597 vr_init_locked(sc); 1598 } else { 1599 if (ifp->if_flags & IFF_RUNNING) 1600 vr_stop(sc); 1601 } 1602 VR_UNLOCK(sc); 1603 error = 0; 1604 break; 1605 case SIOCADDMULTI: 1606 case SIOCDELMULTI: 1607 VR_LOCK(sc); 1608 vr_setmulti(sc); 1609 VR_UNLOCK(sc); 1610 error = 0; 1611 break; 1612 case SIOCGIFMEDIA: 1613 case 
SIOCSIFMEDIA: 1614 mii = device_get_softc(sc->vr_miibus); 1615 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1616 break; 1617 case SIOCSIFCAP: 1618 ifp->if_capenable = ifr->ifr_reqcap; 1619 break; 1620 default: 1621 error = ether_ioctl(ifp, command, data); 1622 break; 1623 } 1624 1625 return (error); 1626} 1627 1628static void 1629vr_watchdog(struct ifnet *ifp) 1630{ 1631 struct vr_softc *sc = ifp->if_softc; 1632 1633 VR_LOCK(sc); 1634 1635 ifp->if_oerrors++; 1636 printf("vr%d: watchdog timeout\n", sc->vr_unit); 1637 1638 vr_stop(sc); 1639 vr_reset(sc); 1640 vr_init_locked(sc); 1641 1642 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1643 vr_start_locked(ifp); 1644 1645 VR_UNLOCK(sc); 1646} 1647 1648/* 1649 * Stop the adapter and free any mbufs allocated to the 1650 * RX and TX lists. 1651 */ 1652static void 1653vr_stop(struct vr_softc *sc) 1654{ 1655 register int i; 1656 struct ifnet *ifp; 1657 1658 VR_LOCK_ASSERT(sc); 1659 1660 ifp = sc->vr_ifp; 1661 ifp->if_timer = 0; 1662 1663 untimeout(vr_tick, sc, sc->vr_stat_ch); 1664 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1665#ifdef DEVICE_POLLING 1666 ether_poll_deregister(ifp); 1667#endif /* DEVICE_POLLING */ 1668 1669 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1670 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1671 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1672 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1673 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1674 1675 /* 1676 * Free data in the RX lists. 1677 */ 1678 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1679 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1680 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1681 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1682 } 1683 } 1684 bzero((char *)&sc->vr_ldata->vr_rx_list, 1685 sizeof(sc->vr_ldata->vr_rx_list)); 1686 1687 /* 1688 * Free the TX list buffers. 
1689 */ 1690 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1691 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1692 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1693 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1694 } 1695 } 1696 bzero((char *)&sc->vr_ldata->vr_tx_list, 1697 sizeof(sc->vr_ldata->vr_tx_list)); 1698} 1699 1700/* 1701 * Stop all chip I/O so that the kernel's probe routines don't 1702 * get confused by errant DMAs when rebooting. 1703 */ 1704static void 1705vr_shutdown(device_t dev) 1706{ 1707 1708 vr_detach(dev); 1709} 1710