/*	$OpenBSD: if_vge.c,v 1.56 2013/08/07 01:06:38 bluhm Exp $	*/
/*	$FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $	*/
/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 *
 * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org>
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
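/*
 * The descriptor setup code below splits each buffer address with the
 * VGE_ADDR_LO() and VGE_ADDR_HI() macros into a 32-bit low word and a
 * 16-bit high word; together these form the 48 bits of addressing that
 * a descriptor can carry.
 */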
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>
#include <dev/pci/if_vgevar.h>

int vge_probe (struct device *, void *, void *);
void vge_attach (struct device *, struct device *, void *);
int vge_detach (struct device *, int);

int vge_encap (struct vge_softc *, struct mbuf *, int);

int vge_allocmem (struct vge_softc *);
void vge_freemem (struct vge_softc *);
int vge_newbuf (struct vge_softc *, int, struct mbuf *);
int vge_rx_list_init (struct vge_softc *);
int vge_tx_list_init (struct vge_softc *);
void vge_rxeof (struct vge_softc *);
void vge_txeof (struct vge_softc *);
int vge_intr (void *);
void vge_tick (void *);
void vge_start (struct ifnet *);
int vge_ioctl (struct ifnet *, u_long, caddr_t);
int vge_init (struct ifnet *);
void vge_stop (struct vge_softc *);
void vge_watchdog (struct ifnet *);
int vge_ifmedia_upd (struct ifnet *);
void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
#endif
void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int);

void vge_miipoll_start (struct vge_softc *);
void vge_miipoll_stop (struct vge_softc *);
int vge_miibus_readreg (struct device *, int, int);
void vge_miibus_writereg (struct device *, int, int, int);
void vge_miibus_statchg (struct device *);

void vge_cam_clear (struct vge_softc *);
int vge_cam_set (struct vge_softc *, uint8_t *);
void vge_iff (struct vge_softc *);
void vge_reset (struct vge_softc *);

struct cfattach vge_ca = {
	sizeof(struct vge_softc), vge_probe, vge_attach, vge_detach
};

struct cfdriver vge_cd = {
	NULL, "vge", DV_IFNET
};

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

int vge_debug = 0;
#define DPRINTF(x)	if (vge_debug) printf x
#define DPRINTFN(n, x)	if (vge_debug >= (n)) printf x

const struct pci_matchid vge_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612x },
};

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname);
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt,
    int swap)
{
	int i;
#ifdef VGE_EEPROM
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: failed to idle MII autopoll\n",
		    sc->vge_dev.dv_xname);
}

void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->vge_dev.dv_xname);
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: failed to start MII autopoll\n",
		    sc->vge_dev.dv_xname);
}

int
vge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i, s;
	u_int16_t rval = 0;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	s = splnet();

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", sc->vge_dev.dv_xname);
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	splx(s);

	return (rval);
}

void
vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i, s;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", sc->vge_dev.dv_xname);
	}

	vge_miipoll_start(sc);
	splx(s);
}

void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n",
		    sc->vge_dev.dv_xname);
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
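/*
 * A worked example of the mask-bit arithmetic in vge_cam_set(): with
 * vge_camidx == 13, the mask bit lives in register VGE_CAM0 + 1
 * (13 / 8 == 1) at bit position 5 (13 & 7 == 5), so CAM entry 13 is
 * enabled by setting bit 5 of the second mask byte.
 */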
/*
 * We use the 64-entry CAM filter for perfect filtering.
 * If there are more than 64 multicast addresses, we use the
 * hash filter instead.
 */
void
vge_iff(struct vge_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h = 0, hashes[2];
	u_int8_t rxctl;
	int error;

	vge_cam_clear(sc);
	rxctl = CSR_READ_1(sc, VGE_RXCTL);
	rxctl &= ~(VGE_RXCTL_RX_BCAST | VGE_RXCTL_RX_MCAST |
	    VGE_RXCTL_RX_PROMISC | VGE_RXCTL_RX_UCAST);
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxctl |= VGE_RXCTL_RX_BCAST | VGE_RXCTL_RX_UCAST;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		rxctl |= VGE_RXCTL_RX_MCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxctl |= VGE_RXCTL_RX_PROMISC;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else if (ac->ac_multicnt > VGE_CAM_MAXADDRS) {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			hashes[h >> 5] |= 1 << (h & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			error = vge_cam_set(sc, enm->enm_addrlo);
			if (error)
				break;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxctl);
}

void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", sc->vge_dev.dv_xname);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a match if we find one.
 */
int
vge_probe(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices,
	    nitems(vge_devices)));
}

/*
 * Allocate memory for RX/TX rings
 */
int
vge_allocmem(struct vge_softc *sc)
{
	int nseg, rseg;
	int i, error;

	nseg = 32;

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1,
	    VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);
	error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ,
	    ETHER_ALIGN, 0,
	    &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname);
		return (ENOMEM);
	}

	/* Map the memory for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
	    1, VGE_TX_LIST_SZ,
	    (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map TX dma buffers\n",
		    sc->vge_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vge_ldata.vge_tx_listseg, rseg);
		return (ENOMEM);
	}
	memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map,
	    sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vge_ldata.vge_tx_listseg, rseg);
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
		    MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->vge_dev.dv_xname);
			return (ENOMEM);
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1,
	    VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);
	error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
	    0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname);
		return (ENOMEM);
	}

	/* Map the memory for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
	    1, VGE_RX_LIST_SZ,
	    (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map RX dma buffers\n",
		    sc->vge_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vge_ldata.vge_rx_listseg, rseg);
		return (ENOMEM);
	}
	memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map,
	    sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vge_ldata.vge_rx_listseg, rseg);
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
		    MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->vge_dev.dv_xname);
			return (ENOMEM);
		}
	}

	return (0);
}

void
vge_freemem(struct vge_softc *sc)
{
	int i;

	for (i = 0; i < VGE_RX_DESC_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

	bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ);
	bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, 1);

	for (i = 0; i < VGE_TX_DESC_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat,
		    sc->vge_ldata.vge_tx_dmamap[i]);

	bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ);
	bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, 1);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vge_attach(struct device *parent, struct device *self, void *aux)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc = (struct vge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vge_btag, &sc->vge_bhandle, NULL, &sc->vge_bsize, 0)) {
		if (pci_mapreg_map(pa, VGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
		    &sc->vge_btag, &sc->vge_bhandle, NULL,
		    &sc->vge_bsize, 0)) {
			printf(": can't map mem or i/o space\n");
			return;
		}
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc,
	    sc->vge_dev.dv_xname);
	if (sc->vge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 1);

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	printf(", address %s\n",
	    ether_sprintf(sc->arpcom.ac_enaddr));

	error = vge_allocmem(sc);
	if (error)
		return;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
#ifdef VGE_JUMBO
	ifp->if_hardmtu = VGE_JUMBO_MTU;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ);

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    vge_ifmedia_upd, vge_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->vge_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	timeout_set(&sc->timer_handle, vge_tick, sc);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}

int
vge_detach(struct device *self, int flags)
{
	struct vge_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	pci_intr_disestablish(sc->sc_pc, sc->vge_intrhand);

	vge_stop(sc);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	vge_freemem(sc);

	bus_space_unmap(sc->vge_btag, sc->vge_bhandle, sc->vge_bsize);
	return (0);
}

int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct vge_rx_desc *r;
	bus_dmamap_t rxmap = sc->vge_ldata.vge_rx_dmamap[idx];
	int i;

	if (m == NULL) {
		/* Allocate a new mbuf */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate a cluster */
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}

		m = m_new;
	} else
		m->m_data = m->m_ext.ext_buf;

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Fix-up alignment so payload is doubleword-aligned */
	/* XXX m_adj(m, ETHER_ALIGN); */

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	if (rxmap->dm_nsegs > 1)
		goto out;

	/* Map the segments into RX descriptors */
	r = &sc->vge_ldata.vge_rx_list[idx];

	if (letoh32(r->vge_sts) & VGE_RDSTS_OWN) {
		printf("%s: tried to map a busy RX descriptor\n",
		    sc->vge_dev.dv_xname);
		goto out;
	}
	r->vge_buflen =
	    htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I);
	r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->vge_addrhi =
	    htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF);
	r->vge_sts = htole32(0);
	r->vge_ctl = htole32(0);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
	    rxmap->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (0);
out:
	DPRINTF(("vge_newbuf: out of memory\n"));
	if (m_new != NULL)
		m_freem(m_new);
	return (ENOMEM);
}
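/*
 * To illustrate the chunked hand-back in vge_newbuf(): buffers are
 * consumed in ascending ring order, so the fourth consumed buffer
 * always lands on an index of the form 4n + 3. With idx == 7, for
 * instance, the loop walks i through 7, 6, 5 and 4, flipping the OWN
 * bit on exactly that chunk of four descriptors before the counter
 * is reset.
 */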
int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map, 0,
	    sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

/*
 * Init RX descriptors and allocate mbufs with vge_newbuf().
 * A ring is used, and the last descriptor points to the first.
 */
int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = letoh32(cur_rx->vge_sts);
		rxctl = letoh32(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			DPRINTF(("vge_rxeof: SOF\n"));
			m->m_len = MCLBYTES;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}

			m0 = m_devget(mtod(m, char *),
			    total_len - ETHER_CRC_LEN, ETHER_ALIGN, ifp, NULL);
			vge_newbuf(sc, i, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;

			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % MCLBYTES;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef __STRICT_ALIGNMENT
		bcopy(m->m_data, m->m_data + ETHER_ALIGN, total_len);
		m->m_data += ETHER_ALIGN;
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		/* Check IP header checksum */
		if ((rxctl & VGE_RDCTL_IPPKT) &&
		    (rxctl & VGE_RDCTL_IPCSUMOK))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum */
		if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
		    (rxctl & VGE_RDCTL_PROTOCSUMOK))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    swap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;

	/* Tell the chip how many descriptors were returned to the ring. */
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	/* Transmitted frames can now be freed from the TX list */
	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

void
vge_tick(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();

	mii_tick(mii);

	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (mii->mii_media_status & IFM_FDX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}
	timeout_add_sec(&sc->timer_handle, 1);
	splx(s);
}

int
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return 0;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		DPRINTFN(3, ("vge_intr: status=%#x\n", status));

		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		claimed = 1;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			DPRINTFN(2, ("vge_intr: RX error, recovering\n"));
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			DPRINTFN(2, ("DMA_STALL\n"));
			vge_init(ifp);
		}

		if (status & VGE_ISR_LINKSTS) {
			timeout_del(&sc->timer_handle);
			vge_tick(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain into the TX ring by combining it with
 * the descriptors.
 */
int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_dmamap_t txmap;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	struct mbuf *mnew = NULL;
	int error, frag;
	u_int32_t vge_flags;

	vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		vge_flags |= VGE_TDCTL_UDPCSUM;

	txmap = sc->vge_ldata.vge_tx_dmamap[idx];
repack:
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap,
	    m_head, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->vge_dev.dv_xname, error);
		return (ENOBUFS);
	}

	d = &sc->vge_ldata.vge_tx_list[idx];
	/* If owned by chip, fail */
	if (letoh32(d->vge_sts) & VGE_TDSTS_OWN)
		return (ENOBUFS);

	for (frag = 0; frag < txmap->dm_nsegs; frag++) {
		/* Check if we have used all 7 fragments. */
		if (frag == VGE_TX_FRAGS)
			break;
		f = &d->vge_frag[frag];
		f->vge_buflen =
		    htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len));
		f->vge_addrlo =
		    htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr));
		f->vge_addrhi =
		    htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) &
		    0xFFFF);
	}

	/*
	 * We used up all 7 fragments! Now what we have to do is
	 * copy the data into a mbuf cluster and map that.
	 */
	if (frag == VGE_TX_FRAGS) {
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return (ENOBUFS);

		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return (ENOBUFS);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(mnew, caddr_t));
		mnew->m_pkthdr.len = mnew->m_len = m_head->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		m_freem(m_head);
		m_head = mnew;
		goto repack;
	}

	/* This chip does not do auto-padding */
	if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[frag];

		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    m_head->m_pkthdr.len));
		f->vge_addrlo =
		    htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr));
		f->vge_addrhi =
		    htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF);
		m_head->m_pkthdr.len = VGE_MIN_FRAMELEN;
		frag++;
	}
	/* For some reason, we need to tell the card fragment + 1 */
	frag++;

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	d->vge_sts = htole32(m_head->m_pkthdr.len << 16);
	d->vge_ctl = htole32(vge_flags|(frag << 28) | VGE_TD_LS_NORM);

	if (m_head->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);

#if NVLAN > 0
	/* Set up hardware VLAN tagging. */
	if (m_head->m_flags & M_VLANTAG) {
		d->vge_ctl |= htole32(m_head->m_pkthdr.ether_vtag |
		    VGE_TDCTL_VTAG);
	}
#endif

	sc->vge_ldata.vge_tx_dmamap[idx] = txmap;
	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;
	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	idx++;
	if (mnew == NULL) {
		/* if mbuf is coalesced, it is already dequeued */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
	}
	return (0);
}

/*
 * Main transmit routine.
 */
void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	sc = ifp->if_softc;

	if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head,
			    BPF_DIRECTION_OUT);
#endif

		if (vge_encap(sc, m_head, idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Mark the previous descriptor as having another queued
		 * behind it (the 'Q' bit). */
		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		return;
	}

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
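/*
 * The net effect of the countdown scheme above: every call to
 * vge_start() (and every partial reap in vge_txeof()) re-arms TIMER0,
 * so under steady transmit load completions are batched and the
 * interrupt only fires once the transmitter has been quiet for a full
 * timer period.
 */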
int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int i;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/* Initialize RX descriptors list */
	if (vge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}
	/* Initialize TX descriptors */
	if (vge_tx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for TX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set receive FIFO threshold */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
		/*
		 * Allow transmission and reception of VLAN tagged
		 * frames.
		 */
		CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_VTAGOPT);
		CSR_SETBIT_1(sc, VGE_RXCFG, VGE_VTAG_OPT2);
	}

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_GIANT);

	/* Program promiscuous mode and multicast filters. */
	vge_iff(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 *	TX XON high threshold : 48
	 *	TX pause low threshold : 24
	 *	Disable half-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);
	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */
#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, 0);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	/* Restore BMCR state */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_link = 0;

	if (!timeout_pending(&sc->timer_handle))
		timeout_add_sec(&sc->timer_handle, 1);

	return (0);
}

/*
 * Set media options.
 */
int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * Report current media status.
 */
void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

void
vge_miibus_statchg(struct device *dev)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	/*
	 * 802.3x flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
	    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_TXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_RXFLOWCTL_ENABLE);
}
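/*
 * A note on the register pairs used above (an observation from their
 * use throughout this driver, not from the datasheet): writes to the
 * VGE_CRS* ports set the named control bits, while writes to the
 * matching VGE_CRC* ports clear them. The flow control update thus
 * first clears both pause-enable bits via VGE_CRC2, then sets only
 * the directions that autonegotiation actually agreed on via VGE_CRS2.
 */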
int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vge_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
		    command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int s;

	s = splnet();
	printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vge_stop(struct vge_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->timer_handle);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}