/*	$OpenBSD: if_vge.c,v 1.12 2005/04/30 19:24:00 brad Exp $	*/
/*	$FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $	*/
/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 *
 * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org>
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
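
/*
 * For illustration (not additional driver logic): descriptor addresses
 * below are always filled via the VGE_ADDR_LO()/VGE_ADDR_HI() macros,
 * along the lines of the code in vge_newbuf():
 *
 *	r->vge_addrlo = htole32(VGE_ADDR_LO(seg.ds_addr));
 *	r->vge_addrhi = htole16(VGE_ADDR_HI(seg.ds_addr) & 0xFFFF);
 *
 * which supplies the low 48 bits of the DMA address; the topmost 16
 * bits come from the segment register mentioned above.
 */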

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>
#include <dev/pci/if_vgevar.h>

int vge_probe		(struct device *, void *, void *);
void vge_attach		(struct device *, struct device *, void *);

int vge_encap		(struct vge_softc *, struct mbuf *, int);

int vge_allocmem	(struct vge_softc *);
int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
int vge_rx_list_init	(struct vge_softc *);
int vge_tx_list_init	(struct vge_softc *);
void vge_rxeof		(struct vge_softc *);
void vge_txeof		(struct vge_softc *);
int vge_intr		(void *);
void vge_tick		(void *);
void vge_start		(struct ifnet *);
int vge_ioctl		(struct ifnet *, u_long, caddr_t);
int vge_init		(struct ifnet *);
void vge_stop		(struct vge_softc *);
void vge_watchdog	(struct ifnet *);
int vge_ifmedia_upd	(struct ifnet *);
void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
#endif
void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);

void vge_miipoll_start	(struct vge_softc *);
void vge_miipoll_stop	(struct vge_softc *);
int vge_miibus_readreg	(struct device *, int, int);
void vge_miibus_writereg (struct device *, int, int, int);
void vge_miibus_statchg	(struct device *);

void vge_cam_clear	(struct vge_softc *);
int vge_cam_set		(struct vge_softc *, uint8_t *);
void vge_setmulti	(struct vge_softc *);
void vge_reset		(struct vge_softc *);

struct cfattach vge_ca = {
	sizeof(struct vge_softc), vge_probe, vge_attach
};

struct cfdriver vge_cd = {
	NULL, "vge", DV_IFNET
};

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

int vge_debug = 0;
#define DPRINTF(x)	if (vge_debug) printf x
#define DPRINTFN(n, x)	if (vge_debug >= (n)) printf x

const struct pci_matchid vge_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612x },
};

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest)
{
	register int i;
	u_int16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname);
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt,
    int swap)
{
	int i;
#ifdef VGE_EEPROM
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: failed to idle MII autopoll\n",
		    sc->vge_dev.dv_xname);
}

void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->vge_dev.dv_xname);
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: failed to start MII autopoll\n",
		    sc->vge_dev.dv_xname);
}

int
vge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i, s;
	u_int16_t rval = 0;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	s = splimp();

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", sc->vge_dev.dv_xname);
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	splx(s);

	return (rval);
}

void
vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i, s;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	s = splimp();
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", sc->vge_dev.dv_xname);
	}

	vge_miipoll_start(sc);
	splx(s);
}

void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n",
		    sc->vge_dev.dv_xname);
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));
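	/*
	 * Each CAM entry owns one bit of the 64-bit mask spread across
	 * the eight VGE_CAM0..VGE_CAM0+7 bytes: entry n maps to bit
	 * (n & 7) of mask byte (n / 8), so e.g. entry 10 sets bit 2 of
	 * the second mask byte.
	 */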

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
void
vge_setmulti(struct vge_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h = 0, hashes[2] = { 0, 0 };

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* reset existing hash bits */
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/* program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
			ifp->if_flags |= IFF_ALLMULTI;
		h = (ether_crc32_be(enm->enm_addrlo,
		    ETHER_ADDR_LEN) >> 26) & 0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}
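
	/*
	 * The upper 6 bits of the big-endian CRC32 of each address pick
	 * one of 64 filter bits: h < 32 lands in VGE_MAR0, h >= 32 in
	 * VGE_MAR1 (e.g. h == 0x2A would set bit 10 of VGE_MAR1).
	 */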
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);

#ifdef CAM_FILTERING
	struct ifnet *ifp;
	u_int32_t h, hashes[2] = { 0, 0 };
	int mcnt = 0;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp = &sc->arpcom.ac_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		}
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		h = (ether_crc32_be(enm->enm_addrlo,
		    ETHER_ADDR_LEN) >> 26) & 0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
#endif
}

void
vge_reset(struct vge_softc *sc)
{
	register int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", sc->vge_dev.dv_xname);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM reload timed out\n", sc->vge_dev.dv_xname);
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
vge_probe(struct device *dev, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices,
	    sizeof(vge_devices)/sizeof(vge_devices[0])));
}

/*
 * Allocate memory for RX/TX rings
 */
int
vge_allocmem(struct vge_softc *sc)
{
	int nseg, rseg;
	int i, error;

	nseg = 32;

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1,
	    VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);
	error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ,
	    ETHER_ALIGN, 0,
	    &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname);
		return (ENOMEM);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
	    1, VGE_TX_LIST_SZ,
	    (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map TX dma buffers\n",
		    sc->vge_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
		    rseg);
		return (ENOMEM);
	}
	memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);

	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map,
	    sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
		    rseg);
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
		    MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->vge_dev.dv_xname);
			return (ENOMEM);
		}
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1,
	    VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);
	error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
	    0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname);
		return (ENOMEM);
	}

	/* Load the map for the RX ring. */

	error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
	    1, VGE_RX_LIST_SZ,
	    (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map RX dma buffers\n",
		    sc->vge_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
		    rseg);
		return (ENOMEM);
	}
	memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);

	error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map,
	    sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
		    rseg);
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
		    MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->vge_dev.dv_xname);
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vge_attach(struct device *parent, struct device *self, void *aux)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[3];
	struct vge_softc *sc = (struct vge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	int error = 0, i;
	bus_size_t iosize;
	pcireg_t command;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if ((command & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) == 0) {
		printf(": neither i/o nor mem enabled\n");
		return;
	}

	if (command & PCI_COMMAND_MEM_ENABLE) {
		if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
		    &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) {
			printf(": can't map mem space\n");
			return;
		}
	} else {
		if (pci_mapreg_map(pa, VGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
		    &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) {
			printf(": can't map i/o space\n");
			return;
		}
	}

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc,
	    sc->vge_dev.dv_xname);
	if (sc->vge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)as, VGE_EE_EADDR, 3, 0);
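	/*
	 * Each 16-bit EEPROM word holds two address bytes, low byte
	 * first, so the three words unpack into the six-byte station
	 * address.
	 */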
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	printf(", address %s\n",
	    ether_sprintf(sc->arpcom.ac_enaddr));

	error = vge_allocmem(sc);

	if (error)
		return;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef VGE_VLAN
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef VGE_CSUM_OFFLOAD
	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
	    IFCAP_CSUM_UDPv4;
#endif

	/* Set interface name */
	strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ);

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    vge_ifmedia_upd, vge_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->vge_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	timeout_set(&sc->timer_handle, vge_tick, sc);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}

int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct vge_rx_desc *r;
	bus_dmamap_t rxmap = sc->vge_ldata.vge_rx_dmamap[idx];
	int i;

	if (m == NULL) {
		/* Allocate a new mbuf */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate a cluster */
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Fix-up alignment so payload is doubleword-aligned */
	/* XXX m_adj(m_new, ETHER_ALIGN); */

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m_new, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	if (rxmap->dm_nsegs > 1)
		goto out;

	/* Map the segments into RX descriptors */
	r = &sc->vge_ldata.vge_rx_list[idx];

	if (letoh32(r->vge_sts) & VGE_RDSTS_OWN) {
		printf("%s: tried to map a busy RX descriptor\n",
		    sc->vge_dev.dv_xname);
		goto out;
	}
	r->vge_buflen =
	    htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I);
	r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->vge_addrhi =
	    htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF);
	r->vge_sts = htole32(0);
	r->vge_ctl = htole32(0);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}
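	/*
	 * The backfill loop above walks at most VGE_RXCHUNK entries
	 * backwards from idx; it stays inside the ring only because
	 * buffers are posted in order starting at index 0 and
	 * VGE_RX_DESC_CNT is assumed to be a multiple of VGE_RXCHUNK.
	 */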

	sc->vge_ldata.vge_rx_mbuf[idx] = m_new;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
	    rxmap->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (0);
out:
	DPRINTF(("vge_newbuf: out of memory\n"));
	if (m_new != NULL)
		m_freem(m_new);
	return (ENOMEM);
}

int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map, 0,
	    sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

/*
 * Init RX descriptors and allocate mbufs with vge_newbuf().
 * A ring is used, and the last descriptor points to the first.
 */
int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = letoh32(cur_rx->vge_sts);
		rxctl = letoh32(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			DPRINTF(("vge_rxeof: SOF\n"));
			m->m_len = MCLBYTES;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}

			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len - ETHER_CRC_LEN + ETHER_ALIGN,
			    0, ifp, NULL);
			vge_newbuf(sc, i, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;

			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % MCLBYTES;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

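		/*
		 * On strict-alignment architectures the whole frame is
		 * shifted up by ETHER_ALIGN (2) bytes below, so the IP
		 * header behind the 14-byte ethernet header lands on a
		 * 32-bit boundary; bcopy() is overlap-safe, so copying
		 * forward in place is fine.
		 */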
#ifdef __STRICT_ALIGNMENT
		bcopy(m->m_data, m->m_data + ETHER_ALIGN,
		    total_len);
		m->m_data += ETHER_ALIGN;
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		/* Check IP header checksum */
		if ((rxctl & VGE_RDCTL_IPPKT) &&
		    (rxctl & VGE_RDCTL_IPCSUMOK))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum */
		if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
		    (rxctl & VGE_RDCTL_PROTOCSUMOK))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;

#ifdef VGE_VLAN
		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ether_input_mbuf(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	/* Transmitted frames can now be freed from the TX list */
	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

void
vge_tick(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splimp();

	mii_tick(mii);

	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}
	timeout_add(&sc->timer_handle, hz);
	splx(s);
}

int
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return 0;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		DPRINTFN(3, ("vge_intr: status=%#x\n", status));

		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		claimed = 1;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			DPRINTFN(2, ("vge_intr: RX error, recovering\n"));
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			DPRINTFN(2, ("DMA_STALL\n"));
			vge_init(ifp);
		}

		if (status & VGE_ISR_LINKSTS) {
			timeout_del(&sc->timer_handle);
			vge_tick(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain into the TX ring by combining it with
 * the descriptors.
 */
int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	bus_dmamap_t txmap;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
#ifdef VGE_VLAN
	struct m_tag *mtag;
#endif
	int error, frag;

	txmap = sc->vge_ldata.vge_tx_dmamap[idx];
repack:
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap,
	    m_head, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->vge_dev.dv_xname, error);
		return (ENOBUFS);
	}

	d = &sc->vge_ldata.vge_tx_list[idx];
	/* If owned by chip, fail */
	if (letoh32(d->vge_sts) & VGE_TDSTS_OWN)
		return (ENOBUFS);

	for (frag = 0; frag < txmap->dm_nsegs; frag++) {
		/* Check if we have used all 7 fragments. */
		if (frag == VGE_TX_FRAGS)
			break;
		f = &d->vge_frag[frag];
		f->vge_buflen =
		    htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len));
		f->vge_addrlo =
		    htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr));
		f->vge_addrhi =
		    htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) & 0xFFFF);
	}

	/*
	 * We used up all 7 fragments! Now what we have to do is
	 * copy the data into a mbuf cluster and map that.
	 */
	if (frag == VGE_TX_FRAGS) {
		struct mbuf *m = NULL;

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(m_head);
			return (ENOBUFS);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m_head);
				return (ENOBUFS);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m, caddr_t));
		m->m_pkthdr.len = m->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m;
		goto repack;
	}

	/* This chip does not do auto-padding */
	if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[frag];

		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    m_head->m_pkthdr.len));
		f->vge_addrlo =
		    htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr));
		f->vge_addrhi =
		    htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF);
		m_head->m_pkthdr.len = VGE_MIN_FRAMELEN;
		frag++;
	}
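	/*
	 * Note that the pad fragment above simply points back at the
	 * start of the frame's first DMA segment, so the padding is
	 * re-read frame data rather than zeroes; it only serves to
	 * bring the total length up to VGE_MIN_FRAMELEN.
	 */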
	/* For some reason, we need to tell the card fragment + 1 */
	frag++;

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	d->vge_sts = htole32(m_head->m_pkthdr.len << 16);
	d->vge_ctl = htole32((frag << 28) | VGE_TD_LS_NORM);

	if (m_head->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);

	sc->vge_ldata.vge_tx_dmamap[idx] = txmap;
	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;
	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	/*
	 * Set up hardware VLAN tagging.
	 */
#ifdef VGE_VLAN
	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
#endif

	idx++;

	return (0);
}

/*
 * Main transmit routine.
 */
void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	sc = ifp->if_softc;

	if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif

		if (vge_encap(sc, m_head, idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

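		/*
		 * Chain this descriptor to the previous one by setting
		 * the 'queued' bit in the previous descriptor's first
		 * fragment length field.
		 */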
		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int i;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/* Initialize RX descriptors list */
	if (vge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}
	/* Initialize TX descriptors */
	if (vge_tx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for TX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
#ifdef CAM_FILTERING
	vge_cam_clear(sc);
#endif

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);
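
	/*
	 * This pairs with the VGE_CR1_TIMER0_ENABLE writes in
	 * vge_start() and vge_txeof(): each enable rearms the one-shot
	 * timer, so a 'TX done' style interrupt only fires once the
	 * transmitter has been idle for the SSTIMER period.
	 */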

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, 0);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	/* Restore BMCR state */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	if (!timeout_pending(&sc->timer_handle))
		timeout_add(&sc->timer_handle, hz);

	return (0);
}

/*
 * Set media options.
 */
int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * Report current media status.
 */
void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

void
vge_miibus_statchg(struct device *dev)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vge_init(ifp);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif
		default:
			vge_init(ifp);
			break;
		}
		break;
#if 0 /* XXX mtu gets reset to 0 at ifconfig up for some reason with this */
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU_JUMBO)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
#endif
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				vge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
		    command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int s;

	s = splnet();
	printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vge_stop(struct vge_softc *sc)
{
	register int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	if (timeout_pending(&sc->timer_handle))
		timeout_del(&sc->timer_handle);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}