/*	$NetBSD: if_vge.c,v 1.51 2010/04/05 07:20:27 joerg Exp $	*/

/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.51 2010/04/05 07:20:27 joerg Exp $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66 MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames longer than 2 or 3 fragments.
 * The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */


#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>

#define VGE_IFQ_MAXLEN		64

#define VGE_RING_ALIGN		256

#define VGE_NTXDESC		256
#define VGE_NTXDESC_MASK	(VGE_NTXDESC - 1)
#define VGE_NEXT_TXDESC(x)	((x + 1) & VGE_NTXDESC_MASK)
#define VGE_PREV_TXDESC(x)	((x - 1) & VGE_NTXDESC_MASK)

#define VGE_NRXDESC		256	/* Must be a multiple of 4!! */
#define VGE_NRXDESC_MASK	(VGE_NRXDESC - 1)
#define VGE_NEXT_RXDESC(x)	((x + 1) & VGE_NRXDESC_MASK)
#define VGE_PREV_RXDESC(x)	((x - 1) & VGE_NRXDESC_MASK)

#define VGE_ADDR_LO(y)		((uint64_t)(y) & 0xFFFFFFFF)
#define VGE_ADDR_HI(y)		((uint64_t)(y) >> 32)
#define VGE_BUFLEN(y)		((y) & 0x7FFF)
#define ETHER_PAD_LEN		(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VGE_POWER_MANAGEMENT	0	/* disabled for now */

/*
 * Mbuf adjust factor to force 32-bit alignment of IP header.
 * Drivers should pad ETHER_ALIGN bytes when setting up an
 * RX mbuf so the upper layers get the IP header properly aligned
 * past the 14-byte Ethernet header.
 *
 * See also comment in vge_encap().
 */
#define ETHER_ALIGN		2

#ifdef __NO_STRICT_ALIGNMENT
#define VGE_RX_BUFSIZE		MCLBYTES
#else
#define VGE_RX_PAD		sizeof(uint32_t)
#define VGE_RX_BUFSIZE		(MCLBYTES - VGE_RX_PAD)
#endif

/*
 * Control structures are DMA'd to the vge chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct vge_control_data {
	/* TX descriptors */
	struct vge_txdesc	vcd_txdescs[VGE_NTXDESC];
	/* RX descriptors */
	struct vge_rxdesc	vcd_rxdescs[VGE_NRXDESC];
	/* dummy data for TX padding */
	uint8_t			vcd_pad[ETHER_PAD_LEN];
};

#define VGE_CDOFF(x)	offsetof(struct vge_control_data, x)
#define VGE_CDTXOFF(x)	VGE_CDOFF(vcd_txdescs[(x)])
#define VGE_CDRXOFF(x)	VGE_CDOFF(vcd_rxdescs[(x)])
#define VGE_CDPADOFF()	VGE_CDOFF(vcd_pad[0])
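
/*
 * For example, VGE_CDTXOFF(3) is the byte offset of the fourth TX
 * descriptor within struct vge_control_data; the VGE_CDTXADDR() and
 * VGE_CDRXADDR() macros below add these offsets to the clump's single
 * DMA segment address to get the bus address of a given descriptor.
 */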

/*
 * Software state for TX jobs.
 */
struct vge_txsoft {
	struct mbuf	*txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t	txs_dmamap;		/* our DMA map */
};

/*
 * Software state for RX jobs.
 */
struct vge_rxsoft {
	struct mbuf	*rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t	rxs_dmamap;		/* our DMA map */
};


struct vge_softc {
	device_t	sc_dev;

	bus_space_tag_t	sc_bst;			/* bus space tag */
	bus_space_handle_t sc_bsh;		/* bus space handle */
	bus_dma_tag_t	sc_dmat;

	struct ethercom	sc_ethercom;		/* interface info */
	uint8_t		sc_eaddr[ETHER_ADDR_LEN];

	void		*sc_intrhand;
	struct mii_data	sc_mii;
	uint8_t		sc_type;
	int		sc_if_flags;
	int		sc_link;
	int		sc_camidx;
	callout_t	sc_timeout;

	bus_dmamap_t	sc_cddmamap;
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct vge_txsoft	sc_txsoft[VGE_NTXDESC];
	struct vge_rxsoft	sc_rxsoft[VGE_NRXDESC];
	struct vge_control_data	*sc_control_data;
#define sc_txdescs	sc_control_data->vcd_txdescs
#define sc_rxdescs	sc_control_data->vcd_rxdescs

	int		sc_tx_prodidx;
	int		sc_tx_considx;
	int		sc_tx_free;

	struct mbuf	*sc_rx_mhead;
	struct mbuf	*sc_rx_mtail;
	int		sc_rx_prodidx;
	int		sc_rx_consumed;

	int		sc_suspended;	/* 0 = normal  1 = suspended */
	uint32_t	sc_saved_maps[5];	/* pci data */
	uint32_t	sc_saved_biosaddr;
	uint8_t		sc_saved_intline;
	uint8_t		sc_saved_cachelnsz;
	uint8_t		sc_saved_lattimer;
};

#define VGE_CDTXADDR(sc, x)	((sc)->sc_cddma + VGE_CDTXOFF(x))
#define VGE_CDRXADDR(sc, x)	((sc)->sc_cddma + VGE_CDRXOFF(x))
#define VGE_CDPADADDR(sc)	((sc)->sc_cddma + VGE_CDPADOFF())

#define VGE_TXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx),						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    (ops))
#define VGE_TXFRAGSYNC(sc, idx, nsegs, ops)				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx) +						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    sizeof(struct vge_txfrag) * (nsegs),			\
	    (ops))
#define VGE_RXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDRXOFF(idx),						\
	    sizeof(struct vge_rxdesc),					\
	    (ops))
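
/*
 * Note that VGE_TXDESCSYNC() deliberately covers only the status and
 * control words of a TX descriptor (everything up to td_frag[0]); the
 * fragment array is synced separately with VGE_TXFRAGSYNC(), for just
 * the number of fragments actually in use.
 */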

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))

#define CSR_SETBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
#define CSR_SETBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x))
#define CSR_SETBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x))

#define CSR_CLRBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))
#define CSR_CLRBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x))
#define CSR_CLRBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x))

#define VGE_TIMEOUT		10000

#define VGE_PCI_LOIO		0x10
#define VGE_PCI_LOMEM		0x14

static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t);
static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t);

static int vge_ifflags_cb(struct ethercom *);

static int vge_match(device_t, cfdata_t, void *);
static void vge_attach(device_t, device_t, void *);

static int vge_encap(struct vge_softc *, struct mbuf *, int);

static int vge_allocmem(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
#ifndef __NO_STRICT_ALIGNMENT
static inline void vge_fixup_rx(struct mbuf *);
#endif
static void vge_rxeof(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static int vge_intr(void *);
static void vge_tick(void *);
static void vge_start(struct ifnet *);
static int vge_ioctl(struct ifnet *, u_long, void *);
static int vge_init(struct ifnet *);
static void vge_stop(struct ifnet *, int);
static void vge_watchdog(struct ifnet *);
#if VGE_POWER_MANAGEMENT
static int vge_suspend(device_t);
static int vge_resume(device_t);
#endif
static bool vge_shutdown(device_t, int);

static uint16_t vge_read_eeprom(struct vge_softc *, int);

static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_miibus_readreg(device_t, int, int);
static void vge_miibus_writereg(device_t, int, int, int);
static void vge_miibus_statchg(device_t);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_setmulti(struct vge_softc *);
static void vge_reset(struct vge_softc *);

CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc),
    vge_match, vge_attach, NULL, NULL);

static inline void
vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr)
{

	f->tf_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		f->tf_addrhi = 0;
}

static inline void
vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr)
{

	rxd->rd_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		rxd->rd_addrhi = 0;
}
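
/*
 * The two helpers above implement the 48-bit addressing scheme
 * described at the top of this file: the low 32 bits of a bus address
 * go into the descriptor's 'addrlo' word and the next 16 bits into
 * 'addrhi'.  On platforms where bus_addr_t is 32 bits wide the high
 * word is simply zero.
 */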

/*
 * Defragment mbuf chain contents to be as linear as possible.
 * Returns new mbuf chain on success, NULL on failure. Old mbuf
 * chain is always freed.
 * XXX temporary until there is a generic function doing this.
 */
#define m_defrag	vge_m_defrag
struct mbuf * vge_m_defrag(struct mbuf *, int);

struct mbuf *
vge_m_defrag(struct mbuf *mold, int flags)
{
	struct mbuf *m0, *mn, *n;
	size_t sz = mold->m_pkthdr.len;

#ifdef DIAGNOSTIC
	if ((mold->m_flags & M_PKTHDR) == 0)
		panic("m_defrag: not a mbuf chain header");
#endif

	MGETHDR(m0, flags, MT_DATA);
	if (m0 == NULL)
		return NULL;
	m0->m_pkthdr.len = mold->m_pkthdr.len;
	mn = m0;

	do {
		if (sz > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
		    mtod(mn, void *));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	return m0;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static uint16_t
vge_read_eeprom(struct vge_softc *sc, int addr)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev));
		return 0;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	return word;
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    device_xname(sc->sc_dev));
	}
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to start MII autopoll\n",
		    device_xname(sc->sc_dev));
	}
}
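
/*
 * The two routines above bracket every manual PHY access: the chip's
 * MII autopoll machinery has to be idled before MIICMD can be driven
 * by hand, and restarted afterwards, which is exactly what
 * vge_miibus_readreg() and vge_miibus_writereg() below do.
 */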

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i, s;
	uint16_t rval;

	sc = device_private(dev);
	rval = 0;
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return 0;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", device_xname(sc->sc_dev));
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	splx(s);

	return rval;
}

static void
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, s;

	sc = device_private(dev);
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
	}

	vge_miipoll_start(sc);
	splx(s);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->sc_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error;

	error = 0;

	if (sc->sc_camidx == VGE_CAM_MAXADDRS)
		return ENOSPC;

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n",
		    device_xname(sc->sc_dev));
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8),
	    1 << (sc->sc_camidx & 7));

	sc->sc_camidx++;

 fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return error;
}
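
/*
 * To make the CAM mask arithmetic above concrete: filter entry 10 is
 * enabled by setting bit (10 & 7) == 2 of mask register
 * VGE_CAM0 + (10 / 8), i.e. one mask bit per CAM entry spread across
 * eight 8-bit registers.
 */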

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp;
	int error;
	uint32_t h, hashes[2] = { 0, 0 };
	struct ether_multi *enm;
	struct ether_multistep step;

	error = 0;
	ifp = &sc->sc_ethercom.ec_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/* Now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		/*
		 * If multicast range, fall back to ALLMULTI.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		error = vge_cam_set(sc, enm->enm_addrlo);
		if (error)
			break;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			/*
			 * If multicast range, fall back to ALLMULTI.
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
			hashes[h >> 5] |= 1 << (h & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}
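
/*
 * Hash filter arithmetic, for reference: ether_crc32_be() >> 26
 * leaves a 6-bit value h in [0, 63]; h >> 5 selects VGE_MAR0 or
 * VGE_MAR1 and (h & 0x1f) the bit within it, so e.g. h == 0x2a
 * sets bit 10 of VGE_MAR1.
 */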

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", device_xname(sc->sc_dev));
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM reload timed out\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/*
	 * On some machines, the first word read back from the EEPROM
	 * can be garbage, so do one dummy read here to flush it out.
	 */
	(void)vge_read_eeprom(sc, 0);

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
		return 1;

	return 0;
}

static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate memory for control data.
	 */

	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
	    VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate control data dma memory\n");
		goto fail_1;
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not map control data dma memory\n");
		goto fail_2;
	}
	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));

	/*
	 * Create map for control data.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct vge_control_data), 1,
	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not create control data dmamap\n");
		goto fail_3;
	}

	/* Load the map for the control data. */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not load control data dma memory\n");
		goto fail_4;
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_NTXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_5;
		}
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_NRXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	return 0;

 fail_6:
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < VGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct vge_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_1:
	return ENOMEM;
}
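
/*
 * vge_allocmem() follows the usual bus_dma(9) four-step for the
 * control data: bus_dmamem_alloc() for raw memory, bus_dmamem_map()
 * to get a kernel virtual address, then bus_dmamap_create() and
 * bus_dmamap_load() to obtain the single bus address the chip will
 * use, with the fail_* labels unwinding those steps in reverse order.
 */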

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vge_attach(device_t parent, device_t self, void *aux)
{
	uint8_t *eaddr;
	struct vge_softc *sc = device_private(self);
	struct ifnet *ifp;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	pci_intr_handle_t ih;
	uint16_t val;

	sc->sc_dev = self;

	pci_aprint_devinfo_fancy(pa, NULL, "VIA VT612X Gigabit Ethernet", 1);

	/* Make sure bus-mastering is enabled */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) {
		aprint_error_dev(self, "couldn't map memory\n");
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
	if (sc->sc_intrhand == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	eaddr = sc->sc_eaddr;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	aprint_normal_dev(self, "Ethernet address: %s\n",
	    ether_sprintf(eaddr));

	/*
	 * Use the 32-bit tag. The hardware supports 48-bit physical
	 * addresses, but we don't use that for now.
	 */
	sc->sc_dmat = pa->pa_dmat;

	if (vge_allocmem(sc) != 0)
		return;

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;
	ifp->if_init = vge_init;
	ifp->if_stop = vge_stop;

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb);

	callout_init(&sc->sc_timeout, 0);
	callout_setfunc(&sc->sc_timeout, vge_tick, sc);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, vge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new;
	struct vge_rxdesc *rxd;
	struct vge_rxsoft *rxs;
	bus_dmamap_t map;
	int i;
#ifdef DIAGNOSTIC
	uint32_t rd_sts;
#endif

	m_new = NULL;
	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);
			return ENOBUFS;
		}

		m = m_new;
	} else
		m->m_data = m->m_ext.ext_buf;


	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
#ifndef __NO_STRICT_ALIGNMENT
	m->m_data += VGE_RX_PAD;
#endif
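
	/*
	 * On strict-alignment hosts the two halves of the trick fit
	 * together like this: the buffer handed to the chip starts
	 * VGE_RX_PAD (4) bytes into the cluster, which preserves the
	 * chip's 32-bit alignment requirement, and vge_fixup_rx()
	 * later shifts the received frame back by ETHER_ALIGN (2)
	 * bytes so that the IP header behind the 14-byte Ethernet
	 * header ends up 32-bit aligned.
	 */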
	rxs = &sc->sc_rxsoft[idx];
	map = rxs->rxs_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
		goto out;

	rxd = &sc->sc_rxdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	rd_sts = le32toh(rxd->rd_sts);
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (rd_sts & VGE_RDSTS_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxd->rd_buflen =
	    htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
	vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
	rxd->rd_sts = 0;
	rxd->rd_ctl = 0;
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->sc_rx_consumed++;
	if (sc->sc_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - VGE_RXCHUNK; i--) {
			KASSERT(i >= 0);
			sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
			VGE_RXDESCSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
		sc->sc_rx_consumed = 0;
	}

	return 0;
 out:
	if (m_new != NULL)
		m_freem(m_new);
	return ENOMEM;
}

#ifndef __NO_STRICT_ALIGNMENT
static inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
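
/*
 * vge_fixup_rx() moves the frame down one uint16_t at a time; for an
 * m_len of 60 it copies m_len / 2 + 1 == 31 16-bit words, the extra
 * word making sure a trailing odd byte is covered as well.  The net
 * effect is to shift the whole frame ETHER_ALIGN bytes toward the
 * start of the cluster.
 */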

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int idx, total_len, lim;
	struct vge_rxdesc *cur_rxd;
	struct vge_rxsoft *rxs;
	uint32_t rxstat, rxctl;

	ifp = &sc->sc_ethercom.ec_if;
	lim = 0;

	/* Invalidate the descriptor memory */

	for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
		cur_rxd = &sc->sc_rxdescs[idx];

		VGE_RXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rxd->rd_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0) {
			VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			break;
		}

		rxctl = le32toh(cur_rxd->rd_ctl);
		rxs = &sc->sc_rxsoft[idx];
		m = rxs->rxs_mbuf;
		total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
		    0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = VGE_RX_BUFSIZE;
			if (sc->sc_rx_mhead == NULL)
				sc->sc_rx_mhead = sc->sc_rx_mtail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
				sc->sc_rx_mtail = m;
			}
			vge_newbuf(sc, idx, NULL);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & VGE_RDSTS_VIDM) == 0 &&
		    (rxstat & VGE_RDSTS_CSUMERR) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, idx, NULL)) {
			ifp->if_ierrors++;
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		if (sc->sc_rx_mhead != NULL) {
			m->m_len = total_len % VGE_RX_BUFSIZE;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->sc_rx_mtail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
			}
			m = sc->sc_rx_mhead;
			sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * We use bswap16() here because:
			 * On LE machines, tag is stored in BE as stream data.
			 * On BE machines, tag is stored in BE as stream data
			 * but it was already swapped by le32toh() above.
			 */
			VLAN_INPUT_TAG(ifp, m,
			    bswap16(rxctl & VGE_RDCTL_VLANID), continue);
		}

		/*
		 * Handle BPF listeners.
		 */
		bpf_mtap(ifp, m);

		(*ifp->if_input)(ifp, m);

		lim++;
		if (lim == VGE_NRXDESC)
			break;
	}

	sc->sc_rx_prodidx = idx;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}
1365 */ 1366 VLAN_INPUT_TAG(ifp, m, 1367 bswap16(rxctl & VGE_RDCTL_VLANID), continue); 1368 } 1369 1370 /* 1371 * Handle BPF listeners. 1372 */ 1373 bpf_mtap(ifp, m); 1374 1375 (*ifp->if_input)(ifp, m); 1376 1377 lim++; 1378 if (lim == VGE_NRXDESC) 1379 break; 1380 } 1381 1382 sc->sc_rx_prodidx = idx; 1383 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); 1384} 1385 1386static void 1387vge_txeof(struct vge_softc *sc) 1388{ 1389 struct ifnet *ifp; 1390 struct vge_txsoft *txs; 1391 uint32_t txstat; 1392 int idx; 1393 1394 ifp = &sc->sc_ethercom.ec_if; 1395 1396 for (idx = sc->sc_tx_considx; 1397 sc->sc_tx_free < VGE_NTXDESC; 1398 idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) { 1399 VGE_TXDESCSYNC(sc, idx, 1400 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1401 txstat = le32toh(sc->sc_txdescs[idx].td_sts); 1402 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1403 if (txstat & VGE_TDSTS_OWN) { 1404 break; 1405 } 1406 1407 txs = &sc->sc_txsoft[idx]; 1408 m_freem(txs->txs_mbuf); 1409 txs->txs_mbuf = NULL; 1410 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, 1411 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1412 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1413 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL)) 1414 ifp->if_collisions++; 1415 if (txstat & VGE_TDSTS_TXERR) 1416 ifp->if_oerrors++; 1417 else 1418 ifp->if_opackets++; 1419 } 1420 1421 sc->sc_tx_considx = idx; 1422 1423 if (sc->sc_tx_free > 0) { 1424 ifp->if_flags &= ~IFF_OACTIVE; 1425 } 1426 1427 /* 1428 * If not all descriptors have been released reaped yet, 1429 * reload the timer so that we will eventually get another 1430 * interrupt that will cause us to re-enter this routine. 1431 * This is done in case the transmitter has gone idle. 1432 */ 1433 if (sc->sc_tx_free < VGE_NTXDESC) 1434 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1435 else 1436 ifp->if_timer = 0; 1437} 1438 1439static void 1440vge_tick(void *arg) 1441{ 1442 struct vge_softc *sc; 1443 struct ifnet *ifp; 1444 struct mii_data *mii; 1445 int s; 1446 1447 sc = arg; 1448 ifp = &sc->sc_ethercom.ec_if; 1449 mii = &sc->sc_mii; 1450 1451 s = splnet(); 1452 1453 callout_schedule(&sc->sc_timeout, hz); 1454 1455 mii_tick(mii); 1456 if (sc->sc_link) { 1457 if ((mii->mii_media_status & IFM_ACTIVE) == 0) 1458 sc->sc_link = 0; 1459 } else { 1460 if (mii->mii_media_status & IFM_ACTIVE && 1461 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1462 sc->sc_link = 1; 1463 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1464 vge_start(ifp); 1465 } 1466 } 1467 1468 splx(s); 1469} 1470 1471static int 1472vge_intr(void *arg) 1473{ 1474 struct vge_softc *sc; 1475 struct ifnet *ifp; 1476 uint32_t status; 1477 int claim; 1478 1479 sc = arg; 1480 claim = 0; 1481 if (sc->sc_suspended) { 1482 return claim; 1483 } 1484 1485 ifp = &sc->sc_ethercom.ec_if; 1486 1487 if ((ifp->if_flags & IFF_UP) == 0) { 1488 return claim; 1489 } 1490 1491 /* Disable interrupts */ 1492 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1493 1494 for (;;) { 1495 1496 status = CSR_READ_4(sc, VGE_ISR); 1497 /* If the card has gone away the read returns 0xffffffff. 

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_txsoft *txs;
	struct vge_txdesc *txd;
	struct vge_txfrag *f;
	struct mbuf *m_new;
	bus_dmamap_t map;
	int m_csumflags, seg, error, flags;
	struct m_tag *mtag;
	size_t sz;
	uint32_t td_sts, td_ctl;

	KASSERT(sc->sc_tx_free > 0);

	txd = &sc->sc_txdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_TXDESCSYNC(sc, idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	td_sts = le32toh(txd->td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (td_sts & VGE_TDSTS_OWN) {
		return ENOBUFS;
	}
#endif

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

	txs = &sc->sc_txsoft[idx];
	map = txs->txs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT);

	/* If too many segments to map, coalesce */
	if (error == EFBIG ||
	    (m_head->m_pkthdr.len < ETHER_PAD_LEN &&
	     map->dm_nsegs == VGE_TX_FRAGS)) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return EFBIG;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
		    m_new, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_new);
			return error;
		}

		m_head = m_new;
	} else if (error)
		return error;

	txs->txs_mbuf = m_head;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) {
		f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
		vge_set_txaddr(f, map->dm_segs[seg].ds_addr);
	}

	/* Argh. This chip does not autopad short frames */
	sz = m_head->m_pkthdr.len;
	if (sz < ETHER_PAD_LEN) {
		f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz));
		vge_set_txaddr(f, VGE_CDPADADDR(sc));
		sz = ETHER_PAD_LEN;
		seg++;
	}
	VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
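
	/*
	 * The pad fragment set up above points at vcd_pad[] in the
	 * control-data clump: a zero-filled buffer that is already
	 * mapped for DMA, so runts can be padded out to ETHER_PAD_LEN
	 * by chaining one extra fragment instead of copying the mbuf.
	 */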

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	seg++;

	flags = 0;
	if (m_csumflags & M_CSUM_IPv4)
		flags |= VGE_TDCTL_IPCSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		flags |= VGE_TDCTL_TCPCSUM;
	if (m_csumflags & M_CSUM_UDPv4)
		flags |= VGE_TDCTL_UDPCSUM;
	td_sts = sz << 16;
	td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM;

	if (sz > ETHERMTU + ETHER_HDR_LEN)
		td_ctl |= VGE_TDCTL_JUMBO;

	/*
	 * Set up hardware VLAN tagging.
	 */
	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
	if (mtag != NULL) {
		/*
		 * No need htons() here since vge(4) chip assumes
		 * that tags are written in little endian and
		 * we already use htole32() here.
		 */
		td_ctl |= VLAN_TAG_VALUE(mtag) | VGE_TDCTL_VTAG;
	}
	txd->td_ctl = htole32(td_ctl);
	txd->td_sts = htole32(td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_tx_free--;

	return 0;
}

/*
 * Main transmit routine.
 */

static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txsoft *txs;
	struct mbuf *m_head;
	int idx, pidx, ofree, error;

	sc = ifp->if_softc;

	if (!sc->sc_link ||
	    (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		return;
	}

	m_head = NULL;
	idx = sc->sc_tx_prodidx;
	pidx = VGE_PREV_TXDESC(idx);
	ofree = sc->sc_tx_free;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->sc_tx_free == 0) {
			/*
			 * All slots used, stop for now.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[idx];
		KASSERT(txs->txs_mbuf == NULL);

		if ((error = vge_encap(sc, m_head, idx))) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				continue;
			}

			/*
			 * Short on resources, just stop for now.
			 */
			if (error == ENOBUFS)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
		    htole16(VGE_TXDESC_Q);
		VGE_TXFRAGSYNC(sc, pidx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (txs->txs_mbuf != m_head) {
			m_freem(m_head);
			m_head = txs->txs_mbuf;
		}

		pidx = idx;
		idx = VGE_NEXT_TXDESC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (sc->sc_tx_free < ofree) {
		/* TX packet queued */

		sc->sc_tx_prodidx = idx;

		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

		/*
		 * Use the countdown timer for interrupt moderation.
		 * 'TX done' interrupts are disabled. Instead, we reset the
		 * countdown timer, which will begin counting until it hits
		 * the value in the SSTIMER register, and then trigger an
		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
		 * timer count is reloaded. Only when the transmitter
		 * is idle will the timer hit 0 and an interrupt fire.
		 */
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
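
/*
 * A note on the VGE_TXDESC_Q bit set in the loop above: the chip
 * apparently treats it as a 'more descriptors follow' flag, and it
 * lives in the first fragment of the *previous* descriptor.  That is
 * why vge_start() carries pidx alongside idx and flags the descriptor
 * behind the one it has just filled.
 */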

static int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int i, rc = 0;

	sc = ifp->if_softc;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(ifp, 0);
	vge_reset(sc);

	/* Initialize the RX descriptors and mbufs. */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	sc->sc_rx_consumed = 0;
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			printf("%s: unable to allocate or map rx buffer\n",
			    device_xname(sc->sc_dev));
			return 1; /* XXX */
		}
	}
	sc->sc_rx_prodidx = 0;
	sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;

	/* Initialize the TX descriptors and mbufs. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	for (i = 0; i < VGE_NTXDESC; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;

	sc->sc_tx_prodidx = 0;
	sc->sc_tx_considx = 0;
	sc->sc_tx_free = VGE_NTXDESC;

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC);
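
	/*
	 * The residue count starts out equal to the full ring size;
	 * vge_rxeof() later writes back the number of descriptors it
	 * has handed back to the chip, so the value programmed here
	 * and the updates there have to stay in step.
	 */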

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_if_flags = 0;
	sc->sc_link = 0;

	callout_schedule(&sc->sc_timeout, hz);

out:
	return rc;
}

static void
vge_miibus_statchg(device_t self)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_private(self);
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;
	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    device_xname(sc->sc_dev),
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct vge_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) == 0)
		return 0;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	else
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	vge_setmulti(sc);
	return 0;
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct vge_softc *sc;
	struct ifreq *ifr;
	int s, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) {
		error = 0;
		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vge_setmulti(sc);
		}
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);
	return error;
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct ifnet *ifp, int disable)
{
	struct vge_softc *sc = ifp->if_softc;
	struct vge_txsoft *txs;
	struct vge_rxsoft *rxs;
	int i, s;

	s = splnet();
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->sc_rx_mhead != NULL) {
		m_freem(sc->sc_rx_mhead);
		sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_NTXDESC; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}

	splx(s);
}

#if VGE_POWER_MANAGEMENT
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;
	int i;

	sc = device_private(dev);

	vge_stop(&sc->sc_ethercom.ec_if, 1);

	for (i = 0; i < 5; i++)
		sc->sc_saved_maps[i] =
		    pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->sc_suspended = 1;

	return 0;
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_private(dev);
	ifp = &sc->sc_ethercom.ec_if;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4,
		    sc->sc_saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(ifp);

	sc->sc_suspended = 0;

	return 0;
}
#endif

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static bool
vge_shutdown(device_t self, int howto)
{
	struct vge_softc *sc;

	sc = device_private(self);
	vge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}