/* if_lge.c revision 1.21 */
1/* $OpenBSD: if_lge.c,v 1.21 2005/07/02 23:10:11 brad Exp $ */ 2/* 3 * Copyright (c) 2001 Wind River Systems 4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 5 * Bill Paul <william.paul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $ 35 */ 36 37/* 38 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public 39 * documentation not available, but ask me nicely. 40 * 41 * Written by Bill Paul <william.paul@windriver.com> 42 * Wind River Systems 43 */ 44 45/* 46 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. 47 * It's a 64-bit PCI part that supports TCP/IP checksum offload, 48 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There 49 * are three supported methods for data transfer between host and 50 * NIC: programmed I/O, traditional scatter/gather DMA and Packet 51 * Propulsion Technology (tm) DMA. The latter mechanism is a form 52 * of double buffer DMA where the packet data is copied to a 53 * pre-allocated DMA buffer who's physical address has been loaded 54 * into a table at device initialization time. The rationale is that 55 * the virtual to physical address translation needed for normal 56 * scatter/gather DMA is more expensive than the data copy needed 57 * for double buffering. This may be true in Windows NT and the like, 58 * but it isn't true for us, at least on the x86 arch. This driver 59 * uses the scatter/gather I/O method for both TX and RX. 60 * 61 * The LXT1001 only supports TCP/IP checksum offload on receive. 62 * Also, the VLAN tagging is done using a 16-entry table which allows 63 * the chip to perform hardware filtering based on VLAN tags. Sadly, 64 * our vlan support doesn't currently play well with this kind of 65 * hardware support. 
66 * 67 * Special thanks to: 68 * - Jeff James at Intel, for arranging to have the LXT1001 manual 69 * released (at long last) 70 * - Beny Chen at D-Link, for actually sending it to me 71 * - Brad Short and Keith Alexis at SMC, for sending me sample 72 * SMC9462SX and SMC9462TX adapters for testing 73 * - Paul Saab at Y!, for not killing me (though it remains to be seen 74 * if in fact he did me much of a favor) 75 */ 76 77#include "bpfilter.h" 78 79#include <sys/param.h> 80#include <sys/systm.h> 81#include <sys/sockio.h> 82#include <sys/mbuf.h> 83#include <sys/malloc.h> 84#include <sys/kernel.h> 85#include <sys/device.h> 86#include <sys/socket.h> 87 88#include <net/if.h> 89#include <net/if_dl.h> 90#include <net/if_media.h> 91 92#ifdef INET 93#include <netinet/in.h> 94#include <netinet/in_systm.h> 95#include <netinet/in_var.h> 96#include <netinet/ip.h> 97#include <netinet/if_ether.h> 98#endif 99 100#if NVLAN > 0 101#include <net/if_types.h> 102#include <net/if_vlan_var.h> 103#endif 104 105#if NBPFILTER > 0 106#include <net/bpf.h> 107#endif 108 109#include <uvm/uvm_extern.h> /* for vtophys */ 110#include <uvm/uvm_pmap.h> /* for vtophys */ 111 112#include <dev/pci/pcireg.h> 113#include <dev/pci/pcivar.h> 114#include <dev/pci/pcidevs.h> 115 116#include <dev/mii/mii.h> 117#include <dev/mii/miivar.h> 118 119#define LGE_USEIOSPACE 120 121#include <dev/pci/if_lgereg.h> 122 123int lge_probe(struct device *, void *, void *); 124void lge_attach(struct device *, struct device *, void *); 125 126int lge_alloc_jumbo_mem(struct lge_softc *); 127void lge_free_jumbo_mem(struct lge_softc *); 128void *lge_jalloc(struct lge_softc *); 129void lge_jfree(caddr_t, u_int, void *); 130 131int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, 132 struct mbuf *); 133int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *); 134void lge_rxeof(struct lge_softc *, int); 135void lge_rxeoc(struct lge_softc *); 136void lge_txeof(struct lge_softc *); 137int lge_intr(void *); 138void 
lge_tick(void *); 139void lge_start(struct ifnet *); 140int lge_ioctl(struct ifnet *, u_long, caddr_t); 141void lge_init(void *); 142void lge_stop(struct lge_softc *); 143void lge_watchdog(struct ifnet *); 144void lge_shutdown(void *); 145int lge_ifmedia_upd(struct ifnet *); 146void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 147 148void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *); 149void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int); 150 151int lge_miibus_readreg(struct device *, int, int); 152void lge_miibus_writereg(struct device *, int, int, int); 153void lge_miibus_statchg(struct device *); 154 155void lge_setmulti(struct lge_softc *); 156void lge_reset(struct lge_softc *); 157int lge_list_rx_init(struct lge_softc *); 158int lge_list_tx_init(struct lge_softc *); 159 160#ifdef LGE_USEIOSPACE 161#define LGE_RES SYS_RES_IOPORT 162#define LGE_RID LGE_PCI_LOIO 163#else 164#define LGE_RES SYS_RES_MEMORY 165#define LGE_RID LGE_PCI_LOMEM 166#endif 167 168#ifdef LGE_DEBUG 169#define DPRINTF(x) if (lgedebug) printf x 170#define DPRINTFN(n,x) if (lgedebug >= (n)) printf x 171int lgedebug = 0; 172#else 173#define DPRINTF(x) 174#define DPRINTFN(n,x) 175#endif 176 177#define LGE_SETBIT(sc, reg, x) \ 178 CSR_WRITE_4(sc, reg, \ 179 CSR_READ_4(sc, reg) | (x)) 180 181#define LGE_CLRBIT(sc, reg, x) \ 182 CSR_WRITE_4(sc, reg, \ 183 CSR_READ_4(sc, reg) & ~(x)) 184 185#define SIO_SET(x) \ 186 CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x) 187 188#define SIO_CLR(x) \ 189 CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x) 190 191/* 192 * Read a word of data stored in the EEPROM at address 'addr.' 
193 */ 194void lge_eeprom_getword(sc, addr, dest) 195 struct lge_softc *sc; 196 int addr; 197 u_int16_t *dest; 198{ 199 register int i; 200 u_int32_t val; 201 202 CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ| 203 LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8)); 204 205 for (i = 0; i < LGE_TIMEOUT; i++) 206 if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ)) 207 break; 208 209 if (i == LGE_TIMEOUT) { 210 printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname); 211 return; 212 } 213 214 val = CSR_READ_4(sc, LGE_EEDATA); 215 216 if (addr & 1) 217 *dest = (val >> 16) & 0xFFFF; 218 else 219 *dest = val & 0xFFFF; 220 221 return; 222} 223 224/* 225 * Read a sequence of words from the EEPROM. 226 */ 227void lge_read_eeprom(sc, dest, off, cnt, swap) 228 struct lge_softc *sc; 229 caddr_t dest; 230 int off; 231 int cnt; 232 int swap; 233{ 234 int i; 235 u_int16_t word = 0, *ptr; 236 237 for (i = 0; i < cnt; i++) { 238 lge_eeprom_getword(sc, off + i, &word); 239 ptr = (u_int16_t *)(dest + (i * 2)); 240 if (swap) 241 *ptr = ntohs(word); 242 else 243 *ptr = word; 244 } 245 246 return; 247} 248 249int lge_miibus_readreg(dev, phy, reg) 250 struct device * dev; 251 int phy, reg; 252{ 253 struct lge_softc *sc = (struct lge_softc *)dev; 254 int i; 255 256 /* 257 * If we have a non-PCS PHY, pretend that the internal 258 * autoneg stuff at PHY address 0 isn't there so that 259 * the miibus code will find only the GMII PHY. 
260 */ 261 if (sc->lge_pcs == 0 && phy == 0) 262 return(0); 263 264 CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ); 265 266 for (i = 0; i < LGE_TIMEOUT; i++) 267 if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY)) 268 break; 269 270 if (i == LGE_TIMEOUT) { 271 printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname); 272 return(0); 273 } 274 275 return(CSR_READ_4(sc, LGE_GMIICTL) >> 16); 276} 277 278void lge_miibus_writereg(dev, phy, reg, data) 279 struct device * dev; 280 int phy, reg, data; 281{ 282 struct lge_softc *sc = (struct lge_softc *)dev; 283 int i; 284 285 CSR_WRITE_4(sc, LGE_GMIICTL, 286 (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE); 287 288 for (i = 0; i < LGE_TIMEOUT; i++) 289 if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY)) 290 break; 291 292 if (i == LGE_TIMEOUT) { 293 printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname); 294 } 295} 296 297void lge_miibus_statchg(dev) 298 struct device * dev; 299{ 300 struct lge_softc *sc = (struct lge_softc *)dev; 301 struct mii_data *mii = &sc->lge_mii; 302 303 LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED); 304 switch (IFM_SUBTYPE(mii->mii_media_active)) { 305 case IFM_1000_T: 306 case IFM_1000_SX: 307 LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000); 308 break; 309 case IFM_100_TX: 310 LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100); 311 break; 312 case IFM_10_T: 313 LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10); 314 break; 315 default: 316 /* 317 * Choose something, even if it's wrong. Clearing 318 * all the bits will hose autoneg on the internal 319 * PHY. 
320 */ 321 LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000); 322 break; 323 } 324 325 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 326 LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); 327 } else { 328 LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); 329 } 330 331 return; 332} 333 334void lge_setmulti(sc) 335 struct lge_softc *sc; 336{ 337 struct arpcom *ac = &sc->arpcom; 338 struct ifnet *ifp = &ac->ac_if; 339 struct ether_multi *enm; 340 struct ether_multistep step; 341 u_int32_t h = 0, hashes[2] = { 0, 0 }; 342 343 /* Make sure multicast hash table is enabled. */ 344 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST); 345 346allmulti: 347 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 348 CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF); 349 CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF); 350 return; 351 } 352 353 /* first, zot all the existing hash bits */ 354 CSR_WRITE_4(sc, LGE_MAR0, 0); 355 CSR_WRITE_4(sc, LGE_MAR1, 0); 356 357 /* now program new ones */ 358 ETHER_FIRST_MULTI(step, ac, enm); 359 while (enm != NULL) { 360 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 361 ifp->if_flags |= IFF_ALLMULTI; 362 goto allmulti; 363 } 364 h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) & 365 0x0000003F; 366 if (h < 32) 367 hashes[0] |= (1 << h); 368 else 369 hashes[1] |= (1 << (h - 32)); 370 ETHER_NEXT_MULTI(step, enm); 371 } 372 373 CSR_WRITE_4(sc, LGE_MAR0, hashes[0]); 374 CSR_WRITE_4(sc, LGE_MAR1, hashes[1]); 375 376 return; 377} 378 379void lge_reset(sc) 380 struct lge_softc *sc; 381{ 382 register int i; 383 384 LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST); 385 386 for (i = 0; i < LGE_TIMEOUT; i++) { 387 if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST)) 388 break; 389 } 390 391 if (i == LGE_TIMEOUT) 392 printf("%s: reset never completed\n", sc->sc_dv.dv_xname); 393 394 /* Wait a little while for the chip to get its brains in order. 
*/ 395 DELAY(1000); 396 397 return; 398} 399 400/* 401 * Probe for a Level 1 chip. Check the PCI vendor and device 402 * IDs against our list and return a device name if we find a match. 403 */ 404int lge_probe(parent, match, aux) 405 struct device *parent; 406 void *match; 407 void *aux; 408{ 409 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 410 411 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LEVEL1 && 412 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LEVEL1_LXT1001) 413 return (1); 414 415 return (0); 416} 417 418/* 419 * Attach the interface. Allocate softc structures, do ifmedia 420 * setup and ethernet/BPF attach. 421 */ 422void lge_attach(parent, self, aux) 423 struct device *parent, *self; 424 void *aux; 425{ 426 struct lge_softc *sc = (struct lge_softc *)self; 427 struct pci_attach_args *pa = aux; 428 pci_chipset_tag_t pc = pa->pa_pc; 429 pci_intr_handle_t ih; 430 const char *intrstr = NULL; 431 bus_addr_t iobase; 432 bus_size_t iosize; 433 bus_dma_segment_t seg; 434 bus_dmamap_t dmamap; 435 int s, rseg; 436 u_char eaddr[ETHER_ADDR_LEN]; 437 u_int32_t command; 438 struct ifnet *ifp; 439 int error = 0; 440 caddr_t kva; 441 442 s = splimp(); 443 444 bzero(sc, sizeof(struct lge_softc)); 445 446 /* 447 * Handle power management nonsense. 448 */ 449 DPRINTFN(5, ("Preparing for conf read\n")); 450 command = pci_conf_read(pc, pa->pa_tag, LGE_PCI_CAPID) & 0x000000FF; 451 if (command == 0x01) { 452 command = pci_conf_read(pc, pa->pa_tag, LGE_PCI_PWRMGMTCTRL); 453 if (command & LGE_PSTATE_MASK) { 454 u_int32_t iobase, membase, irq; 455 456 /* Save important PCI config data. */ 457 iobase = pci_conf_read(pc, pa->pa_tag, LGE_PCI_LOIO); 458 membase = pci_conf_read(pc, pa->pa_tag, LGE_PCI_LOMEM); 459 irq = pci_conf_read(pc, pa->pa_tag, LGE_PCI_INTLINE); 460 461 /* Reset the power state. 
*/ 462 printf("%s: chip is in D%d power mode " 463 "-- setting to D0\n", sc->sc_dv.dv_xname, 464 command & LGE_PSTATE_MASK); 465 command &= 0xFFFFFFFC; 466 pci_conf_write(pc, pa->pa_tag, 467 LGE_PCI_PWRMGMTCTRL, command); 468 469 /* Restore PCI config data. */ 470 pci_conf_write(pc, pa->pa_tag, LGE_PCI_LOIO, iobase); 471 pci_conf_write(pc, pa->pa_tag, LGE_PCI_LOMEM, membase); 472 pci_conf_write(pc, pa->pa_tag, LGE_PCI_INTLINE, irq); 473 } 474 } 475 476 /* 477 * Map control/status registers. 478 */ 479 DPRINTFN(5, ("Map control/status regs\n")); 480 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 481 command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE | 482 PCI_COMMAND_MASTER_ENABLE; 483 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 484 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 485 486#ifdef LGE_USEIOSPACE 487 if (!(command & PCI_COMMAND_IO_ENABLE)) { 488 printf("%s: failed to enable I/O ports!\n", 489 sc->sc_dv.dv_xname); 490 error = ENXIO; 491 goto fail; 492 } 493 /* 494 * Map control/status registers. 
495 */ 496 DPRINTFN(5, ("pci_io_find\n")); 497 if (pci_io_find(pc, pa->pa_tag, LGE_PCI_LOIO, &iobase, &iosize)) { 498 printf(": can't find i/o space\n"); 499 goto fail; 500 } 501 DPRINTFN(5, ("bus_space_map\n")); 502 if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->lge_bhandle)) { 503 printf(": can't map i/o space\n"); 504 goto fail; 505 } 506 sc->lge_btag = pa->pa_iot; 507#else 508 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 509 printf("%s: failed to enable memory mapping!\n", 510 sc->sc_dv.dv_xname); 511 error = ENXIO; 512 goto fail; 513 } 514 DPRINTFN(5, ("pci_mem_find\n")); 515 if (pci_mem_find(pc, pa->pa_tag, LGE_PCI_LOMEM, &iobase, 516 &iosize, NULL)) { 517 printf(": can't find mem space\n"); 518 goto fail; 519 } 520 DPRINTFN(5, ("bus_space_map\n")); 521 if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->lge_bhandle)) { 522 printf(": can't map mem space\n"); 523 goto fail; 524 } 525 526 sc->lge_btag = pa->pa_memt; 527#endif 528 529 DPRINTFN(5, ("pci_intr_map\n")); 530 if (pci_intr_map(pa, &ih)) { 531 printf(": couldn't map interrupt\n"); 532 goto fail; 533 } 534 535 DPRINTFN(5, ("pci_intr_string\n")); 536 intrstr = pci_intr_string(pc, ih); 537 DPRINTFN(5, ("pci_intr_establish\n")); 538 sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc, 539 sc->sc_dv.dv_xname); 540 if (sc->lge_intrhand == NULL) { 541 printf(": couldn't establish interrupt"); 542 if (intrstr != NULL) 543 printf(" at %s", intrstr); 544 printf("\n"); 545 goto fail; 546 } 547 printf(": %s", intrstr); 548 549 /* Reset the adapter. */ 550 DPRINTFN(5, ("lge_reset\n")); 551 lge_reset(sc); 552 553 /* 554 * Get station address from the EEPROM. 555 */ 556 DPRINTFN(5, ("lge_read_eeprom\n")); 557 lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0); 558 lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0); 559 lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0); 560 561 /* 562 * A Level 1 chip was detected. Inform the world. 
563 */ 564 printf(": address: %s\n", ether_sprintf(eaddr)); 565 566 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 567 568 sc->sc_dmatag = pa->pa_dmat; 569 DPRINTFN(5, ("bus_dmamem_alloc\n")); 570 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data), 571 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 572 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 573 goto fail; 574 } 575 DPRINTFN(5, ("bus_dmamem_map\n")); 576 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 577 sizeof(struct lge_list_data), &kva, 578 BUS_DMA_NOWAIT)) { 579 printf("%s: can't map dma buffers (%d bytes)\n", 580 sc->sc_dv.dv_xname, sizeof(struct lge_list_data)); 581 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 582 goto fail; 583 } 584 DPRINTFN(5, ("bus_dmamem_create\n")); 585 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1, 586 sizeof(struct lge_list_data), 0, 587 BUS_DMA_NOWAIT, &dmamap)) { 588 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 589 bus_dmamem_unmap(sc->sc_dmatag, kva, 590 sizeof(struct lge_list_data)); 591 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 592 goto fail; 593 } 594 DPRINTFN(5, ("bus_dmamem_load\n")); 595 if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, 596 sizeof(struct lge_list_data), NULL, 597 BUS_DMA_NOWAIT)) { 598 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 599 bus_dmamem_unmap(sc->sc_dmatag, kva, 600 sizeof(struct lge_list_data)); 601 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 602 goto fail; 603 } 604 605 DPRINTFN(5, ("bzero\n")); 606 sc->lge_ldata = (struct lge_list_data *)kva; 607 bzero(sc->lge_ldata, sizeof(struct lge_list_data)); 608 609 /* Try to allocate memory for jumbo buffers. 
*/ 610 DPRINTFN(5, ("lge_alloc_jumbo_mem\n")); 611 if (lge_alloc_jumbo_mem(sc)) { 612 printf("%s: jumbo buffer allocation failed\n", 613 sc->sc_dv.dv_xname); 614 goto fail; 615 } 616 617 ifp = &sc->arpcom.ac_if; 618 ifp->if_softc = sc; 619 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 620 ifp->if_ioctl = lge_ioctl; 621 ifp->if_start = lge_start; 622 ifp->if_watchdog = lge_watchdog; 623 ifp->if_baudrate = 1000000000; 624 IFQ_SET_MAXLEN(&ifp->if_snd, LGE_TX_LIST_CNT - 1); 625 IFQ_SET_READY(&ifp->if_snd); 626 DPRINTFN(5, ("bcopy\n")); 627 bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ); 628 629 if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH) 630 sc->lge_pcs = 1; 631 else 632 sc->lge_pcs = 0; 633 634 /* 635 * Do MII setup. 636 */ 637 DPRINTFN(5, ("mii setup\n")); 638 sc->lge_mii.mii_ifp = ifp; 639 sc->lge_mii.mii_readreg = lge_miibus_readreg; 640 sc->lge_mii.mii_writereg = lge_miibus_writereg; 641 sc->lge_mii.mii_statchg = lge_miibus_statchg; 642 ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd, 643 lge_ifmedia_sts); 644 mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY, 645 MII_OFFSET_ANY, 0); 646 647 if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) { 648 printf("%s: no PHY found!\n", sc->sc_dv.dv_xname); 649 ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL, 650 0, NULL); 651 ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL); 652 } 653 else 654 ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO); 655 656 /* 657 * Call MI attach routine. 658 */ 659 DPRINTFN(5, ("if_attach\n")); 660 if_attach(ifp); 661 DPRINTFN(5, ("ether_ifattach\n")); 662 ether_ifattach(ifp); 663 DPRINTFN(5, ("timeout_set\n")); 664 timeout_set(&sc->lge_timeout, lge_tick, sc); 665 timeout_add(&sc->lge_timeout, hz); 666 667fail: 668 splx(s); 669} 670 671/* 672 * Initialize the transmit descriptors. 
673 */ 674int lge_list_tx_init(sc) 675 struct lge_softc *sc; 676{ 677 struct lge_list_data *ld; 678 struct lge_ring_data *cd; 679 int i; 680 681 cd = &sc->lge_cdata; 682 ld = sc->lge_ldata; 683 for (i = 0; i < LGE_TX_LIST_CNT; i++) { 684 ld->lge_tx_list[i].lge_mbuf = NULL; 685 ld->lge_tx_list[i].lge_ctl = 0; 686 } 687 688 cd->lge_tx_prod = cd->lge_tx_cons = 0; 689 690 return(0); 691} 692 693 694/* 695 * Initialize the RX descriptors and allocate mbufs for them. Note that 696 * we arralge the descriptors in a closed ring, so that the last descriptor 697 * points back to the first. 698 */ 699int lge_list_rx_init(sc) 700 struct lge_softc *sc; 701{ 702 struct lge_list_data *ld; 703 struct lge_ring_data *cd; 704 int i; 705 706 ld = sc->lge_ldata; 707 cd = &sc->lge_cdata; 708 709 cd->lge_rx_prod = cd->lge_rx_cons = 0; 710 711 CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); 712 713 for (i = 0; i < LGE_RX_LIST_CNT; i++) { 714 if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0) 715 break; 716 if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS) 717 return(ENOBUFS); 718 } 719 720 /* Clear possible 'rx command queue empty' interrupt. */ 721 CSR_READ_4(sc, LGE_ISR); 722 723 return(0); 724} 725 726/* 727 * Initialize an RX descriptor and attach an MBUF cluster. 
728 */ 729int lge_newbuf(sc, c, m) 730 struct lge_softc *sc; 731 struct lge_rx_desc *c; 732 struct mbuf *m; 733{ 734 struct mbuf *m_new = NULL; 735 caddr_t *buf = NULL; 736 737 if (m == NULL) { 738 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 739 if (m_new == NULL) { 740 return(ENOBUFS); 741 } 742 743 /* Allocate the jumbo buffer */ 744 buf = lge_jalloc(sc); 745 if (buf == NULL) { 746 m_freem(m_new); 747 return(ENOBUFS); 748 } 749 /* Attach the buffer to the mbuf */ 750 m_new->m_data = m_new->m_ext.ext_buf = (void *)buf; 751 m_new->m_flags |= M_EXT; 752 m_new->m_ext.ext_size = m_new->m_pkthdr.len = 753 m_new->m_len = LGE_JLEN; 754 m_new->m_ext.ext_free = lge_jfree; 755 m_new->m_ext.ext_arg = sc; 756 MCLINITREFERENCE(m_new); 757 } else { 758 m_new = m; 759 m_new->m_len = m_new->m_pkthdr.len = ETHER_MAX_LEN_JUMBO; 760 m_new->m_data = m_new->m_ext.ext_buf; 761 } 762 763 /* 764 * Adjust alignment so packet payload begins on a 765 * longword boundary. Mandatory for Alpha, useful on 766 * x86 too. 767 */ 768 m_adj(m_new, ETHER_ALIGN); 769 770 c->lge_mbuf = m_new; 771 c->lge_fragptr_hi = 0; 772 c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t)); 773 c->lge_fraglen = m_new->m_len; 774 c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1); 775 c->lge_sts = 0; 776 777 /* 778 * Put this buffer in the RX command FIFO. To do this, 779 * we just write the physical address of the descriptor 780 * into the RX descriptor address registers. Note that 781 * there are two registers, one high DWORD and one low 782 * DWORD, which lets us specify a 64-bit address if 783 * desired. We only use a 32-bit address for now. 784 * Writing to the low DWORD register is what actually 785 * causes the command to be issued, so we do that 786 * last. 
787 */ 788 CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c)); 789 LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT); 790 791 return(0); 792} 793 794int lge_alloc_jumbo_mem(sc) 795 struct lge_softc *sc; 796{ 797 caddr_t ptr, kva; 798 bus_dma_segment_t seg; 799 bus_dmamap_t dmamap; 800 int i, rseg; 801 struct lge_jpool_entry *entry; 802 803 /* Grab a big chunk o' storage. */ 804 if (bus_dmamem_alloc(sc->sc_dmatag, LGE_JMEM, PAGE_SIZE, 0, 805 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 806 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 807 return (ENOBUFS); 808 } 809 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, LGE_JMEM, &kva, 810 BUS_DMA_NOWAIT)) { 811 printf("%s: can't map dma buffers (%d bytes)\n", 812 sc->sc_dv.dv_xname, LGE_JMEM); 813 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 814 return (ENOBUFS); 815 } 816 if (bus_dmamap_create(sc->sc_dmatag, LGE_JMEM, 1, 817 LGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) { 818 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 819 bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM); 820 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 821 return (ENOBUFS); 822 } 823 if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, LGE_JMEM, 824 NULL, BUS_DMA_NOWAIT)) { 825 printf("%s: can't load dma map\n", sc->sc_dv.dv_xname); 826 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 827 bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM); 828 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 829 return (ENOBUFS); 830 } 831 sc->lge_cdata.lge_jumbo_buf = (caddr_t)kva; 832 DPRINTFN(1,("lge_jumbo_buf = 0x%08X\n", sc->lge_cdata.lge_jumbo_buf)); 833 DPRINTFN(1,("LGE_JLEN = 0x%08X\n", LGE_JLEN)); 834 835 LIST_INIT(&sc->lge_jfree_listhead); 836 LIST_INIT(&sc->lge_jinuse_listhead); 837 838 /* 839 * Now divide it up into 9K pieces and save the addresses 840 * in an array. 
841 */ 842 ptr = sc->lge_cdata.lge_jumbo_buf; 843 for (i = 0; i < LGE_JSLOTS; i++) { 844 sc->lge_cdata.lge_jslots[i] = ptr; 845 ptr += LGE_JLEN; 846 entry = malloc(sizeof(struct lge_jpool_entry), 847 M_DEVBUF, M_NOWAIT); 848 if (entry == NULL) { 849 bus_dmamap_unload(sc->sc_dmatag, dmamap); 850 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 851 bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM); 852 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 853 sc->lge_cdata.lge_jumbo_buf = NULL; 854 printf("%s: no memory for jumbo buffer queue!\n", 855 sc->sc_dv.dv_xname); 856 return(ENOBUFS); 857 } 858 entry->slot = i; 859 LIST_INSERT_HEAD(&sc->lge_jfree_listhead, 860 entry, jpool_entries); 861 } 862 863 return(0); 864} 865 866/* 867 * Allocate a jumbo buffer. 868 */ 869void *lge_jalloc(sc) 870 struct lge_softc *sc; 871{ 872 struct lge_jpool_entry *entry; 873 874 entry = LIST_FIRST(&sc->lge_jfree_listhead); 875 876 if (entry == NULL) { 877#ifdef LGE_VERBOSE 878 printf("%s: no free jumbo buffers\n", sc->sc_dv.dv_xname); 879#endif 880 return(NULL); 881 } 882 883 LIST_REMOVE(entry, jpool_entries); 884 LIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries); 885 return(sc->lge_cdata.lge_jslots[entry->slot]); 886} 887 888/* 889 * Release a jumbo buffer. 890 */ 891void lge_jfree(buf, size, arg) 892 caddr_t buf; 893 u_int size; 894 void *arg; 895{ 896 struct lge_softc *sc; 897 int i; 898 struct lge_jpool_entry *entry; 899 900 /* Extract the softc struct pointer. 
*/ 901 sc = (struct lge_softc *)arg; 902 903 if (sc == NULL) 904 panic("lge_jfree: can't find softc pointer!"); 905 906 /* calculate the slot this buffer belongs to */ 907 i = ((vaddr_t)buf - (vaddr_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN; 908 909 if ((i < 0) || (i >= LGE_JSLOTS)) 910 panic("lge_jfree: asked to free buffer that we don't manage!"); 911 912 entry = LIST_FIRST(&sc->lge_jinuse_listhead); 913 if (entry == NULL) 914 panic("lge_jfree: buffer not in use!"); 915 entry->slot = i; 916 LIST_REMOVE(entry, jpool_entries); 917 LIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); 918 919 return; 920} 921 922/* 923 * A frame has been uploaded: pass the resulting mbuf chain up to 924 * the higher level protocols. 925 */ 926void lge_rxeof(sc, cnt) 927 struct lge_softc *sc; 928 int cnt; 929{ 930 struct mbuf *m; 931 struct ifnet *ifp; 932 struct lge_rx_desc *cur_rx; 933 int c, i, total_len = 0; 934 u_int32_t rxsts, rxctl; 935 936 ifp = &sc->arpcom.ac_if; 937 938 /* Find out how many frames were processed. */ 939 c = cnt; 940 i = sc->lge_cdata.lge_rx_cons; 941 942 /* Suck them in. */ 943 while(c) { 944 struct mbuf *m0 = NULL; 945 946 cur_rx = &sc->lge_ldata->lge_rx_list[i]; 947 rxctl = cur_rx->lge_ctl; 948 rxsts = cur_rx->lge_sts; 949 m = cur_rx->lge_mbuf; 950 cur_rx->lge_mbuf = NULL; 951 total_len = LGE_RXBYTES(cur_rx); 952 LGE_INC(i, LGE_RX_LIST_CNT); 953 c--; 954 955 /* 956 * If an error occurs, update stats, clear the 957 * status word and leave the mbuf cluster in place: 958 * it should simply get re-used next time this descriptor 959 * comes up in the ring. 
960 */ 961 if (rxctl & LGE_RXCTL_ERRMASK) { 962 ifp->if_ierrors++; 963 lge_newbuf(sc, &LGE_RXTAIL(sc), m); 964 continue; 965 } 966 967 if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) { 968 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, 969 ifp, NULL); 970 lge_newbuf(sc, &LGE_RXTAIL(sc), m); 971 if (m0 == NULL) { 972 ifp->if_ierrors++; 973 continue; 974 } 975 m = m0; 976 } else { 977 m->m_pkthdr.rcvif = ifp; 978 m->m_pkthdr.len = m->m_len = total_len; 979 } 980 981 ifp->if_ipackets++; 982 983#if NBPFILTER > 0 984 /* 985 * Handle BPF listeners. Let the BPF user see the packet. 986 */ 987 if (ifp->if_bpf) 988 bpf_mtap(ifp->if_bpf, m); 989#endif 990 991 /* Do IP checksum checking. */ 992#if 0 993 if (rxsts & LGE_RXSTS_ISIP) 994 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 995 if (!(rxsts & LGE_RXSTS_IPCSUMERR)) 996 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 997 if ((rxsts & LGE_RXSTS_ISTCP && 998 !(rxsts & LGE_RXSTS_TCPCSUMERR)) || 999 (rxsts & LGE_RXSTS_ISUDP && 1000 !(rxsts & LGE_RXSTS_UDPCSUMERR))) { 1001 m->m_pkthdr.csum_flags |= 1002 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1003 m->m_pkthdr.csum_data = 0xffff; 1004 } 1005#endif 1006 1007 if (rxsts & LGE_RXSTS_ISIP) { 1008 if (rxsts & LGE_RXSTS_IPCSUMERR) 1009 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_BAD; 1010 else 1011 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1012 } 1013 if (rxsts & LGE_RXSTS_ISTCP) { 1014 if (rxsts & LGE_RXSTS_TCPCSUMERR) 1015 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_BAD; 1016 else 1017 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK; 1018 } 1019 if (rxsts & LGE_RXSTS_ISUDP) { 1020 if (rxsts & LGE_RXSTS_UDPCSUMERR) 1021 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_BAD; 1022 else 1023 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK; 1024 } 1025 1026 ether_input_mbuf(ifp, m); 1027 } 1028 1029 sc->lge_cdata.lge_rx_cons = i; 1030 1031 return; 1032} 1033 1034void lge_rxeoc(sc) 1035 struct lge_softc *sc; 1036{ 1037 struct ifnet *ifp; 1038 1039 ifp = &sc->arpcom.ac_if; 1040 ifp->if_flags &= ~IFF_RUNNING; 1041 
lge_init(sc); 1042 return; 1043} 1044 1045/* 1046 * A frame was downloaded to the chip. It's safe for us to clean up 1047 * the list buffers. 1048 */ 1049 1050void lge_txeof(sc) 1051 struct lge_softc *sc; 1052{ 1053 struct lge_tx_desc *cur_tx = NULL; 1054 struct ifnet *ifp; 1055 u_int32_t idx, txdone; 1056 1057 ifp = &sc->arpcom.ac_if; 1058 1059 /* Clear the timeout timer. */ 1060 ifp->if_timer = 0; 1061 1062 /* 1063 * Go through our tx list and free mbufs for those 1064 * frames that have been transmitted. 1065 */ 1066 idx = sc->lge_cdata.lge_tx_cons; 1067 txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT); 1068 1069 while (idx != sc->lge_cdata.lge_tx_prod && txdone) { 1070 cur_tx = &sc->lge_ldata->lge_tx_list[idx]; 1071 1072 ifp->if_opackets++; 1073 if (cur_tx->lge_mbuf != NULL) { 1074 m_freem(cur_tx->lge_mbuf); 1075 cur_tx->lge_mbuf = NULL; 1076 } 1077 cur_tx->lge_ctl = 0; 1078 1079 txdone--; 1080 LGE_INC(idx, LGE_TX_LIST_CNT); 1081 ifp->if_timer = 0; 1082 } 1083 1084 sc->lge_cdata.lge_tx_cons = idx; 1085 1086 if (cur_tx != NULL) 1087 ifp->if_flags &= ~IFF_OACTIVE; 1088 1089 return; 1090} 1091 1092void lge_tick(xsc) 1093 void *xsc; 1094{ 1095 struct lge_softc *sc = xsc; 1096 struct mii_data *mii = &sc->lge_mii; 1097 struct ifnet *ifp = &sc->arpcom.ac_if; 1098 int s; 1099 1100 s = splimp(); 1101 1102 CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS); 1103 ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL); 1104 CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS); 1105 ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL); 1106 1107 if (!sc->lge_link) { 1108 mii_tick(mii); 1109 if (mii->mii_media_status & IFM_ACTIVE && 1110 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1111 sc->lge_link++; 1112 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX|| 1113 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 1114 printf("%s: gigabit link up\n", 1115 sc->sc_dv.dv_xname); 1116 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1117 lge_start(ifp); 1118 } 1119 } 1120 1121 
timeout_add(&sc->lge_timeout, hz); 1122 1123 splx(s); 1124 1125 return; 1126} 1127 1128int lge_intr(arg) 1129 void *arg; 1130{ 1131 struct lge_softc *sc; 1132 struct ifnet *ifp; 1133 u_int32_t status; 1134 int claimed = 0; 1135 1136 sc = arg; 1137 ifp = &sc->arpcom.ac_if; 1138 1139 /* Supress unwanted interrupts */ 1140 if (!(ifp->if_flags & IFF_UP)) { 1141 lge_stop(sc); 1142 return (0); 1143 } 1144 1145 for (;;) { 1146 /* 1147 * Reading the ISR register clears all interrupts, and 1148 * clears the 'interrupts enabled' bit in the IMR 1149 * register. 1150 */ 1151 status = CSR_READ_4(sc, LGE_ISR); 1152 1153 if ((status & LGE_INTRS) == 0) 1154 break; 1155 1156 claimed = 1; 1157 1158 if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE))) 1159 lge_txeof(sc); 1160 1161 if (status & LGE_ISR_RXDMA_DONE) 1162 lge_rxeof(sc, LGE_RX_DMACNT(status)); 1163 1164 if (status & LGE_ISR_RXCMDFIFO_EMPTY) 1165 lge_rxeoc(sc); 1166 1167 if (status & LGE_ISR_PHY_INTR) { 1168 sc->lge_link = 0; 1169 timeout_del(&sc->lge_timeout); 1170 lge_tick(sc); 1171 } 1172 } 1173 1174 /* Re-enable interrupts. */ 1175 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB); 1176 1177 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1178 lge_start(ifp); 1179 1180 return claimed; 1181} 1182 1183/* 1184 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1185 * pointers to the fragment pointers. 1186 */ 1187int lge_encap(sc, m_head, txidx) 1188 struct lge_softc *sc; 1189 struct mbuf *m_head; 1190 u_int32_t *txidx; 1191{ 1192 struct lge_frag *f = NULL; 1193 struct lge_tx_desc *cur_tx; 1194 struct mbuf *m; 1195 int frag = 0, tot_len = 0; 1196 1197 /* 1198 * Start packing the mbufs in this chain into 1199 * the fragment pointers. Stop when we run out 1200 * of fragments or hit the end of the mbuf chain. 
1201 */ 1202 m = m_head; 1203 cur_tx = &sc->lge_ldata->lge_tx_list[*txidx]; 1204 frag = 0; 1205 1206 for (m = m_head; m != NULL; m = m->m_next) { 1207 if (m->m_len != 0) { 1208 tot_len += m->m_len; 1209 f = &cur_tx->lge_frags[frag]; 1210 f->lge_fraglen = m->m_len; 1211 f->lge_fragptr_lo = vtophys(mtod(m, vaddr_t)); 1212 f->lge_fragptr_hi = 0; 1213 frag++; 1214 } 1215 } 1216 1217 if (m != NULL) 1218 return(ENOBUFS); 1219 1220 cur_tx->lge_mbuf = m_head; 1221 cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len; 1222 LGE_INC((*txidx), LGE_TX_LIST_CNT); 1223 1224 /* Queue for transmit */ 1225 CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx)); 1226 1227 return(0); 1228} 1229 1230/* 1231 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1232 * to the mbuf data regions directly in the transmit lists. We also save a 1233 * copy of the pointers since the transmit list fragment pointers are 1234 * physical addresses. 1235 */ 1236 1237void lge_start(ifp) 1238 struct ifnet *ifp; 1239{ 1240 struct lge_softc *sc; 1241 struct mbuf *m_head = NULL; 1242 u_int32_t idx; 1243 int pkts = 0; 1244 1245 sc = ifp->if_softc; 1246 1247 if (!sc->lge_link) 1248 return; 1249 1250 idx = sc->lge_cdata.lge_tx_prod; 1251 1252 if (ifp->if_flags & IFF_OACTIVE) 1253 return; 1254 1255 while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) { 1256 if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) 1257 break; 1258 1259 IFQ_POLL(&ifp->if_snd, m_head); 1260 if (m_head == NULL) 1261 break; 1262 1263 if (lge_encap(sc, m_head, &idx)) { 1264 ifp->if_flags |= IFF_OACTIVE; 1265 break; 1266 } 1267 1268 /* now we are committed to transmit the packet */ 1269 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1270 pkts++; 1271 1272#if NBPFILTER > 0 1273 /* 1274 * If there's a BPF listener, bounce a copy of this frame 1275 * to him. 
1276 */ 1277 if (ifp->if_bpf) 1278 bpf_mtap(ifp->if_bpf, m_head); 1279#endif 1280 } 1281 if (pkts == 0) 1282 return; 1283 1284 sc->lge_cdata.lge_tx_prod = idx; 1285 1286 /* 1287 * Set a timeout in case the chip goes out to lunch. 1288 */ 1289 ifp->if_timer = 5; 1290 1291 return; 1292} 1293 1294void lge_init(xsc) 1295 void *xsc; 1296{ 1297 struct lge_softc *sc = xsc; 1298 struct ifnet *ifp = &sc->arpcom.ac_if; 1299 int s; 1300 1301 if (ifp->if_flags & IFF_RUNNING) 1302 return; 1303 1304 s = splimp(); 1305 1306 /* 1307 * Cancel pending I/O and free all RX/TX buffers. 1308 */ 1309 lge_stop(sc); 1310 lge_reset(sc); 1311 1312 /* Set MAC address */ 1313 CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1314 CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1315 1316 /* Init circular RX list. */ 1317 if (lge_list_rx_init(sc) == ENOBUFS) { 1318 printf("%s: initialization failed: no " 1319 "memory for rx buffers\n", sc->sc_dv.dv_xname); 1320 lge_stop(sc); 1321 splx(s); 1322 return; 1323 } 1324 1325 /* 1326 * Init tx descriptors. 1327 */ 1328 lge_list_tx_init(sc); 1329 1330 /* Set initial value for MODE1 register. */ 1331 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST| 1332 LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD| 1333 LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0| 1334 LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2); 1335 1336 /* If we want promiscuous mode, set the allframes bit. */ 1337 if (ifp->if_flags & IFF_PROMISC) { 1338 CSR_WRITE_4(sc, LGE_MODE1, 1339 LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC); 1340 } else { 1341 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); 1342 } 1343 1344 /* 1345 * Set the capture broadcast bit to capture broadcast frames. 1346 */ 1347 if (ifp->if_flags & IFF_BROADCAST) { 1348 CSR_WRITE_4(sc, LGE_MODE1, 1349 LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST); 1350 } else { 1351 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST); 1352 } 1353 1354 /* Packet padding workaround? 
*/ 1355 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD); 1356 1357 /* No error frames */ 1358 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS); 1359 1360 /* Receive large frames */ 1361 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS); 1362 1363 /* Workaround: disable RX/TX flow control */ 1364 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL); 1365 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL); 1366 1367 /* Make sure to strip CRC from received frames */ 1368 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC); 1369 1370 /* Turn off magic packet mode */ 1371 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB); 1372 1373 /* Turn off all VLAN stuff */ 1374 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX| 1375 LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT); 1376 1377 /* Workarond: FIFO overflow */ 1378 CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF); 1379 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT); 1380 1381 /* 1382 * Load the multicast filter. 1383 */ 1384 lge_setmulti(sc); 1385 1386 /* 1387 * Enable hardware checksum validation for all received IPv4 1388 * packets, do not reject packets with bad checksums. 1389 */ 1390 CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM| 1391 LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM| 1392 LGE_MODE2_RX_ERRCSUM); 1393 1394 /* 1395 * Enable the delivery of PHY interrupts based on 1396 * link/speed/duplex status chalges. 1397 */ 1398 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL); 1399 1400 /* Enable receiver and transmitter. */ 1401 CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); 1402 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB); 1403 1404 CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0); 1405 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB); 1406 1407 /* 1408 * Enable interrupts. 
1409 */ 1410 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0| 1411 LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS); 1412 1413 lge_ifmedia_upd(ifp); 1414 1415 ifp->if_flags |= IFF_RUNNING; 1416 ifp->if_flags &= ~IFF_OACTIVE; 1417 1418 splx(s); 1419 1420 timeout_add(&sc->lge_timeout, hz); 1421 1422 return; 1423} 1424 1425/* 1426 * Set media options. 1427 */ 1428int lge_ifmedia_upd(ifp) 1429 struct ifnet *ifp; 1430{ 1431 struct lge_softc *sc = ifp->if_softc; 1432 struct mii_data *mii = &sc->lge_mii; 1433 1434 sc->lge_link = 0; 1435 if (mii->mii_instance) { 1436 struct mii_softc *miisc; 1437 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1438 miisc = LIST_NEXT(miisc, mii_list)) 1439 mii_phy_reset(miisc); 1440 } 1441 mii_mediachg(mii); 1442 1443 return(0); 1444} 1445 1446/* 1447 * Report current media status. 1448 */ 1449void lge_ifmedia_sts(ifp, ifmr) 1450 struct ifnet *ifp; 1451 struct ifmediareq *ifmr; 1452{ 1453 struct lge_softc *sc = ifp->if_softc; 1454 struct mii_data *mii = &sc->lge_mii; 1455 1456 mii_pollstat(mii); 1457 ifmr->ifm_active = mii->mii_media_active; 1458 ifmr->ifm_status = mii->mii_media_status; 1459 1460 return; 1461} 1462 1463int lge_ioctl(ifp, command, data) 1464 struct ifnet *ifp; 1465 u_long command; 1466 caddr_t data; 1467{ 1468 struct lge_softc *sc = ifp->if_softc; 1469 struct ifreq *ifr = (struct ifreq *) data; 1470 struct ifaddr *ifa = (struct ifaddr *)data; 1471 struct mii_data *mii; 1472 int s, error = 0; 1473 1474 s = splimp(); 1475 1476 switch(command) { 1477 case SIOCSIFADDR: 1478 ifp->if_flags |= IFF_UP; 1479 switch (ifa->ifa_addr->sa_family) { 1480#ifdef INET 1481 case AF_INET: 1482 lge_init(sc); 1483 arp_ifinit(&sc->arpcom, ifa); 1484 break; 1485#endif /* INET */ 1486 default: 1487 lge_init(sc); 1488 break; 1489 } 1490 break; 1491 case SIOCSIFMTU: 1492 if (ifr->ifr_mtu > ETHERMTU_JUMBO) 1493 error = EINVAL; 1494 else 1495 ifp->if_mtu = ifr->ifr_mtu; 1496 break; 1497 case SIOCSIFFLAGS: 1498 if (ifp->if_flags & IFF_UP) { 1499 if 
(ifp->if_flags & IFF_RUNNING && 1500 ifp->if_flags & IFF_PROMISC && 1501 !(sc->lge_if_flags & IFF_PROMISC)) { 1502 CSR_WRITE_4(sc, LGE_MODE1, 1503 LGE_MODE1_SETRST_CTL1| 1504 LGE_MODE1_RX_PROMISC); 1505 } else if (ifp->if_flags & IFF_RUNNING && 1506 !(ifp->if_flags & IFF_PROMISC) && 1507 sc->lge_if_flags & IFF_PROMISC) { 1508 CSR_WRITE_4(sc, LGE_MODE1, 1509 LGE_MODE1_RX_PROMISC); 1510 } else { 1511 ifp->if_flags &= ~IFF_RUNNING; 1512 lge_init(sc); 1513 } 1514 } else { 1515 if (ifp->if_flags & IFF_RUNNING) 1516 lge_stop(sc); 1517 } 1518 sc->lge_if_flags = ifp->if_flags; 1519 error = 0; 1520 break; 1521 case SIOCADDMULTI: 1522 case SIOCDELMULTI: 1523 error = (command == SIOCADDMULTI) 1524 ? ether_addmulti(ifr, &sc->arpcom) 1525 : ether_delmulti(ifr, &sc->arpcom); 1526 1527 if (error == ENETRESET) { 1528 if (ifp->if_flags & IFF_RUNNING) 1529 lge_setmulti(sc); 1530 error = 0; 1531 } 1532 break; 1533 case SIOCGIFMEDIA: 1534 case SIOCSIFMEDIA: 1535 mii = &sc->lge_mii; 1536 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1537 break; 1538 default: 1539 error = EINVAL; 1540 break; 1541 } 1542 1543 splx(s); 1544 1545 return(error); 1546} 1547 1548void lge_watchdog(ifp) 1549 struct ifnet *ifp; 1550{ 1551 struct lge_softc *sc; 1552 1553 sc = ifp->if_softc; 1554 1555 ifp->if_oerrors++; 1556 printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname); 1557 1558 lge_stop(sc); 1559 lge_reset(sc); 1560 ifp->if_flags &= ~IFF_RUNNING; 1561 lge_init(sc); 1562 1563 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1564 lge_start(ifp); 1565 1566 return; 1567} 1568 1569/* 1570 * Stop the adapter and free any mbufs allocated to the 1571 * RX and TX lists. 
1572 */ 1573void lge_stop(sc) 1574 struct lge_softc *sc; 1575{ 1576 register int i; 1577 struct ifnet *ifp; 1578 1579 ifp = &sc->arpcom.ac_if; 1580 ifp->if_timer = 0; 1581 timeout_del(&sc->lge_timeout); 1582 1583 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1584 1585 CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB); 1586 1587 /* Disable receiver and transmitter. */ 1588 CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB); 1589 sc->lge_link = 0; 1590 1591 /* 1592 * Free data in the RX lists. 1593 */ 1594 for (i = 0; i < LGE_RX_LIST_CNT; i++) { 1595 if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) { 1596 m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf); 1597 sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL; 1598 } 1599 } 1600 bzero((char *)&sc->lge_ldata->lge_rx_list, 1601 sizeof(sc->lge_ldata->lge_rx_list)); 1602 1603 /* 1604 * Free the TX list buffers. 1605 */ 1606 for (i = 0; i < LGE_TX_LIST_CNT; i++) { 1607 if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) { 1608 m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf); 1609 sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL; 1610 } 1611 } 1612 1613 bzero((char *)&sc->lge_ldata->lge_tx_list, 1614 sizeof(sc->lge_ldata->lge_tx_list)); 1615 1616 return; 1617} 1618 1619/* 1620 * Stop all chip I/O so that the kernel's probe routines don't 1621 * get confused by errant DMAs when rebooting. 1622 */ 1623void lge_shutdown(xsc) 1624 void *xsc; 1625{ 1626 struct lge_softc *sc = (struct lge_softc *)xsc; 1627 1628 lge_reset(sc); 1629 lge_stop(sc); 1630 1631 return; 1632} 1633 1634struct cfattach lge_ca = { 1635 sizeof(struct lge_softc), lge_probe, lge_attach 1636}; 1637 1638struct cfdriver lge_cd = { 1639 0, "lge", DV_IFNET 1640}; 1641