if_lge.c revision 1.69
/*	$OpenBSD: if_lge.c,v 1.69 2015/11/20 03:35:23 dlg Exp $ */
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $
 */

/*
 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
 * documentation not available, but ask me nicely.
 *
 * Written by Bill Paul <william.paul@windriver.com>
 * Wind River Systems
 */

/*
 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
 * It's a 64-bit PCI part that supports TCP/IP checksum offload,
 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
 * are three supported methods for data transfer between host and
 * NIC: programmed I/O, traditional scatter/gather DMA and Packet
 * Propulsion Technology (tm) DMA. The latter mechanism is a form
 * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
 * into a table at device initialization time. The rationale is that
 * the virtual to physical address translation needed for normal
 * scatter/gather DMA is more expensive than the data copy needed
 * for double buffering. This may be true in Windows NT and the like,
 * but it isn't true for us, at least on the x86 arch. This driver
 * uses the scatter/gather I/O method for both TX and RX.
 *
 * The LXT1001 only supports TCP/IP checksum offload on receive.
 * Also, the VLAN tagging is done using a 16-entry table which allows
 * the chip to perform hardware filtering based on VLAN tags. Sadly,
 * our vlan support doesn't currently play well with this kind of
 * hardware support.
 *
 * Special thanks to:
 * - Jeff James at Intel, for arranging to have the LXT1001 manual
 *   released (at long last)
 * - Beny Chen at D-Link, for actually sending it to me
 * - Brad Short and Keith Alexis at SMC, for sending me sample
 *   SMC9462SX and SMC9462TX adapters for testing
 * - Paul Saab at Y!, for not killing me (though it remains to be seen
 *   if in fact he did me much of a favor)
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */
#define VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/* Register the chip through its I/O BAR rather than the memory BAR. */
#define LGE_USEIOSPACE

#include <dev/pci/if_lgereg.h>

/* Autoconf glue. */
int lge_probe(struct device *, void *, void *);
void lge_attach(struct device *, struct device *, void *);

struct cfattach lge_ca = {
        sizeof(struct lge_softc), lge_probe, lge_attach
};

struct cfdriver lge_cd = {
        NULL, "lge", DV_IFNET
};

/* Descriptor ring management and data path. */
int lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
    struct mbuf *);
int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
void lge_rxeof(struct lge_softc *, int);
void lge_txeof(struct lge_softc *);
int lge_intr(void *);
void lge_tick(void *);
void lge_start(struct ifnet *);
int lge_ioctl(struct ifnet *, u_long, caddr_t);
void lge_init(void *);
void lge_stop(struct lge_softc *);
void lge_watchdog(struct ifnet *);
int lge_ifmedia_upd(struct ifnet *);
void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* EEPROM access. */
void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);

/* MII bus callbacks. */
int lge_miibus_readreg(struct device *, int, int);
void lge_miibus_writereg(struct device *, int, int, int);
void lge_miibus_statchg(struct device *);

void lge_setmulti(struct lge_softc *);
void lge_reset(struct lge_softc *);
int lge_list_rx_init(struct lge_softc *);
int lge_list_tx_init(struct lge_softc *);

#ifdef LGE_DEBUG
#define DPRINTF(x)	if (lgedebug) printf x
#define DPRINTFN(n,x)	if (lgedebug >= (n)) printf x
int	lgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* PCI IDs this driver attaches to. */
const struct pci_matchid lge_devices[] = {
        { PCI_VENDOR_LEVEL1, PCI_PRODUCT_LEVEL1_LXT1001 }
};

/* Read-modify-write helpers for 32-bit CSRs. */
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 *
 * The chip returns 32 bits per EEPROM access; each access covers two
 * consecutive 16-bit words, so the low bit of 'addr' selects the high
 * or low half of the value read at offset (addr >> 1).  On timeout the
 * destination is left unmodified and a diagnostic is printed.
 */
void
lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
{
        int i;
        u_int32_t val;

        /* Kick off a single-access read at the 32-bit word offset. */
        CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
            LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

        /* Busy-wait until the chip clears the command bit. */
        for (i = 0; i < LGE_TIMEOUT; i++)
                if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
                        break;

        if (i == LGE_TIMEOUT) {
                printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
                return;
        }

        val = CSR_READ_4(sc, LGE_EEDATA);

        /* Odd addresses live in the upper 16 bits of the data register. */
        if (addr & 1)
                *dest = (val >> 16) & 0xFFFF;
        else
                *dest = val & 0xFFFF;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off,
    int cnt, int swap)
{
        int i;
        u_int16_t word = 0, *ptr;

        /* Fetch 'cnt' 16-bit words starting at EEPROM offset 'off';
         * 'swap' converts each word from network to host byte order. */
        for (i = 0; i < cnt; i++) {
                lge_eeprom_getword(sc, off + i, &word);
                ptr = (u_int16_t *)(dest + (i * 2));
                if (swap)
                        *ptr = ntohs(word);
                else
                        *ptr = word;
        }
}

/*
 * MII bus read callback: read PHY register 'reg' of PHY 'phy' through
 * the chip's GMII control register.  Returns 0 on timeout.
 */
int
lge_miibus_readreg(struct device *dev, int phy, int reg)
{
        struct lge_softc *sc = (struct lge_softc *)dev;
        int i;

        /*
         * If we have a non-PCS PHY, pretend that the internal
         * autoneg stuff at PHY address 0 isn't there so that
         * the miibus code will find only the GMII PHY.
         */
        if (sc->lge_pcs == 0 && phy == 0)
                return (0);

        CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

        for (i = 0; i < LGE_TIMEOUT; i++)
                if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
                        break;

        if (i == LGE_TIMEOUT) {
                printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
                return (0);
        }

        /* Read data is returned in the upper 16 bits of GMIICTL. */
        return (CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

/*
 * MII bus write callback: write 'data' to PHY register 'reg' of PHY 'phy'.
 */
void
lge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
        struct lge_softc *sc = (struct lge_softc *)dev;
        int i;

        CSR_WRITE_4(sc, LGE_GMIICTL,
            (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

        for (i = 0; i < LGE_TIMEOUT; i++)
                if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
                        break;

        if (i == LGE_TIMEOUT) {
                printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
        }
}

/*
 * MII status-change callback: mirror the negotiated speed and duplex
 * into the MAC's GMII mode register.
 */
void
lge_miibus_statchg(struct device *dev)
{
        struct lge_softc *sc = (struct lge_softc *)dev;
        struct mii_data *mii = &sc->lge_mii;

        LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_T:
        case IFM_1000_SX:
                LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
                break;
        case IFM_100_TX:
                LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
                break;
        case IFM_10_T:
                LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
                break;
        default:
                /*
                 * Choose something, even if it's wrong. Clearing
                 * all the bits will hose autoneg on the internal
                 * PHY.
                 */
                LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
                break;
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
        } else {
                LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
        }
}

/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 * interface's multicast list; falls back to all-multi/promiscuous
 * by setting every hash bit.
 */
void
lge_setmulti(struct lge_softc *sc)
{
        struct arpcom *ac = &sc->arpcom;
        struct ifnet *ifp = &ac->ac_if;
        struct ether_multi *enm;
        struct ether_multistep step;
        u_int32_t h = 0, hashes[2] = { 0, 0 };

        /* Make sure multicast hash table is enabled. */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

        /* Address ranges can't be filtered by hashing; go all-multi. */
        if (ac->ac_multirangecnt > 0)
                ifp->if_flags |= IFF_ALLMULTI;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
                return;
        }

        /* first, zot all the existing hash bits */
        CSR_WRITE_4(sc, LGE_MAR0, 0);
        CSR_WRITE_4(sc, LGE_MAR1, 0);

        /* now program new ones: top 6 bits of the big-endian CRC32. */
        ETHER_FIRST_MULTI(step, ac, enm);
        while (enm != NULL) {
                h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
                    0x0000003F;
                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
                ETHER_NEXT_MULTI(step, enm);
        }

        CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
        CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
}

/*
 * Issue a soft reset and spin until the chip deasserts the reset bit.
 */
void
lge_reset(struct lge_softc *sc)
{
        int i;

        LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

        for (i = 0; i < LGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
                        break;
        }

        if (i == LGE_TIMEOUT)
                printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
lge_probe(struct device *parent, void *match, void *aux)
{
        return (pci_matchbyid((struct pci_attach_args *)aux, lge_devices,
            nitems(lge_devices)));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * On failure, resources acquired so far are released via the
 * fall-through fail_* labels at the bottom (later labels release
 * earlier resources).
 */
void
lge_attach(struct device *parent, struct device *self, void *aux)
{
        struct lge_softc *sc = (struct lge_softc *)self;
        struct pci_attach_args *pa = aux;
        pci_chipset_tag_t pc = pa->pa_pc;
        pci_intr_handle_t ih;
        const char *intrstr = NULL;
        bus_size_t size;
        bus_dma_segment_t seg;
        bus_dmamap_t dmamap;
        int rseg;
        u_char eaddr[ETHER_ADDR_LEN];
#ifndef LGE_USEIOSPACE
        pcireg_t memtype;
#endif
        struct ifnet *ifp;
        caddr_t kva;

        /* Power the device up before touching any registers. */
        pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

        /*
         * Map control/status registers.
         */
        DPRINTFN(5, ("Map control/status regs\n"));

        DPRINTFN(5, ("pci_mapreg_map\n"));
#ifdef LGE_USEIOSPACE
        if (pci_mapreg_map(pa, LGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
            &sc->lge_btag, &sc->lge_bhandle, NULL, &size, 0)) {
                printf(": can't map i/o space\n");
                return;
        }
#else
        memtype = pci_mapreg_type(pc, pa->pa_tag, LGE_PCI_LOMEM);
        if (pci_mapreg_map(pa, LGE_PCI_LOMEM, memtype, 0, &sc->lge_btag,
            &sc->lge_bhandle, NULL, &size, 0)) {
                printf(": can't map mem space\n");
                return;
        }
#endif

        DPRINTFN(5, ("pci_intr_map\n"));
        if (pci_intr_map(pa, &ih)) {
                printf(": couldn't map interrupt\n");
                goto fail_1;
        }

        DPRINTFN(5, ("pci_intr_string\n"));
        intrstr = pci_intr_string(pc, ih);
        DPRINTFN(5, ("pci_intr_establish\n"));
        sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
            sc->sc_dv.dv_xname);
        if (sc->lge_intrhand == NULL) {
                printf(": couldn't establish interrupt");
                if (intrstr != NULL)
                        printf(" at %s", intrstr);
                printf("\n");
                goto fail_1;
        }
        printf(": %s", intrstr);

        /* Reset the adapter. */
        DPRINTFN(5, ("lge_reset\n"));
        lge_reset(sc);

        /*
         * Get station address from the EEPROM: three 16-bit words.
         */
        DPRINTFN(5, ("lge_read_eeprom\n"));
        lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
        lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
        lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

        /*
         * A Level 1 chip was detected. Inform the world.
         */
        printf(", address %s\n", ether_sprintf(eaddr));

        bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

        /* Allocate one DMA-safe chunk holding both descriptor rings. */
        sc->sc_dmatag = pa->pa_dmat;
        DPRINTFN(5, ("bus_dmamem_alloc\n"));
        if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
            PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
                printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
                goto fail_2;
        }
        DPRINTFN(5, ("bus_dmamem_map\n"));
        if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
            sizeof(struct lge_list_data), &kva,
            BUS_DMA_NOWAIT)) {
                printf("%s: can't map dma buffers (%zd bytes)\n",
                    sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
                goto fail_3;
        }
        DPRINTFN(5, ("bus_dmamem_create\n"));
        if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
            sizeof(struct lge_list_data), 0,
            BUS_DMA_NOWAIT, &dmamap)) {
                printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
                goto fail_4;
        }
        DPRINTFN(5, ("bus_dmamem_load\n"));
        if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
            sizeof(struct lge_list_data), NULL,
            BUS_DMA_NOWAIT)) {
                goto fail_5;
        }

        DPRINTFN(5, ("bzero\n"));
        sc->lge_ldata = (struct lge_list_data *)kva;

        /* Hook up the network interface. */
        ifp = &sc->arpcom.ac_if;
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = lge_ioctl;
        ifp->if_start = lge_start;
        ifp->if_watchdog = lge_watchdog;
        ifp->if_hardmtu = LGE_JUMBO_MTU;
        IFQ_SET_MAXLEN(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
        IFQ_SET_READY(&ifp->if_snd);
        DPRINTFN(5, ("bcopy\n"));
        bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

        ifp->if_capabilities = IFCAP_VLAN_MTU;

        /* Record whether the internal PCS (TBI) is in use; this
         * changes how PHY address 0 is treated in lge_miibus_readreg(). */
        if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
                sc->lge_pcs = 1;
        else
                sc->lge_pcs = 0;

        /*
         * Do MII setup.
         */
        DPRINTFN(5, ("mii setup\n"));
        sc->lge_mii.mii_ifp = ifp;
        sc->lge_mii.mii_readreg = lge_miibus_readreg;
        sc->lge_mii.mii_writereg = lge_miibus_writereg;
        sc->lge_mii.mii_statchg = lge_miibus_statchg;
        ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
            lge_ifmedia_sts);
        mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);

        if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
                printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
                ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
                    0, NULL);
                ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
        } else {
                DPRINTFN(5, ("ifmedia_set\n"));
                ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);
        }

        /*
         * Call MI attach routine.
         */
        DPRINTFN(5, ("if_attach\n"));
        if_attach(ifp);
        DPRINTFN(5, ("ether_ifattach\n"));
        ether_ifattach(ifp);
        DPRINTFN(5, ("timeout_set\n"));
        timeout_set(&sc->lge_timeout, lge_tick, sc);
        timeout_add_sec(&sc->lge_timeout, 1);
        return;

fail_5:
        bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
        bus_dmamem_unmap(sc->sc_dmatag, kva,
            sizeof(struct lge_list_data));

fail_3:
        bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
        pci_intr_disestablish(pc, sc->lge_intrhand);

fail_1:
        bus_space_unmap(sc->lge_btag, sc->lge_bhandle, size);
}

/*
 * Initialize the transmit descriptors: clear every slot and reset the
 * producer/consumer indexes.
 */
int
lge_list_tx_init(struct lge_softc *sc)
{
        struct lge_list_data *ld;
        struct lge_ring_data *cd;
        int i;

        cd = &sc->lge_cdata;
        ld = sc->lge_ldata;
        for (i = 0; i < LGE_TX_LIST_CNT; i++) {
                ld->lge_tx_list[i].lge_mbuf = NULL;
                ld->lge_tx_list[i].lge_ctl = 0;
        }

        cd->lge_tx_prod = cd->lge_tx_cons = 0;

        return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them.
 * Note that we arrange the descriptors in a closed ring, so that the
 * last descriptor points back to the first.
 */
int
lge_list_rx_init(struct lge_softc *sc)
{
        struct lge_list_data *ld;
        struct lge_ring_data *cd;
        int i;

        ld = sc->lge_ldata;
        cd = &sc->lge_cdata;

        cd->lge_rx_prod = cd->lge_rx_cons = 0;

        CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

        for (i = 0; i < LGE_RX_LIST_CNT; i++) {
                /* Stop early if the chip's RX command FIFO is full. */
                if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
                        break;
                if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
                        return (ENOBUFS);
        }

        /* Clear possible 'rx command queue empty' interrupt. */
        CSR_READ_4(sc, LGE_ISR);

        return (0);
}

/*
 * Initialize a RX descriptor and attach a MBUF cluster.
 * If 'm' is NULL a fresh cluster is allocated; otherwise the caller's
 * mbuf is recycled in place.
 */
int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
        struct mbuf *m_new = NULL;

        if (m == NULL) {
                m_new = MCLGETI(NULL, LGE_JLEN, NULL, M_DONTWAIT);
                if (m_new == NULL)
                        return (ENOBUFS);
        } else {
                /*
                 * We're re-using a previously allocated mbuf;
                 * be sure to re-init pointers and lengths to
                 * default values.
                 */
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
        }
        m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;

        /*
         * Adjust alignment so packet payload begins on a
         * longword boundary. Mandatory for Alpha, useful on
         * x86 too.
         */
        m_adj(m_new, ETHER_ALIGN);

        c->lge_mbuf = m_new;
        c->lge_fragptr_hi = 0;
        c->lge_fragptr_lo = VTOPHYS(mtod(m_new, caddr_t));
        c->lge_fraglen = m_new->m_len;
        c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
        c->lge_sts = 0;

        /*
         * Put this buffer in the RX command FIFO. To do this,
         * we just write the physical address of the descriptor
         * into the RX descriptor address registers. Note that
         * there are two registers, one high DWORD and one low
         * DWORD, which lets us specify a 64-bit address if
         * desired. We only use a 32-bit address for now.
         * Writing to the low DWORD register is what actually
         * causes the command to be issued, so we do that
         * last.
         */
        CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, VTOPHYS(c));
        LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

        return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  'cnt' is the number of completed RX
 * DMAs reported by the interrupt status register.
 */
void
lge_rxeof(struct lge_softc *sc, int cnt)
{
        struct mbuf_list ml = MBUF_LIST_INITIALIZER();
        struct mbuf *m;
        struct ifnet *ifp;
        struct lge_rx_desc *cur_rx;
        int c, i, total_len = 0;
        u_int32_t rxsts, rxctl;

        ifp = &sc->arpcom.ac_if;

        /* Find out how many frames were processed. */
        c = cnt;
        i = sc->lge_cdata.lge_rx_cons;

        /* Suck them in. */
        while(c) {
                struct mbuf *m0 = NULL;

                cur_rx = &sc->lge_ldata->lge_rx_list[i];
                rxctl = cur_rx->lge_ctl;
                rxsts = cur_rx->lge_sts;
                m = cur_rx->lge_mbuf;
                cur_rx->lge_mbuf = NULL;
                total_len = LGE_RXBYTES(cur_rx);
                LGE_INC(i, LGE_RX_LIST_CNT);
                c--;

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring.
                 */
                if (rxctl & LGE_RXCTL_ERRMASK) {
                        ifp->if_ierrors++;
                        lge_newbuf(sc, &LGE_RXTAIL(sc), m);
                        continue;
                }

                /* If no replacement cluster is available, copy the frame
                 * out and recycle the old cluster back to the ring. */
                if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
                        m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
                        lge_newbuf(sc, &LGE_RXTAIL(sc), m);
                        if (m0 == NULL) {
                                ifp->if_ierrors++;
                                continue;
                        }
                        m = m0;
                } else {
                        m->m_pkthdr.len = m->m_len = total_len;
                }

                /* Do IP checksum checking. */
                if (rxsts & LGE_RXSTS_ISIP) {
                        if (!(rxsts & LGE_RXSTS_IPCSUMERR))
                                m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
                }
                if (rxsts & LGE_RXSTS_ISTCP) {
                        if (!(rxsts & LGE_RXSTS_TCPCSUMERR))
                                m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
                }
                if (rxsts & LGE_RXSTS_ISUDP) {
                        if (!(rxsts & LGE_RXSTS_UDPCSUMERR))
                                m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
                }

                ml_enqueue(&ml, m);
        }

        if_input(ifp, &ml);

        sc->lge_cdata.lge_rx_cons = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
lge_txeof(struct lge_softc *sc)
{
        struct lge_tx_desc *cur_tx = NULL;
        struct ifnet *ifp;
        u_int32_t idx, txdone;

        ifp = &sc->arpcom.ac_if;

        /* Clear the timeout timer. */
        ifp->if_timer = 0;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        idx = sc->lge_cdata.lge_tx_cons;
        txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

        while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
                cur_tx = &sc->lge_ldata->lge_tx_list[idx];

                ifp->if_opackets++;
                if (cur_tx->lge_mbuf != NULL) {
                        m_freem(cur_tx->lge_mbuf);
                        cur_tx->lge_mbuf = NULL;
                }
                cur_tx->lge_ctl = 0;

                txdone--;
                LGE_INC(idx, LGE_TX_LIST_CNT);
                ifp->if_timer = 0;
        }

        sc->lge_cdata.lge_tx_cons = idx;

        /* We reclaimed at least one slot; allow lge_start() to run. */
        if (cur_tx != NULL)
                ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Once-a-second timer: accumulate collision statistics and, while the
 * link is down, poll the PHY until it comes up.
 */
void
lge_tick(void *xsc)
{
        struct lge_softc *sc = xsc;
        struct mii_data *mii = &sc->lge_mii;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int s;

        s = splnet();

        /* Reading LGE_STATSVAL returns the counter selected via
         * LGE_STATSIDX. */
        CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
        ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
        CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
        ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

        if (!sc->lge_link) {
                mii_tick(mii);
                if (mii->mii_media_status & IFM_ACTIVE &&
                    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                        sc->lge_link++;
                        /* Link just came up: kick out anything queued. */
                        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                                lge_start(ifp);
                }
        }

        timeout_add_sec(&sc->lge_timeout, 1);

        splx(s);
}

/*
 * Interrupt handler.  Returns nonzero if the interrupt was ours.
 */
int
lge_intr(void *arg)
{
        struct lge_softc *sc;
        struct ifnet *ifp;
        u_int32_t status;
        int claimed = 0;

        sc = arg;
        ifp = &sc->arpcom.ac_if;

        /* Suppress unwanted interrupts */
        if (!(ifp->if_flags & IFF_UP)) {
                lge_stop(sc);
                return (0);
        }

        for (;;) {
                /*
                 * Reading the ISR register clears all interrupts, and
                 * clears the 'interrupts enabled' bit in the IMR
                 * register.
                 */
                status = CSR_READ_4(sc, LGE_ISR);

                if ((status & LGE_INTRS) == 0)
                        break;

                claimed = 1;

                if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
                        lge_txeof(sc);

                if (status & LGE_ISR_RXDMA_DONE)
                        lge_rxeof(sc, LGE_RX_DMACNT(status));

                /* RX command FIFO ran dry: reinitialize the chip. */
                if (status & LGE_ISR_RXCMDFIFO_EMPTY)
                        lge_init(sc);

                if (status & LGE_ISR_PHY_INTR) {
                        /* Link change: force lge_tick() to re-poll now. */
                        sc->lge_link = 0;
                        timeout_del(&sc->lge_timeout);
                        lge_tick(sc);
                }
        }

        /* Re-enable interrupts. */
        CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                lge_start(ifp);

        return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * NOTE(review): 'frag' is not bounded against the size of the
 * cur_tx->lge_frags array; a chain with more fragments than the
 * descriptor can hold would overrun it — confirm against the
 * descriptor layout in if_lgereg.h.
 */
int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
        struct lge_frag *f = NULL;
        struct lge_tx_desc *cur_tx;
        struct mbuf *m;
        int frag = 0, tot_len = 0;

        /*
         * Start packing the mbufs in this chain into
         * the fragment pointers. Stop when we run out
         * of fragments or hit the end of the mbuf chain.
         */
        m = m_head;
        cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
        frag = 0;

        for (m = m_head; m != NULL; m = m->m_next) {
                if (m->m_len != 0) {
                        tot_len += m->m_len;
                        f = &cur_tx->lge_frags[frag];
                        f->lge_fraglen = m->m_len;
                        f->lge_fragptr_lo = VTOPHYS(mtod(m, vaddr_t));
                        f->lge_fragptr_hi = 0;
                        frag++;
                }
        }

        if (m != NULL)
                return (ENOBUFS);

        cur_tx->lge_mbuf = m_head;
        cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
        LGE_INC((*txidx), LGE_TX_LIST_CNT);

        /* Queue for transmit: writing the descriptor's physical address
         * to the low DWORD register issues the command. */
        CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, VTOPHYS(cur_tx));

        return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
lge_start(struct ifnet *ifp)
{
        struct lge_softc *sc;
        struct mbuf *m_head = NULL;
        u_int32_t idx;
        int pkts = 0;

        sc = ifp->if_softc;

        /* Nothing to do until the link is up. */
        if (!sc->lge_link)
                return;

        idx = sc->lge_cdata.lge_tx_prod;

        if (ifp->if_flags & IFF_OACTIVE)
                return;

        /* Fill TX slots until the ring or the chip's command FIFO fills. */
        while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
                if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
                        break;

                m_head = ifq_deq_begin(&ifp->if_snd);
                if (m_head == NULL)
                        break;

                if (lge_encap(sc, m_head, &idx)) {
                        ifq_deq_rollback(&ifp->if_snd, m_head);
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                /* now we are committed to transmit the packet */
                ifq_deq_commit(&ifp->if_snd, m_head);
                pkts++;

#if NBPFILTER > 0
                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
        }
        if (pkts == 0)
                return;

        sc->lge_cdata.lge_tx_prod = idx;

        /*
         * Set a timeout in case the chip goes out to lunch.
         */
        ifp->if_timer = 5;
}

/*
 * Stop, reset and (re)program the chip, then bring the interface up.
 * Called with the softc in any state; runs at splnet.
 */
void
lge_init(void *xsc)
{
        struct lge_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int s;

        s = splnet();

        /*
         * Cancel pending I/O and free all RX/TX buffers.
         */
        lge_stop(sc);
        lge_reset(sc);

        /* Set MAC address */
        CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
        CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

        /* Init circular RX list. */
        if (lge_list_rx_init(sc) == ENOBUFS) {
                printf("%s: initialization failed: no "
                    "memory for rx buffers\n", sc->sc_dv.dv_xname);
                lge_stop(sc);
                splx(s);
                return;
        }

        /*
         * Init tx descriptors.
         */
        lge_list_tx_init(sc);

        /* Set initial value for MODE1 register.  MODE1 writes pair a
         * SETRST_CTL bit with the feature bits it gates: with the
         * SETRST bit present the feature bits are set, without it they
         * are cleared. */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
            LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
            LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
            LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC) {
                CSR_WRITE_4(sc, LGE_MODE1,
                    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
        } else {
                CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
        }

        /*
         * Set the capture broadcast bit to capture broadcast frames.
         */
        if (ifp->if_flags & IFF_BROADCAST) {
                CSR_WRITE_4(sc, LGE_MODE1,
                    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
        } else {
                CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
        }

        /* Packet padding workaround? */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

        /* No error frames */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

        /* Receive large frames */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

        /* Workaround: disable RX/TX flow control */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

        /* Make sure to strip CRC from received frames */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

        /* Turn off magic packet mode */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

        /* Turn off all VLAN stuff */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
            LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

        /* Workaround: FIFO overflow */
        CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
        CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

        /*
         * Load the multicast filter.
         */
        lge_setmulti(sc);

        /*
         * Enable hardware checksum validation for all received IPv4
         * packets, do not reject packets with bad checksums.
         */
        CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
            LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
            LGE_MODE2_RX_ERRCSUM);

        /*
         * Enable the delivery of PHY interrupts based on
         * link/speed/duplex status changes.
         */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

        /* Enable receiver and transmitter. */
        CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

        CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

        /*
         * Enable interrupts.
         */
        CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
            LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

        lge_ifmedia_upd(ifp);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        splx(s);

        timeout_add_sec(&sc->lge_timeout, 1);
}

/*
 * Set media options.
 */
int
lge_ifmedia_upd(struct ifnet *ifp)
{
        struct lge_softc *sc = ifp->if_softc;
        struct mii_data *mii = &sc->lge_mii;

        /* Mark link down; lge_tick() will re-detect it. */
        sc->lge_link = 0;
        if (mii->mii_instance) {
                struct mii_softc *miisc;
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }
        mii_mediachg(mii);

        return (0);
}

/*
 * Report current media status.
 */
void
lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct lge_softc *sc = ifp->if_softc;
        struct mii_data *mii = &sc->lge_mii;

        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Interface ioctl handler.  Runs at splnet.
 */
int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct lge_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        struct mii_data *mii;
        int s, error = 0;

        s = splnet();

        switch(command) {
        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                if (!(ifp->if_flags & IFF_RUNNING))
                        lge_init(sc);
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        /* Toggle promiscuous mode in hardware without a
                         * full reinit when only IFF_PROMISC changed. */
                        if (ifp->if_flags & IFF_RUNNING &&
                            ifp->if_flags & IFF_PROMISC &&
                            !(sc->lge_if_flags & IFF_PROMISC)) {
                                CSR_WRITE_4(sc, LGE_MODE1,
                                    LGE_MODE1_SETRST_CTL1|
                                    LGE_MODE1_RX_PROMISC);
                                lge_setmulti(sc);
                        } else if (ifp->if_flags & IFF_RUNNING &&
                            !(ifp->if_flags & IFF_PROMISC) &&
                            sc->lge_if_flags & IFF_PROMISC) {
                                CSR_WRITE_4(sc, LGE_MODE1,
                                    LGE_MODE1_RX_PROMISC);
                                lge_setmulti(sc);
                        } else if (ifp->if_flags & IFF_RUNNING &&
                            (ifp->if_flags ^ sc->lge_if_flags) & IFF_ALLMULTI) {
                                lge_setmulti(sc);
                        } else {
                                if (!(ifp->if_flags & IFF_RUNNING))
                                        lge_init(sc);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                lge_stop(sc);
                }
                sc->lge_if_flags = ifp->if_flags;
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = &sc->lge_mii;
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;

        default:
                error = ether_ioctl(ifp, &sc->arpcom, command, data);
        }

        /* Multicast list changed: reprogram the filter. */
        if (error == ENETRESET) {
                if (ifp->if_flags & IFF_RUNNING)
                        lge_setmulti(sc);
                error = 0;
        }

        splx(s);
        return (error);
}

/*
 * Transmit watchdog: the chip failed to complete a send within the
 * timeout set by lge_start(); reset and reinitialize it.
 */
void
lge_watchdog(struct ifnet *ifp)
{
        struct lge_softc *sc;

        sc = ifp->if_softc;

        ifp->if_oerrors++;
        printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

        lge_stop(sc);
        lge_reset(sc);
        lge_init(sc);

        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                lge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
lge_stop(struct lge_softc *sc)
{
        int i;
        struct ifnet *ifp;

        ifp = &sc->arpcom.ac_if;
        ifp->if_timer = 0;
        timeout_del(&sc->lge_timeout);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

        /* Mask interrupts (IMR write without SETRST clears the bit). */
        CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

        /* Disable receiver and transmitter. */
        CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
        sc->lge_link = 0;

        /*
         * Free data in the RX lists.
         */
        for (i = 0; i < LGE_RX_LIST_CNT; i++) {
                if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
                        m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
                        sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
                }
        }
        bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));

        /*
         * Free the TX list buffers.
         */
        for (i = 0; i < LGE_TX_LIST_CNT; i++) {
                if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
                        m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
                        sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
                }
        }

        bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
}