/* i82557.c revision 1.16 */
1/* $NetBSD: i82557.c,v 1.16 2000/02/02 08:05:27 thorpej Exp $ */ 2 3/*- 4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40/* 41 * Copyright (c) 1995, David Greenman 42 * All rights reserved. 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice unmodified, this list of conditions, and the following 49 * disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 
65 * 66 * Id: if_fxp.c,v 1.47 1998/01/08 23:42:29 eivind Exp 67 */ 68 69/* 70 * Device driver for the Intel i82557 fast Ethernet controller, 71 * and its successors, the i82558 and i82559. 72 */ 73 74#include "opt_inet.h" 75#include "opt_ns.h" 76#include "bpfilter.h" 77#include "rnd.h" 78 79#include <sys/param.h> 80#include <sys/systm.h> 81#include <sys/mbuf.h> 82#include <sys/malloc.h> 83#include <sys/kernel.h> 84#include <sys/socket.h> 85#include <sys/ioctl.h> 86#include <sys/errno.h> 87#include <sys/device.h> 88 89#include <machine/endian.h> 90 91#include <vm/vm.h> /* for PAGE_SIZE */ 92 93#if NRND > 0 94#include <sys/rnd.h> 95#endif 96 97#include <net/if.h> 98#include <net/if_dl.h> 99#include <net/if_media.h> 100#include <net/if_ether.h> 101 102#if NBPFILTER > 0 103#include <net/bpf.h> 104#endif 105 106#ifdef INET 107#include <netinet/in.h> 108#include <netinet/if_inarp.h> 109#endif 110 111#ifdef NS 112#include <netns/ns.h> 113#include <netns/ns_if.h> 114#endif 115 116#include <machine/bus.h> 117#include <machine/intr.h> 118 119#include <dev/mii/miivar.h> 120 121#include <dev/ic/i82557reg.h> 122#include <dev/ic/i82557var.h> 123 124/* 125 * NOTE! On the Alpha, we have an alignment constraint. The 126 * card DMAs the packet immediately following the RFA. However, 127 * the first thing in the packet is a 14-byte Ethernet header. 128 * This means that the packet is misaligned. To compensate, 129 * we actually offset the RFA 2 bytes into the cluster. This 130 * alignes the packet after the Ethernet header at a 32-bit 131 * boundary. HOWEVER! This means that the RFA is misaligned! 132 */ 133#define RFA_ALIGNMENT_FUDGE 2 134 135/* 136 * Template for default configuration parameters. 137 * See struct fxp_cb_config for the bit definitions. 
138 */ 139u_int8_t fxp_cb_config_template[] = { 140 0x0, 0x0, /* cb_status */ 141 0x80, 0x2, /* cb_command */ 142 0xff, 0xff, 0xff, 0xff, /* link_addr */ 143 0x16, /* 0 */ 144 0x8, /* 1 */ 145 0x0, /* 2 */ 146 0x0, /* 3 */ 147 0x0, /* 4 */ 148 0x80, /* 5 */ 149 0xb2, /* 6 */ 150 0x3, /* 7 */ 151 0x1, /* 8 */ 152 0x0, /* 9 */ 153 0x26, /* 10 */ 154 0x0, /* 11 */ 155 0x60, /* 12 */ 156 0x0, /* 13 */ 157 0xf2, /* 14 */ 158 0x48, /* 15 */ 159 0x0, /* 16 */ 160 0x40, /* 17 */ 161 0xf3, /* 18 */ 162 0x0, /* 19 */ 163 0x3f, /* 20 */ 164 0x5 /* 21 */ 165}; 166 167void fxp_mii_initmedia __P((struct fxp_softc *)); 168int fxp_mii_mediachange __P((struct ifnet *)); 169void fxp_mii_mediastatus __P((struct ifnet *, struct ifmediareq *)); 170 171void fxp_80c24_initmedia __P((struct fxp_softc *)); 172int fxp_80c24_mediachange __P((struct ifnet *)); 173void fxp_80c24_mediastatus __P((struct ifnet *, struct ifmediareq *)); 174 175inline void fxp_scb_wait __P((struct fxp_softc *)); 176 177void fxp_start __P((struct ifnet *)); 178int fxp_ioctl __P((struct ifnet *, u_long, caddr_t)); 179int fxp_init __P((struct fxp_softc *)); 180void fxp_rxdrain __P((struct fxp_softc *)); 181void fxp_stop __P((struct fxp_softc *, int)); 182void fxp_watchdog __P((struct ifnet *)); 183int fxp_add_rfabuf __P((struct fxp_softc *, bus_dmamap_t, int)); 184int fxp_mdi_read __P((struct device *, int, int)); 185void fxp_statchg __P((struct device *)); 186void fxp_mdi_write __P((struct device *, int, int, int)); 187void fxp_autosize_eeprom __P((struct fxp_softc*)); 188void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, int, int)); 189void fxp_get_info __P((struct fxp_softc *, u_int8_t *)); 190void fxp_tick __P((void *)); 191void fxp_mc_setup __P((struct fxp_softc *)); 192 193void fxp_shutdown __P((void *)); 194void fxp_power __P((int, void *)); 195 196int fxp_copy_small = 0; 197 198int fxp_enable __P((struct fxp_softc*)); 199void fxp_disable __P((struct fxp_softc*)); 200 201struct fxp_phytype { 202 int 
fp_phy; /* type of PHY, -1 for MII at the end. */ 203 void (*fp_init) __P((struct fxp_softc *)); 204} fxp_phytype_table[] = { 205 { FXP_PHY_80C24, fxp_80c24_initmedia }, 206 { -1, fxp_mii_initmedia }, 207}; 208 209/* 210 * Set initial transmit threshold at 64 (512 bytes). This is 211 * increased by 64 (512 bytes) at a time, to maximum of 192 212 * (1536 bytes), if an underrun occurs. 213 */ 214static int tx_threshold = 64; 215 216/* 217 * Wait for the previous command to be accepted (but not necessarily 218 * completed). 219 */ 220inline void 221fxp_scb_wait(sc) 222 struct fxp_softc *sc; 223{ 224 int i = 10000; 225 226 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) 227 delay(2); 228 if (i == 0) 229 printf("%s: WARNING: SCB timed out!\n", sc->sc_dev.dv_xname); 230} 231 232/* 233 * Finish attaching an i82557 interface. Called by bus-specific front-end. 234 */ 235void 236fxp_attach(sc) 237 struct fxp_softc *sc; 238{ 239 u_int8_t enaddr[6]; 240 struct ifnet *ifp; 241 bus_dma_segment_t seg; 242 int rseg, i, error; 243 struct fxp_phytype *fp; 244 245 /* 246 * Allocate the control data structures, and create and load the 247 * DMA map for it. 
248 */ 249 if ((error = bus_dmamem_alloc(sc->sc_dmat, 250 sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, 251 0)) != 0) { 252 printf("%s: unable to allocate control data, error = %d\n", 253 sc->sc_dev.dv_xname, error); 254 goto fail_0; 255 } 256 257 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 258 sizeof(struct fxp_control_data), (caddr_t *)&sc->sc_control_data, 259 BUS_DMA_COHERENT)) != 0) { 260 printf("%s: unable to map control data, error = %d\n", 261 sc->sc_dev.dv_xname, error); 262 goto fail_1; 263 } 264 bzero(sc->sc_control_data, sizeof(struct fxp_control_data)); 265 266 if ((error = bus_dmamap_create(sc->sc_dmat, 267 sizeof(struct fxp_control_data), 1, 268 sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) { 269 printf("%s: unable to create control data DMA map, " 270 "error = %d\n", sc->sc_dev.dv_xname, error); 271 goto fail_2; 272 } 273 274 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, 275 sc->sc_control_data, sizeof(struct fxp_control_data), NULL, 276 0)) != 0) { 277 printf("%s: can't load control data DMA map, error = %d\n", 278 sc->sc_dev.dv_xname, error); 279 goto fail_3; 280 } 281 282 /* 283 * Create the transmit buffer DMA maps. 284 */ 285 for (i = 0; i < FXP_NTXCB; i++) { 286 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 287 FXP_NTXSEG, MCLBYTES, 0, 0, 288 &FXP_DSTX(sc, i)->txs_dmamap)) != 0) { 289 printf("%s: unable to create tx DMA map %d, " 290 "error = %d\n", sc->sc_dev.dv_xname, i, error); 291 goto fail_4; 292 } 293 } 294 295 /* 296 * Create the receive buffer DMA maps. 297 */ 298 for (i = 0; i < FXP_NRFABUFS; i++) { 299 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 300 MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { 301 printf("%s: unable to create rx DMA map %d, " 302 "error = %d\n", sc->sc_dev.dv_xname, i, error); 303 goto fail_5; 304 } 305 } 306 307 /* Initialize MAC address and media structures. 
*/ 308 fxp_get_info(sc, enaddr); 309 310 printf("%s: Ethernet address %s, %s Mb/s\n", sc->sc_dev.dv_xname, 311 ether_sprintf(enaddr), sc->phy_10Mbps_only ? "10" : "10/100"); 312 313 ifp = &sc->sc_ethercom.ec_if; 314 315 /* 316 * Get info about our media interface, and initialize it. Note 317 * the table terminates itself with a phy of -1, indicating 318 * that we're using MII. 319 */ 320 for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++) 321 if (fp->fp_phy == sc->phy_primary_device) 322 break; 323 (*fp->fp_init)(sc); 324 325 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 326 ifp->if_softc = sc; 327 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 328 ifp->if_ioctl = fxp_ioctl; 329 ifp->if_start = fxp_start; 330 ifp->if_watchdog = fxp_watchdog; 331 332 /* 333 * Attach the interface. 334 */ 335 if_attach(ifp); 336 ether_ifattach(ifp, enaddr); 337#if NBPFILTER > 0 338 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB, 339 sizeof(struct ether_header)); 340#endif 341#if NRND > 0 342 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname, 343 RND_TYPE_NET, 0); 344#endif 345 346 /* 347 * Add shutdown hook so that DMA is disabled prior to reboot. Not 348 * doing do could allow DMA to corrupt kernel memory during the 349 * reboot before the driver initializes. 350 */ 351 sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc); 352 if (sc->sc_sdhook == NULL) 353 printf("%s: WARNING: unable to establish shutdown hook\n", 354 sc->sc_dev.dv_xname); 355 /* 356 * Add suspend hook, for similar reasons.. 357 */ 358 sc->sc_powerhook = powerhook_establish(fxp_power, sc); 359 if (sc->sc_powerhook == NULL) 360 printf("%s: WARNING: unable to establish power hook\n", 361 sc->sc_dev.dv_xname); 362 return; 363 364 /* 365 * Free any resources we've allocated during the failed attach 366 * attempt. Do this in reverse order and fall though. 
367 */ 368 fail_5: 369 for (i = 0; i < FXP_NRFABUFS; i++) { 370 if (sc->sc_rxmaps[i] != NULL) 371 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]); 372 } 373 fail_4: 374 for (i = 0; i < FXP_NTXCB; i++) { 375 if (FXP_DSTX(sc, i)->txs_dmamap != NULL) 376 bus_dmamap_destroy(sc->sc_dmat, 377 FXP_DSTX(sc, i)->txs_dmamap); 378 } 379 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 380 fail_3: 381 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 382 fail_2: 383 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data, 384 sizeof(struct fxp_control_data)); 385 fail_1: 386 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 387 fail_0: 388 return; 389} 390 391void 392fxp_mii_initmedia(sc) 393 struct fxp_softc *sc; 394{ 395 396 sc->sc_flags |= FXPF_MII; 397 398 sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if; 399 sc->sc_mii.mii_readreg = fxp_mdi_read; 400 sc->sc_mii.mii_writereg = fxp_mdi_write; 401 sc->sc_mii.mii_statchg = fxp_statchg; 402 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mii_mediachange, 403 fxp_mii_mediastatus); 404 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 405 MII_OFFSET_ANY); 406 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 407 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 408 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 409 } else 410 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 411} 412 413void 414fxp_80c24_initmedia(sc) 415 struct fxp_softc *sc; 416{ 417 418 /* 419 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter 420 * doesn't have a programming interface of any sort. The 421 * media is sensed automatically based on how the link partner 422 * is configured. This is, in essence, manual configuration. 
423 */ 424 printf("%s: Seeq 80c24 AutoDUPLEX media interface present\n", 425 sc->sc_dev.dv_xname); 426 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange, 427 fxp_80c24_mediastatus); 428 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 429 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 430} 431 432/* 433 * Device shutdown routine. Called at system shutdown after sync. The 434 * main purpose of this routine is to shut off receiver DMA so that 435 * kernel memory doesn't get clobbered during warmboot. 436 */ 437void 438fxp_shutdown(arg) 439 void *arg; 440{ 441 struct fxp_softc *sc = arg; 442 443 /* 444 * Since the system's going to halt shortly, don't bother 445 * freeing mbufs. 446 */ 447 fxp_stop(sc, 0); 448} 449/* 450 * Power handler routine. Called when the system is transitioning 451 * into/out of power save modes. As with fxp_shutdown, the main 452 * purpose of this routine is to shut off receiver DMA so it doesn't 453 * clobber kernel memory at the wrong time. 454 */ 455void 456fxp_power(why, arg) 457 int why; 458 void *arg; 459{ 460 struct fxp_softc *sc = arg; 461 struct ifnet *ifp; 462 int s; 463 464 s = splnet(); 465 if (why != PWR_RESUME) 466 fxp_stop(sc, 0); 467 else { 468 ifp = &sc->sc_ethercom.ec_if; 469 if (ifp->if_flags & IFF_UP) 470 fxp_init(sc); 471 } 472 splx(s); 473} 474 475/* 476 * Initialize the interface media. 477 */ 478void 479fxp_get_info(sc, enaddr) 480 struct fxp_softc *sc; 481 u_int8_t *enaddr; 482{ 483 u_int16_t data, myea[3]; 484 485 /* 486 * Reset to a stable state. 487 */ 488 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 489 DELAY(10); 490 491 sc->sc_eeprom_size = 0; 492 fxp_autosize_eeprom(sc); 493 if(sc->sc_eeprom_size == 0) { 494 printf("%s: failed to detect EEPROM size", sc->sc_dev.dv_xname); 495 sc->sc_eeprom_size = 6; /* XXX panic here? 
*/ 496 } 497#ifdef DEBUG 498 printf("%s: detected %d word EEPROM\n", 499 sc->sc_dev.dv_xname, 500 1 << sc->sc_eeprom_size); 501#endif 502 503 /* 504 * Get info about the primary PHY 505 */ 506 fxp_read_eeprom(sc, &data, 6, 1); 507 sc->phy_primary_addr = data & 0xff; 508 sc->phy_primary_device = (data >> 8) & 0x3f; 509 sc->phy_10Mbps_only = data >> 15; 510 511 /* 512 * Read MAC address. 513 */ 514 fxp_read_eeprom(sc, myea, 0, 3); 515 bcopy(myea, enaddr, ETHER_ADDR_LEN); 516} 517 518/* 519 * Figure out EEPROM size. 520 * 521 * 559's can have either 64-word or 256-word EEPROMs, the 558 522 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet 523 * talks about the existance of 16 to 256 word EEPROMs. 524 * 525 * The only known sizes are 64 and 256, where the 256 version is used 526 * by CardBus cards to store CIS information. 527 * 528 * The address is shifted in msb-to-lsb, and after the last 529 * address-bit the EEPROM is supposed to output a `dummy zero' bit, 530 * after which follows the actual data. We try to detect this zero, by 531 * probing the data-out bit in the EEPROM control register just after 532 * having shifted in a bit. If the bit is zero, we assume we've 533 * shifted enough address bits. The data-out should be tri-state, 534 * before this, which should translate to a logical one. 535 * 536 * Other ways to do this would be to try to read a register with known 537 * contents with a varying number of address bits, but no such 538 * register seem to be available. The high bits of register 10 are 01 539 * on the 558 and 559, but apparently not on the 557. 540 * 541 * The Linux driver computes a checksum on the EEPROM data, but the 542 * value of this checksum is not very well documented. 543 */ 544 545void 546fxp_autosize_eeprom(sc) 547 struct fxp_softc *sc; 548{ 549 u_int16_t reg; 550 int x; 551 552 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 553 /* 554 * Shift in read opcode. 
555 */ 556 for (x = 3; x > 0; x--) { 557 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 558 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 559 } else { 560 reg = FXP_EEPROM_EECS; 561 } 562 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 563 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 564 reg | FXP_EEPROM_EESK); 565 DELAY(1); 566 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 567 DELAY(1); 568 } 569 /* 570 * Shift in address, wait for the dummy zero following a correct 571 * address shift. 572 */ 573 for (x = 1; x <= 8; x++) { 574 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 575 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 576 FXP_EEPROM_EECS | FXP_EEPROM_EESK); 577 DELAY(1); 578 if((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 579 FXP_EEPROM_EEDO) == 0) 580 break; 581 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 582 DELAY(1); 583 } 584 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 585 DELAY(1); 586 if(x != 6 && x != 8) { 587#ifdef DEBUG 588 printf("%s: strange EEPROM size (%d)\n", 589 sc->sc_dev.dv_xname, 1 << x); 590#endif 591 } else 592 sc->sc_eeprom_size = x; 593} 594 595/* 596 * Read from the serial EEPROM. Basically, you manually shift in 597 * the read opcode (one bit at a time) and then shift in the address, 598 * and then you shift out the data (all of this one bit at a time). 599 * The word size is 16 bits, so you have to provide the address for 600 * every 16 bits of data. 601 */ 602void 603fxp_read_eeprom(sc, data, offset, words) 604 struct fxp_softc *sc; 605 u_int16_t *data; 606 int offset; 607 int words; 608{ 609 u_int16_t reg; 610 int i, x; 611 612 for (i = 0; i < words; i++) { 613 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 614 /* 615 * Shift in read opcode. 
616 */ 617 for (x = 3; x > 0; x--) { 618 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 619 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 620 } else { 621 reg = FXP_EEPROM_EECS; 622 } 623 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 624 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 625 reg | FXP_EEPROM_EESK); 626 DELAY(1); 627 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 628 DELAY(1); 629 } 630 /* 631 * Shift in address. 632 */ 633 for (x = sc->sc_eeprom_size; x > 0; x--) { 634 if ((i + offset) & (1 << (x - 1))) { 635 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 636 } else { 637 reg = FXP_EEPROM_EECS; 638 } 639 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 640 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 641 reg | FXP_EEPROM_EESK); 642 DELAY(1); 643 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 644 DELAY(1); 645 } 646 reg = FXP_EEPROM_EECS; 647 data[i] = 0; 648 /* 649 * Shift out data. 650 */ 651 for (x = 16; x > 0; x--) { 652 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 653 reg | FXP_EEPROM_EESK); 654 DELAY(1); 655 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 656 FXP_EEPROM_EEDO) 657 data[i] |= (1 << (x - 1)); 658 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 659 DELAY(1); 660 } 661 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 662 DELAY(1); 663 } 664} 665 666/* 667 * Start packet transmission on the interface. 668 */ 669void 670fxp_start(ifp) 671 struct ifnet *ifp; 672{ 673 struct fxp_softc *sc = ifp->if_softc; 674 struct mbuf *m0, *m; 675 struct fxp_cb_tx *txd; 676 struct fxp_txsoft *txs; 677 struct fxp_tbdlist *tbd; 678 bus_dmamap_t dmamap; 679 int error, lasttx, nexttx, opending, seg; 680 681 /* 682 * If we want a re-init, bail out now. 683 */ 684 if (sc->sc_flags & FXPF_WANTINIT) { 685 ifp->if_flags |= IFF_OACTIVE; 686 return; 687 } 688 689 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 690 return; 691 692 /* 693 * Remember the previous txpending and the current lasttx. 
694 */ 695 opending = sc->sc_txpending; 696 lasttx = sc->sc_txlast; 697 698 /* 699 * Loop through the send queue, setting up transmit descriptors 700 * until we drain the queue, or use up all available transmit 701 * descriptors. 702 */ 703 while (sc->sc_txpending < FXP_NTXCB) { 704 /* 705 * Grab a packet off the queue. 706 */ 707 IF_DEQUEUE(&ifp->if_snd, m0); 708 if (m0 == NULL) 709 break; 710 711 /* 712 * Get the next available transmit descriptor. 713 */ 714 nexttx = FXP_NEXTTX(sc->sc_txlast); 715 txd = FXP_CDTX(sc, nexttx); 716 tbd = FXP_CDTBD(sc, nexttx); 717 txs = FXP_DSTX(sc, nexttx); 718 dmamap = txs->txs_dmamap; 719 720 /* 721 * Load the DMA map. If this fails, the packet either 722 * didn't fit in the allotted number of frags, or we were 723 * short on resources. In this case, we'll copy and try 724 * again. 725 */ 726 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 727 BUS_DMA_NOWAIT) != 0) { 728 MGETHDR(m, M_DONTWAIT, MT_DATA); 729 if (m == NULL) { 730 printf("%s: unable to allocate Tx mbuf\n", 731 sc->sc_dev.dv_xname); 732 IF_PREPEND(&ifp->if_snd, m0); 733 break; 734 } 735 if (m0->m_pkthdr.len > MHLEN) { 736 MCLGET(m, M_DONTWAIT); 737 if ((m->m_flags & M_EXT) == 0) { 738 printf("%s: unable to allocate Tx " 739 "cluster\n", sc->sc_dev.dv_xname); 740 m_freem(m); 741 IF_PREPEND(&ifp->if_snd, m0); 742 break; 743 } 744 } 745 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); 746 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 747 m_freem(m0); 748 m0 = m; 749 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 750 m0, BUS_DMA_NOWAIT); 751 if (error) { 752 printf("%s: unable to load Tx buffer, " 753 "error = %d\n", sc->sc_dev.dv_xname, error); 754 IF_PREPEND(&ifp->if_snd, m0); 755 break; 756 } 757 } 758 759 /* Initialize the fraglist. 
*/ 760 for (seg = 0; seg < dmamap->dm_nsegs; seg++) { 761 tbd->tbd_d[seg].tb_addr = 762 htole32(dmamap->dm_segs[seg].ds_addr); 763 tbd->tbd_d[seg].tb_size = 764 htole32(dmamap->dm_segs[seg].ds_len); 765 } 766 767 FXP_CDTBDSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE); 768 769 /* Sync the DMA map. */ 770 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 771 BUS_DMASYNC_PREWRITE); 772 773 /* 774 * Store a pointer to the packet so we can free it later. 775 */ 776 txs->txs_mbuf = m0; 777 778 /* 779 * Initialize the transmit descriptor. 780 */ 781 /* BIG_ENDIAN: no need to swap to store 0 */ 782 txd->cb_status = 0; 783 txd->cb_command = 784 htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF); 785 txd->tx_threshold = tx_threshold; 786 txd->tbd_number = dmamap->dm_nsegs; 787 788 FXP_CDTXSYNC(sc, nexttx, 789 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 790 791 /* Advance the tx pointer. */ 792 sc->sc_txpending++; 793 sc->sc_txlast = nexttx; 794 795#if NBPFILTER > 0 796 /* 797 * Pass packet to bpf if there is a listener. 798 */ 799 if (ifp->if_bpf) 800 bpf_mtap(ifp->if_bpf, m0); 801#endif 802 } 803 804 if (sc->sc_txpending == FXP_NTXCB) { 805 /* No more slots; notify upper layer. */ 806 ifp->if_flags |= IFF_OACTIVE; 807 } 808 809 if (sc->sc_txpending != opending) { 810 /* 811 * We enqueued packets. If the transmitter was idle, 812 * reset the txdirty pointer. 813 */ 814 if (opending == 0) 815 sc->sc_txdirty = FXP_NEXTTX(lasttx); 816 817 /* 818 * Cause the chip to interrupt and suspend command 819 * processing once the last packet we've enqueued 820 * has been transmitted. 821 */ 822 FXP_CDTX(sc, sc->sc_txlast)->cb_command |= 823 htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_S); 824 FXP_CDTXSYNC(sc, sc->sc_txlast, 825 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 826 827 /* 828 * The entire packet chain is set up. Clear the suspend bit 829 * on the command prior to the first packet we set up. 
830 */ 831 FXP_CDTXSYNC(sc, lasttx, 832 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 833 FXP_CDTX(sc, lasttx)->cb_command &= htole16(~FXP_CB_COMMAND_S); 834 FXP_CDTXSYNC(sc, lasttx, 835 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 836 837 /* 838 * Issue a Resume command in case the chip was suspended. 839 */ 840 fxp_scb_wait(sc); 841 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 842 843 /* Set a watchdog timer in case the chip flakes out. */ 844 ifp->if_timer = 5; 845 } 846} 847 848/* 849 * Process interface interrupts. 850 */ 851int 852fxp_intr(arg) 853 void *arg; 854{ 855 struct fxp_softc *sc = arg; 856 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 857 struct fxp_cb_tx *txd; 858 struct fxp_txsoft *txs; 859 struct mbuf *m, *m0; 860 bus_dmamap_t rxmap; 861 struct fxp_rfa *rfa; 862 struct ether_header *eh; 863 int i, claimed = 0; 864 u_int16_t len, rxstat, txstat; 865 u_int8_t statack; 866 867 /* 868 * If the interface isn't running, don't try to 869 * service the interrupt.. just ack it and bail. 870 */ 871 if ((ifp->if_flags & IFF_RUNNING) == 0) { 872 statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); 873 if (statack) { 874 claimed = 1; 875 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); 876 } 877 return claimed; 878 } 879 880 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { 881 claimed = 1; 882 883 /* 884 * First ACK all the interrupts in this pass. 885 */ 886 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); 887 888 /* 889 * Process receiver interrupts. If a no-resource (RNR) 890 * condition exists, get whatever packets we can and 891 * re-start the receiver. 
892 */ 893 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) { 894 rcvloop: 895 m = sc->sc_rxq.ifq_head; 896 rfa = FXP_MTORFA(m); 897 rxmap = M_GETCTX(m, bus_dmamap_t); 898 899 FXP_RFASYNC(sc, m, 900 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 901 902 rxstat = le16toh(rfa->rfa_status); 903 904 if ((rxstat & FXP_RFA_STATUS_C) == 0) { 905 /* 906 * We have processed all of the 907 * receive buffers. 908 */ 909 goto do_transmit; 910 } 911 912 IF_DEQUEUE(&sc->sc_rxq, m); 913 914 FXP_RXBUFSYNC(sc, m, BUS_DMASYNC_POSTREAD); 915 916 len = le16toh(rfa->actual_size) & 917 (m->m_ext.ext_size - 1); 918 919 if (len < sizeof(struct ether_header)) { 920 /* 921 * Runt packet; drop it now. 922 */ 923 FXP_INIT_RFABUF(sc, m); 924 goto rcvloop; 925 } 926 927 /* 928 * If the packet is small enough to fit in a 929 * single header mbuf, allocate one and copy 930 * the data into it. This greatly reduces 931 * memory consumption when we receive lots 932 * of small packets. 933 * 934 * Otherwise, we add a new buffer to the receive 935 * chain. If this fails, we drop the packet and 936 * recycle the old buffer. 937 */ 938 if (fxp_copy_small != 0 && len <= MHLEN) { 939 MGETHDR(m0, M_DONTWAIT, MT_DATA); 940 if (m == NULL) 941 goto dropit; 942 memcpy(mtod(m0, caddr_t), 943 mtod(m, caddr_t), len); 944 FXP_INIT_RFABUF(sc, m); 945 m = m0; 946 } else { 947 if (fxp_add_rfabuf(sc, rxmap, 1) != 0) { 948 dropit: 949 ifp->if_ierrors++; 950 FXP_INIT_RFABUF(sc, m); 951 goto rcvloop; 952 } 953 } 954 955 m->m_pkthdr.rcvif = ifp; 956 m->m_pkthdr.len = m->m_len = len; 957 eh = mtod(m, struct ether_header *); 958 959#if NBPFILTER > 0 960 /* 961 * Pass this up to any BPF listeners, but only 962 * pass it up the stack it its for us. 
963 */ 964 if (ifp->if_bpf) { 965 bpf_mtap(ifp->if_bpf, m); 966 967 if ((ifp->if_flags & IFF_PROMISC) != 0 && 968 (rxstat & FXP_RFA_STATUS_IAMATCH) != 0 && 969 (eh->ether_dhost[0] & 1) == 0) { 970 m_freem(m); 971 goto rcvloop; 972 } 973 } 974#endif /* NBPFILTER > 0 */ 975 976 /* Pass it on. */ 977 (*ifp->if_input)(ifp, m); 978 goto rcvloop; 979 } 980 981 do_transmit: 982 if (statack & FXP_SCB_STATACK_RNR) { 983 rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t); 984 fxp_scb_wait(sc); 985 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 986 rxmap->dm_segs[0].ds_addr + 987 RFA_ALIGNMENT_FUDGE); 988 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 989 FXP_SCB_COMMAND_RU_START); 990 } 991 992 /* 993 * Free any finished transmit mbuf chains. 994 */ 995 if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) { 996 ifp->if_flags &= ~IFF_OACTIVE; 997 for (i = sc->sc_txdirty; sc->sc_txpending != 0; 998 i = FXP_NEXTTX(i), sc->sc_txpending--) { 999 txd = FXP_CDTX(sc, i); 1000 txs = FXP_DSTX(sc, i); 1001 1002 FXP_CDTXSYNC(sc, i, 1003 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1004 1005 txstat = le16toh(txd->cb_status); 1006 1007 if ((txstat & FXP_CB_STATUS_C) == 0) 1008 break; 1009 1010 FXP_CDTBDSYNC(sc, i, BUS_DMASYNC_POSTWRITE); 1011 1012 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1013 0, txs->txs_dmamap->dm_mapsize, 1014 BUS_DMASYNC_POSTWRITE); 1015 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1016 m_freem(txs->txs_mbuf); 1017 txs->txs_mbuf = NULL; 1018 } 1019 1020 /* Update the dirty transmit buffer pointer. */ 1021 sc->sc_txdirty = i; 1022 1023 /* 1024 * Cancel the watchdog timer if there are no pending 1025 * transmissions. 1026 */ 1027 if (sc->sc_txpending == 0) { 1028 ifp->if_timer = 0; 1029 1030 /* 1031 * If we want a re-init, do that now. 1032 */ 1033 if (sc->sc_flags & FXPF_WANTINIT) 1034 (void) fxp_init(sc); 1035 } 1036 1037 /* 1038 * Try to get more packets going. 
 */
			fxp_start(ifp);
		}
	}

#if NRND > 0
	if (claimed)
		rnd_add_uint32(&sc->rnd_source, statack);
#endif
	return (claimed);
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 *
 * This is a timeout(9) callback; it re-arms itself one second out
 * and raises splnet() around all of its work.
 */
void
fxp_tick(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct fxp_stats *sp = &sc->sc_control_data->fcd_stats;
	int s;

	s = splnet();

	/*
	 * Fold the counters from the previous dump-stats DMA into the
	 * interface statistics.  The chip writes them little-endian,
	 * hence the le32toh() on every field that is accumulated.
	 */
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	/* A zero/non-zero test needs no byte-swap. */
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->sc_rxidle = 0;
	} else {
		sc->sc_rxidle++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter (actually,
	 * resetting the interface). This is a work-around for a bug in
	 * the 82557 where the receiver locks up if it gets certain types
	 * of garbage in the synchronization bits prior to the packet header.
	 * This bug is supposed to only occur in 10Mbps mode, but has been
	 * seen to occur in 100Mbps mode as well (perhaps due to a 10/100
	 * speed transition).
	 *
	 * Note: fxp_init() re-arms this timeout, so we return without
	 * scheduling another tick here.
	 */
	if (sc->sc_rxidle > FXP_MAX_RX_IDLE) {
		(void) fxp_init(sc);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
		    FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		/* BIG_ENDIAN: no swap required to store 0 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	if (sc->sc_flags & FXPF_MII) {
		/* Tick the MII clock. */
		mii_tick(&sc->sc_mii);
	}

	splx(s);

	/*
	 * Schedule another timeout one second from now.
	 */
	timeout(fxp_tick, sc, hz);
}

/*
 * Drain the receive queue.
 *
 * Each receive mbuf carries its DMA map in the packet-header
 * context field (see M_GETCTX); the map is unloaded and returned
 * to the free pool before the mbuf itself is freed.
 */
void
fxp_rxdrain(sc)
	struct fxp_softc *sc;
{
	bus_dmamap_t rxmap;
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(&sc->sc_rxq, m);
		if (m == NULL)
			break;
		rxmap = M_GETCTX(m, bus_dmamap_t);
		bus_dmamap_unload(sc->sc_dmat, rxmap);
		FXP_RXMAP_PUT(sc, rxmap);
		m_freem(m);
	}
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 * If `drain' is non-zero, the receive buffers are released as
 * well; otherwise they are left queued for reuse by a subsequent
 * fxp_init().
 */
void
fxp_stop(sc, drain)
	struct fxp_softc *sc;
	int drain;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct fxp_txsoft *txs;
	int i;

	/*
	 * Turn down interface (done early to avoid bad interactions
	 * between panics, shutdown hooks, and the watchdog timer)
	 */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_tick, sc);
	if (sc->sc_flags & FXPF_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/*
	 * Issue software reset (a selective reset via the PORT
	 * register; NOTE(review): confirm against the 8255x manual
	 * whether a full software reset is ever required here).
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);	/* brief settle time for the reset */

	/*
	 * Release any xmit buffers: unload each in-flight DMA map,
	 * free the mbuf chain, and mark the slot empty.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		txs = FXP_DSTX(sc, i);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		fxp_rxdrain(sc);
	}

}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 *
 * Recovery is a full reinitialization via fxp_init().
 */
void
fxp_watchdog(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) fxp_init(sc);
}

/*
 * Initialize the interface. Must be called at splnet().
 *
 * Brings the chip from reset to a running state: loads the CU/RU
 * base registers, programs the configuration block and station
 * address via polled command blocks, builds the transmit
 * descriptor ring and receive buffer queue, and starts both the
 * command and receive units.  Returns 0 on success, or the error
 * from fxp_add_rfabuf() if receive buffers could not be set up.
 */
int
fxp_init(sc)
	struct fxp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txd;
	bus_dmamap_t rxmap;
	int i, prm, allm, error = 0;

	/*
	 * Cancel any pending I/O (keep the receive buffers; they are
	 * re-queued below).
	 */
	fxp_stop(sc, 0);

	sc->sc_flags = 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize the multicast filter. Do this now, since we might
	 * have to setup the config block differently.
	 */
	fxp_mc_setup(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->sc_cddma + FXP_CDSTATSOFF);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_control_data->fcd_configcb;
	memset(cbp, 0, sizeof(struct fxp_cb_config));

	/*
	 * This copy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template));

	/* BIG_ENDIAN: no need to swap to store 0 */
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG |
	    FXP_CB_COMMAND_EL);
	/* BIG_ENDIAN: no need to swap to store 0xffffffff */
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	cbp->byte_count = 22;		/* (22) bytes to config */
	cbp->rx_fifo_limit = 8;		/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit = 0;		/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs = 0;		/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount = 0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max */
	cbp->dma_bce = 0;		/* (disable) dma max counters */
	cbp->late_scb = 0;		/* (don't) defer SCB update */
	cbp->tno_int = 0;		/* (disable) tx not okay interrupt */
	cbp->ci_int = 1;		/* interrupt on CU idle */
	cbp->save_bf = prm;		/* save bad frames */
	cbp->disc_short_rx = !prm;	/* discard short packets */
	cbp->underrun_retry = 1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai = 1;			/* (don't) disable source addr insert */
	cbp->preamble_length = 2;	/* (7 byte) preamble */
	cbp->loopback = 0;		/* (don't) loopback */
	cbp->linear_priority = 0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode = 0;	/* (wait after xmit only) */
	cbp->interfrm_spacing = 6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous = prm;		/* promiscuous mode */
	cbp->bcast_disable = 0;		/* (don't) disable broadcasts */
	cbp->crscdt = 0;		/* (CRS only) */
	cbp->stripping = !prm;		/* truncate rx packet to byte count */
	cbp->padding = 1;		/* (do) pad short tx packets */
	cbp->rcv_crc_xfer = 0;		/* (don't) xfer CRC to host */
	cbp->force_fdx = 0;		/* (don't) force full duplex */
	cbp->fdx_pin_en = 1;		/* (enable) FDX# pin */
	cbp->multi_ia = 0;		/* (don't) accept multiple IAs */
	cbp->mc_all = allm;		/* accept all multicasts */

	FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete (busy-wait; the command is small). */
	do {
		FXP_CDCONFIGSYNC(sc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & FXP_CB_STATUS_C) == 0);

	/*
	 * Initialize the station address.
	 */
	cb_ias = &sc->sc_control_data->fcd_iascb;
	/* BIG_ENDIAN: no need to swap to store 0 */
	cb_ias->cb_status = 0;
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	/* BIG_ENDIAN: no need to swap to store 0xffffffff */
	cb_ias->link_addr = 0xffffffff;
	memcpy((void *)cb_ias->macaddr, LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	do {
		FXP_CDIASSYNC(sc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cb_ias->cb_status & FXP_CB_STATUS_C) == 0);

	/*
	 * Initialize the transmit descriptor ring. txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 *
	 * Each descriptor starts life as a suspending NOP linked to its
	 * successor; fxp_start() rewrites the command word when a frame
	 * is queued.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		txd = FXP_CDTX(sc, i);
		memset(txd, 0, sizeof(struct fxp_cb_tx));
		txd->cb_command =
		    htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
		txd->tbd_array_addr = htole32(FXP_CDTBDADDR(sc, i));
		txd->link_addr = htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(i)));
		FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = FXP_NTXCB - 1;

	/*
	 * Initialize the receive buffer list.
	 */
	sc->sc_rxq.ifq_maxlen = FXP_NRFABUFS;
	while (sc->sc_rxq.ifq_len < FXP_NRFABUFS) {
		rxmap = FXP_RXMAP_GET(sc);
		if ((error = fxp_add_rfabuf(sc, rxmap, 0)) != 0) {
			printf("%s: unable to allocate or map rx "
			    "buffer %d, error = %d\n",
			    sc->sc_dev.dv_xname,
			    sc->sc_rxq.ifq_len, error);
			/*
			 * XXX Should attempt to run with fewer receive
			 * XXX buffers instead of just failing.
			 */
			FXP_RXMAP_PUT(sc, rxmap);
			fxp_rxdrain(sc);
			goto out;
		}
	}
	sc->sc_rxidle = 0;

	/*
	 * Give the transmit ring to the chip. We do this by pointing
	 * the chip at the last descriptor (which is a NOP|SUSPEND), and
	 * issuing a start command. It will execute the NOP and then
	 * suspend, pointing at the first descriptor.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t);
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);

	if (sc->sc_flags & FXPF_MII) {
		/*
		 * Set current media.
		 */
		mii_mediachg(&sc->sc_mii);
	}

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second timer.
	 */
	timeout(fxp_tick, sc, hz);

	/*
	 * Attempt to start output on the interface.
	 */
	fxp_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Change media according to request.
 */
int
fxp_mii_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * Notify the world which media we're using.
 *
 * A disabled device is reported as IFM_NONE without touching the
 * MII (the PHY may be powered off).
 */
void
fxp_mii_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;

	if(sc->sc_enabled == 0) {
		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Media change callback for the 80c24 serial interface: the media
 * is fixed, so there is nothing to program.
 */
int
fxp_80c24_mediachange(ifp)
	struct ifnet *ifp;
{

	/* Nothing to do here. */
	return (0);
}

void
fxp_80c24_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;

	/*
	 * Media is currently-selected media. We cannot determine
	 * the link status.
	 */
	ifmr->ifm_status = 0;
	ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media;
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, error code on failure.
 *
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 *
 * If `unload' is non-zero, any previous DMA mapping on `rxmap' is
 * unloaded before the map is reused for the new mbuf cluster.
 */
int
fxp_add_rfabuf(sc, rxmap, unload)
	struct fxp_softc *sc;
	bus_dmamap_t rxmap;
	int unload;
{
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (unload)
		bus_dmamap_unload(sc->sc_dmat, rxmap);

	/* Stash the DMA map in the mbuf for fxp_rxdrain() et al. */
	M_SETCTX(m, rxmap);

	error = bus_dmamap_load(sc->sc_dmat, rxmap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, sc->sc_rxq.ifq_len, error);
		panic("fxp_add_rfabuf");	/* XXX */
	}

	FXP_INIT_RFABUF(sc, m);

	return (0);
}

/*
 * Read a PHY register through the MDI control register.
 * Busy-waits (up to 10000 * 10us) for the ready bit (0x10000000)
 * to assert; on timeout a diagnostic is printed and whatever was
 * last read is returned.
 *
 * NOTE(review): the `volatile' qualifier on the return type has
 * no effect and could be dropped.
 */
volatile int
fxp_mdi_read(self, phy, reg)
	struct device *self;
	int phy;
	int reg;
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);

	/* Low 16 bits of the MDI control register hold the data. */
	return (value & 0xffff);
}

/*
 * MII status-change callback.  Currently does nothing.
 */
void
fxp_statchg(self)
	struct device *self;
{

	/* XXX Update ifp->if_baudrate */
}

/*
 * Write a PHY register through the MDI control register.
 * Busy-waits (up to 10000 * 10us) for the ready bit, as in
 * fxp_mdi_read() above.
 */
void
fxp_mdi_write(self, phy, reg, value)
	struct device *self;
	int phy;
	int reg;
	int value;
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
}

/*
 * Handle ioctl requests.  Runs at splnet() for the duration.
 */
int
fxp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		if ((error = fxp_enable(sc)) != 0)
			break;
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = fxp_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			error = fxp_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = fxp_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			fxp_stop(sc, 1);
			fxp_disable(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			if((error = fxp_enable(sc)) != 0)
				break;
			error = fxp_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up change in any other
			 * flags that affect the hardware state.
			 */
			if((error = fxp_enable(sc)) != 0)
				break;
			error = fxp_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if(sc->sc_enabled == 0) {
			error = EIO;
			break;
		}
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.  If transmissions are
			 * pending, defer the reinit to the interrupt
			 * handler (see FXPF_WANTINIT).
			 */
			if (sc->sc_txpending) {
				sc->sc_flags |= FXPF_WANTINIT;
				error = 0;
			} else
				error = fxp_init(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * This function must be called at splnet().
 */
void
fxp_mc_setup(sc)
	struct fxp_softc *sc;
{
	struct fxp_cb_mcs *mcsp = &sc->sc_control_data->fcd_mcscb;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nmcasts;

#ifdef DIAGNOSTIC
	if (sc->sc_txpending)
		panic("fxp_mc_setup: pending transmissions");
#endif

	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Initialize multicast setup descriptor.
	 */
	nmcasts = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Check for too many multicast addresses or if we're
		 * listening to a range. Either way, we simply have
		 * to accept all multicasts.
		 */
		if (nmcasts >= MAXMCADDR ||
		    memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * Callers of this function must do the
			 * right thing with this.  If we're called
			 * from outside fxp_init(), the caller must
			 * detect if the state of IFF_ALLMULTI changes.
			 * If it does, the caller must then call
			 * fxp_init(), since allmulti is handled by
			 * the config block.
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		memcpy((void *)&mcsp->mc_addr[nmcasts][0], enm->enm_addrlo,
		    ETHER_ADDR_LEN);
		nmcasts++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* BIG_ENDIAN: no need to swap to store 0 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(sc->sc_txlast)));
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Wait until the command unit is not active. This should never
	 * happen since nothing is queued, but make sure anyway.
	 */
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE)
		/* nothing */ ;

	/*
	 * Start the multicast setup command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDMCSOFF);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete. */
	do {
		FXP_CDMCSSYNC(sc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((mcsp->cb_status & FXP_CB_STATUS_C) == 0);
}

/*
 * Enable the device; the first enable calls the bus front-end's
 * sc_enable hook, if any.  Returns EIO if that hook fails.
 */
int
fxp_enable(sc)
	struct fxp_softc *sc;
{

	if (sc->sc_enabled == 0 && sc->sc_enable != NULL) {
		if ((*sc->sc_enable)(sc) != 0) {
			printf("%s: device enable failed\n",
			    sc->sc_dev.dv_xname);
			return (EIO);
		}
	}

	sc->sc_enabled = 1;

	return 0;
}

/*
 * Disable the device via the bus front-end's sc_disable hook,
 * if the device is currently enabled and a hook is present.
 */
void
fxp_disable(sc)
	struct fxp_softc *sc;
{
	if (sc->sc_enabled != 0 && sc->sc_disable != NULL) {
		(*sc->sc_disable)(sc);
		sc->sc_enabled = 0;
	}
}