/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/bge/if_bge.c 103103 2002-09-08 19:12:02Z jdp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#if !defined(lint)
static const char rcsid[] =
125 "$FreeBSD: head/sys/dev/bge/if_bge.c 100695 2002-07-26 03:47:08Z jdp $";
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */

static struct bge_type bge_devs[] = {
	{ ALT_VENDORID, ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID, ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int bge_probe		(device_t);
static int bge_attach		(device_t);
static int bge_detach		(device_t);
static void bge_release_resources
				(struct bge_softc *);
static void bge_txeof		(struct bge_softc *);
static void bge_rxeof		(struct bge_softc *);

static void bge_tick		(void *);
static void bge_stats_update	(struct bge_softc *);
static int bge_encap		(struct bge_softc *, struct mbuf *,
					u_int32_t *);

static void bge_intr		(void *);
static void bge_start		(struct ifnet *);
static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
static void bge_init		(void *);
static void bge_stop		(struct bge_softc *);
static void bge_watchdog	(struct ifnet *);
static void bge_shutdown	(device_t);
static int bge_ifmedia_upd	(struct ifnet *);
static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static u_int8_t bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);

static u_int32_t bge_crc	(caddr_t);
static void bge_setmulti	(struct bge_softc *);

static void bge_handle_events	(struct bge_softc *);
static int bge_alloc_jumbo_mem	(struct bge_softc *);
static void bge_free_jumbo_mem	(struct bge_softc *);
static void *bge_jalloc	(struct bge_softc *);
static void bge_jfree		(void *, void *);
static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std	(struct bge_softc *);
static void bge_free_rx_ring_std	(struct bge_softc *);
static int bge_init_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_tx_ring	(struct bge_softc *);
static int bge_init_tx_ring	(struct bge_softc *);

static int bge_chipinit		(struct bge_softc *);
static int bge_blockinit	(struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
static void bge_vpd_read	(struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
				(struct bge_softc *, int);
static void bge_writemem_ind	(struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
				(struct bge_softc *, int);
#endif
static void bge_writereg_ind	(struct bge_softc *, int, int);

static int bge_miibus_readreg	(device_t, int, int);
static int bge_miibus_writereg	(device_t, int, int, int);
static void bge_miibus_statchg	(device_t);

static void bge_reset		(struct bge_softc *);
static void bge_phy_hack	(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}
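
/*
 * The pair of functions above implements the chip's register-indirect
 * access scheme: writing a target offset to BGE_PCI_MEMWIN_BASEADDR in
 * PCI config space slides a small window over the NIC's internal address
 * space, after which the addressed word is accessible through
 * BGE_PCI_MEMWIN_DATA. This is how the driver reaches NIC-local RAM
 * (statistics block, ring control blocks) without mapping the whole
 * address space. Illustrative use only (not a call made verbatim below):
 *
 *	u_int32_t word;
 *
 *	word = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 */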

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	device_t dev;
	u_int32_t val;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: VPD read timed out\n", sc->bge_unit);
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
			sc->bge_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
			sc->bge_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(0);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
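
/*
 * A sketch of how the EEPROM accessors are typically used: pull a block
 * of configuration bytes into a caller-supplied buffer and report a
 * failure if any byte times out. The offset shown is how bge_attach()
 * is expected to fetch the station address (not repeated verbatim in
 * the portion of the driver shown here):
 *
 *	u_int8_t eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		printf("bge%d: failed to read station address\n",
 *		    sc->bge_unit);
 */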

static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t val;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Some chips with built-in PHYs will incorrectly answer at
	 * PHY addresses other than that of the built-in PHY at
	 * address 1, so report 'no device' for any other address
	 * on the affected revisions.
	 */
	if (phy != 1)
		switch(sc->bge_asicrev) {
		case BGE_ASICREV_BCM5701_B5:
		case BGE_ASICREV_BCM5703_A2:
			return(0);
		}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		return(0);
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	bge_phy_hack(sc);

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr;
	register int i;
	struct bge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			contigfree(sc->bge_cdata.bge_jumbo_buf,
			    BGE_JMEM, M_DEVBUF);
			sc->bge_cdata.bge_jumbo_buf = NULL;
			printf("bge%d: no memory for jumbo "
			    "buffer queue!\n", sc->bge_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

static void
bge_free_jumbo_mem(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_jpool_entry *entry;

	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(buf, args)
	void *buf;
	void *args;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)args;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	return;
}
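
/*
 * Note on the pool bookkeeping above: bge_jfree() is installed as the
 * external-buffer free routine via MEXTADD() in bge_newbuf_jumbo()
 * below, so it runs whenever the network stack drops the last reference
 * to a jumbo mbuf. The slot index is recovered with simple pointer
 * arithmetic: since BGE_JSLOTS slots of BGE_JLEN bytes each are carved
 * out of one contiguous region, a buffer at (base + n * BGE_JLEN)
 * belongs to slot n.
 */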

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcbo = (struct bge_rcb_opaque *)rcb;
	rcb->bge_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	return;
}

static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

#define BGE_POLY	0xEDB88320

static u_int32_t
bge_crc(addr)
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}
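
/*
 * bge_crc() returns only the low 7 bits of the Ethernet CRC, giving a
 * 128-bucket multicast hash. In bge_setmulti() below, bits 6:5 of that
 * value select one of the four 32-bit BGE_MAR registers and bits 4:0
 * select the bit within it; a frame whose destination address hashes
 * to a set bit passes the multicast filter.
 */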

static void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

	return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	u_int32_t cachesize;
	int i;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("bge%d: RX CPU self-diagnostics failed!\n",
		    sc->bge_unit);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
	    BGE_MODECTL_RX_NO_PHDR_CSUM);

	/* Get cache line size. */
	cachesize = pci_read_config(sc->bge_dev, BGE_PCI_CACHESZ, 1);

	/*
	 * Avoid violating PCI spec on certain chip revs.
	 */
	if (pci_read_config(sc->bge_dev, BGE_PCI_CMD, 4) & PCIM_CMD_MWIEN) {
		switch(cachesize) {
		case 1:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_16BYTES, 4);
			break;
		case 2:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_32BYTES, 4);
			break;
		case 4:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_64BYTES, 4);
			break;
		case 8:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_128BYTES, 4);
			break;
		case 16:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_256BYTES, 4);
			break;
		case 32:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_512BYTES, 4);
			break;
		case 64:
			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_1024BYTES, 4);
			break;
		default:
			/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				printf("bge%d: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->bge_unit, cachesize);
			PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
			    PCIM_CMD_MWIEN, 4);
			break;
		}
	}

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
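
/*
 * The value read above is the PCI Cache Line Size register, expressed
 * in 32-bit words; the switch programs a memory-write boundary of 16x
 * that many bytes (e.g. a value of 8, i.e. a 32-byte cache line,
 * selects BGE_PCI_WRITE_BNDRY_128BYTES). Memory Write and Invalidate
 * transactions must stay aligned to the cache line, which is why an
 * unrecognized cache line size falls back to turning MWI off entirely.
 */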

static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (sc->bge_extram) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	}

	/* Configure DMA resource pool */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

	/* Configure mbuf pool watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: buffer manager failed to start\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: flow-through queue init failed\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_std_ring);
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	rcb->bge_flags = 0;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring);
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;

	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
		rcb->bge_max_len = 0;
		rcb->bge_nicaddr = 0;
		rcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	rcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_tx_ring);
	rcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	rcb->bge_max_len = BGE_TX_RING_CNT;
	rcb->bge_flags = 0;

	/* Disable all unused RX return rings */
	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		rcb->bge_hostaddr.bge_addr_hi = 0;
		rcb->bge_hostaddr.bge_addr_lo = 0;
		rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
		rcb->bge_max_len = BGE_RETURN_RING_CNT;
		rcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	rcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_return_ring);
	rcb->bge_nicaddr = 0x00000000;
	rcb->bge_max_len = BGE_RETURN_RING_CNT;
	rcb->bge_flags = 0;

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: host coalescing engine failed to idle\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
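
	/*
	 * The coalescing parameters above trade interrupt rate for
	 * latency: the NIC holds off a host interrupt until either the
	 * tick counter or the buffered-descriptor count for a ring is
	 * reached. The sc->bge_*_coal_ticks and sc->bge_*_max_coal_bds
	 * values come from the softc and are expected to be given sane
	 * defaults at attach time.
	 */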

	/* Set up address of statistics block */
	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_info.bge_stats));

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* init LED register */
	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
1444 */ 1445static int 1446bge_probe(dev) 1447 device_t dev; 1448{ 1449 struct bge_type *t; 1450 struct bge_softc *sc; 1451 1452 t = bge_devs; 1453 1454 sc = device_get_softc(dev); 1455 bzero(sc, sizeof(struct bge_softc)); 1456 sc->bge_unit = device_get_unit(dev); 1457 sc->bge_dev = dev; 1458 1459 while(t->bge_name != NULL) { 1460 if ((pci_get_vendor(dev) == t->bge_vid) && 1461 (pci_get_device(dev) == t->bge_did)) { 1462#ifdef notdef 1463 bge_vpd_read(sc); 1464 device_set_desc(dev, sc->bge_vpd_prodname); 1465#endif 1466 device_set_desc(dev, t->bge_name); 1467 return(0); 1468 } 1469 t++; 1470 } 1471 1472 return(ENXIO); 1473} 1474 1475static int 1476bge_attach(dev) 1477 device_t dev; 1478{ 1479 int s; 1480 u_int32_t command; 1481 struct ifnet *ifp; 1482 struct bge_softc *sc; 1483 u_int32_t hwcfg = 0;
492
493 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
494 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
495
496 	for (i = 0; i < BGE_TIMEOUT; i++) {
497 		val = CSR_READ_4(sc, BGE_MI_COMM);
498 		if (!(val & BGE_MICOMM_BUSY))
499 			break;
500 	}
501
502 	if (i == BGE_TIMEOUT) {
503 		printf("bge%d: PHY read timed out\n", sc->bge_unit);
504 		return(0);
505 	}
506
507 	val = CSR_READ_4(sc, BGE_MI_COMM);
508
509 	if (val & BGE_MICOMM_READFAIL)
510 		return(0);
511
512 	return(val & 0xFFFF);
513 }
514
515 static int
516 bge_miibus_writereg(dev, phy, reg, val)
517 	device_t dev;
518 	int phy, reg, val;
519 {
520 	struct bge_softc *sc;
521 	int i;
522
523 	sc = device_get_softc(dev);
524
525 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
526 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
527
528 	for (i = 0; i < BGE_TIMEOUT; i++) {
529 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
530 			break;
531 	}
532
533 	if (i == BGE_TIMEOUT) {
534 		printf("bge%d: PHY write timed out\n", sc->bge_unit);
535 		return(0);
536 	}
537
538 	return(0);
539 }
540
541 static void
542 bge_miibus_statchg(dev)
543 	device_t dev;
544 {
545 	struct bge_softc *sc;
546 	struct mii_data *mii;
547
548 	sc = device_get_softc(dev);
549 	mii = device_get_softc(sc->bge_miibus);
550
551 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
552 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
553 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
554 	} else {
555 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
556 	}
557
558 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
559 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
560 	} else {
561 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
562 	}
563
564 	bge_phy_hack(sc);
565
566 	return;
567 }
568
569 /*
570  * Handle events that have triggered interrupts.
571  */
572 static void
573 bge_handle_events(sc)
574 	struct bge_softc *sc;
575 {
576
577 	return;
578 }
579
580 /*
581  * Memory management for jumbo frames.
582  */
583
584 static int
585 bge_alloc_jumbo_mem(sc)
586 	struct bge_softc *sc;
587 {
588 	caddr_t ptr;
589 	register int i;
590 	struct bge_jpool_entry *entry;
591
592 	/* Grab a big chunk o' storage. */
593 	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
594 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
595
596 	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
597 		printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
598 		return(ENOBUFS);
599 	}
600
601 	SLIST_INIT(&sc->bge_jfree_listhead);
602 	SLIST_INIT(&sc->bge_jinuse_listhead);
603
604 	/*
605 	 * Now divide it up into 9K pieces and save the addresses
606 	 * in an array.
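	 * Layout sketch (illustrative): slot i starts at
	 * bge_jumbo_buf + (i * BGE_JLEN), which is what lets bge_jfree()
	 * recover a slot index from a bare buffer pointer later on:
	 *
	 *	i = ((vm_offset_t)buf
	 *	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
	 *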
607 	 */
608 	ptr = sc->bge_cdata.bge_jumbo_buf;
609 	for (i = 0; i < BGE_JSLOTS; i++) {
610 		sc->bge_cdata.bge_jslots[i] = ptr;
611 		ptr += BGE_JLEN;
612 		entry = malloc(sizeof(struct bge_jpool_entry),
613 		    M_DEVBUF, M_NOWAIT);
614 		if (entry == NULL) {
615 			contigfree(sc->bge_cdata.bge_jumbo_buf,
616 			    BGE_JMEM, M_DEVBUF);
617 			sc->bge_cdata.bge_jumbo_buf = NULL;
618 			printf("bge%d: no memory for jumbo "
619 			    "buffer queue!\n", sc->bge_unit);
620 			return(ENOBUFS);
621 		}
622 		entry->slot = i;
623 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
624 		    entry, jpool_entries);
625 	}
626
627 	return(0);
628 }
629
630 static void
631 bge_free_jumbo_mem(sc)
632 	struct bge_softc *sc;
633 {
634 	int i;
635 	struct bge_jpool_entry *entry;
636
637 	for (i = 0; i < BGE_JSLOTS; i++) {
638 		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
639 		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
640 		free(entry, M_DEVBUF);
641 	}
642
643 	contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
644
645 	return;
646 }
647
648 /*
649  * Allocate a jumbo buffer.
650  */
651 static void *
652 bge_jalloc(sc)
653 	struct bge_softc *sc;
654 {
655 	struct bge_jpool_entry *entry;
656
657 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
658
659 	if (entry == NULL) {
660 		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
661 		return(NULL);
662 	}
663
664 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
665 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
666 	return(sc->bge_cdata.bge_jslots[entry->slot]);
667 }
668
669 /*
670  * Release a jumbo buffer.
671  */
672 static void
673 bge_jfree(buf, args)
674 	void *buf;
675 	void *args;
676 {
677 	struct bge_jpool_entry *entry;
678 	struct bge_softc *sc;
679 	int i;
680
681 	/* Extract the softc struct pointer. */
682 	sc = (struct bge_softc *)args;
683
684 	if (sc == NULL)
685 		panic("bge_jfree: can't find softc pointer!");
686
687 	/* calculate the slot this buffer belongs to */
688
689 	i = ((vm_offset_t)buf
690 	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
691
692 	if ((i < 0) || (i >= BGE_JSLOTS))
693 		panic("bge_jfree: asked to free buffer that we don't manage!");
694
695 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
696 	if (entry == NULL)
697 		panic("bge_jfree: buffer not in use!");
698 	entry->slot = i;
699 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
700 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
701
702 	return;
703 }
704
705
706 /*
707  * Initialize a standard receive ring descriptor.
708  */
709 static int
710 bge_newbuf_std(sc, i, m)
711 	struct bge_softc *sc;
712 	int i;
713 	struct mbuf *m;
714 {
715 	struct mbuf *m_new = NULL;
716 	struct bge_rx_bd *r;
717
718 	if (m == NULL) {
719 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
720 		if (m_new == NULL) {
721 			return(ENOBUFS);
722 		}
723
724 		MCLGET(m_new, M_DONTWAIT);
725 		if (!(m_new->m_flags & M_EXT)) {
726 			m_freem(m_new);
727 			return(ENOBUFS);
728 		}
729 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
730 	} else {
731 		m_new = m;
732 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
733 		m_new->m_data = m_new->m_ext.ext_buf;
734 	}
735
736 	if (!sc->bge_rx_alignment_bug)
737 		m_adj(m_new, ETHER_ALIGN);
738 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
739 	r = &sc->bge_rdata->bge_rx_std_ring[i];
740 	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
741 	r->bge_flags = BGE_RXBDFLAG_END;
742 	r->bge_len = m_new->m_len;
743 	r->bge_idx = i;
744
745 	return(0);
746 }
747
748 /*
749  * Initialize a jumbo receive ring descriptor. This allocates
750  * a jumbo buffer from the pool managed internally by the driver.
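 * The buffer is attached with MEXTADD(), so when the stack is done
 * with the mbuf, m_freem() hands the buffer back to this pool via
 * bge_jfree() rather than to the normal mbuf allocator.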
751  */
752 static int
753 bge_newbuf_jumbo(sc, i, m)
754 	struct bge_softc *sc;
755 	int i;
756 	struct mbuf *m;
757 {
758 	struct mbuf *m_new = NULL;
759 	struct bge_rx_bd *r;
760
761 	if (m == NULL) {
762 		caddr_t *buf = NULL;
763
764 		/* Allocate the mbuf. */
765 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
766 		if (m_new == NULL) {
767 			return(ENOBUFS);
768 		}
769
770 		/* Allocate the jumbo buffer */
771 		buf = bge_jalloc(sc);
772 		if (buf == NULL) {
773 			m_freem(m_new);
774 			printf("bge%d: jumbo allocation failed "
775 			    "-- packet dropped!\n", sc->bge_unit);
776 			return(ENOBUFS);
777 		}
778
779 		/* Attach the buffer to the mbuf. */
780 		m_new->m_data = (void *) buf;
781 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
782 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
783 		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
784 	} else {
785 		m_new = m;
786 		m_new->m_data = m_new->m_ext.ext_buf;
787 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
788 	}
789
790 	if (!sc->bge_rx_alignment_bug)
791 		m_adj(m_new, ETHER_ALIGN);
792 	/* Set up the descriptor. */
793 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
794 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
795 	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
796 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
797 	r->bge_len = m_new->m_len;
798 	r->bge_idx = i;
799
800 	return(0);
801 }
802
803 /*
804  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
805  * that's 1MB of memory, which is a lot. For now, we fill only the first
806  * 256 ring entries and hope that our CPU is fast enough to keep up with
807  * the NIC.
808  */
809 static int
810 bge_init_rx_ring_std(sc)
811 	struct bge_softc *sc;
812 {
813 	int i;
814
815 	for (i = 0; i < BGE_SSLOTS; i++) {
816 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
817 			return(ENOBUFS);
818 	}
819
820 	sc->bge_std = i - 1;
821 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
822
823 	return(0);
824 }
825
826 static void
827 bge_free_rx_ring_std(sc)
828 	struct bge_softc *sc;
829 {
830 	int i;
831
832 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
833 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
834 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
835 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
836 		}
837 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
838 		    sizeof(struct bge_rx_bd));
839 	}
840
841 	return;
842 }
843
844 static int
845 bge_init_rx_ring_jumbo(sc)
846 	struct bge_softc *sc;
847 {
848 	int i;
849 	struct bge_rcb *rcb;
850 	struct bge_rcb_opaque *rcbo;
851
852 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
853 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
854 			return(ENOBUFS);
855 	}
856
857 	sc->bge_jumbo = i - 1;
858
859 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
860 	rcbo = (struct bge_rcb_opaque *)rcb;
861 	rcb->bge_flags = 0;
862 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
863
864 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
865
866 	return(0);
867 }
868
869 static void
870 bge_free_rx_ring_jumbo(sc)
871 	struct bge_softc *sc;
872 {
873 	int i;
874
875 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
876 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
877 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
878 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
879 		}
880 		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
881 		    sizeof(struct bge_rx_bd));
882 	}
883
884 	return;
885 }
886
887 static void
888 bge_free_tx_ring(sc)
889 	struct bge_softc *sc;
890 {
891 	int i;
892
893 	if (sc->bge_rdata->bge_tx_ring == NULL)
894 		return;
895
896 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
897 		if (sc->bge_cdata.bge_tx_chain[i]
!= NULL) { 898 m_freem(sc->bge_cdata.bge_tx_chain[i]); 899 sc->bge_cdata.bge_tx_chain[i] = NULL; 900 } 901 bzero((char *)&sc->bge_rdata->bge_tx_ring[i], 902 sizeof(struct bge_tx_bd)); 903 } 904 905 return; 906} 907 908static int 909bge_init_tx_ring(sc) 910 struct bge_softc *sc; 911{ 912 sc->bge_txcnt = 0; 913 sc->bge_tx_saved_considx = 0; 914 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 915 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 916 917 return(0); 918} 919 920#define BGE_POLY 0xEDB88320 921 922static u_int32_t 923bge_crc(addr) 924 caddr_t addr; 925{ 926 u_int32_t idx, bit, data, crc; 927 928 /* Compute CRC for the address value. */ 929 crc = 0xFFFFFFFF; /* initial value */ 930 931 for (idx = 0; idx < 6; idx++) { 932 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 933 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0); 934 } 935 936 return(crc & 0x7F); 937} 938 939static void 940bge_setmulti(sc) 941 struct bge_softc *sc; 942{ 943 struct ifnet *ifp; 944 struct ifmultiaddr *ifma; 945 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 946 int h, i; 947 948 ifp = &sc->arpcom.ac_if; 949 950 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 951 for (i = 0; i < 4; i++) 952 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 953 return; 954 } 955 956 /* First, zot all the existing filters. */ 957 for (i = 0; i < 4; i++) 958 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 959 960 /* Now program new ones. */ 961 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 962 if (ifma->ifma_addr->sa_family != AF_LINK) 963 continue; 964 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 965 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 966 } 967 968 for (i = 0; i < 4; i++) 969 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 970 971 return; 972} 973 974/* 975 * Do endian, PCI and DMA initialization. Also check the on-board ROM 976 * self-test results. 977 */ 978static int 979bge_chipinit(sc) 980 struct bge_softc *sc; 981{ 982 u_int32_t cachesize; 983 int i; 984 985 /* Set endianness before we access any non-PCI registers. */ 986#if BYTE_ORDER == BIG_ENDIAN 987 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 988 BGE_BIGENDIAN_INIT, 4); 989#else 990 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 991 BGE_LITTLEENDIAN_INIT, 4); 992#endif 993 994 /* 995 * Check the 'ROM failed' bit on the RX CPU to see if 996 * self-tests passed. 997 */ 998 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 999 printf("bge%d: RX CPU self-diagnostics failed!\n", 1000 sc->bge_unit); 1001 return(ENODEV); 1002 } 1003 1004 /* Clear the MAC control register */ 1005 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1006 1007 /* 1008 * Clear the MAC statistics block in the NIC's 1009 * internal memory. 1010 */ 1011 for (i = BGE_STATS_BLOCK; 1012 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1013 BGE_MEMWIN_WRITE(sc, i, 0); 1014 1015 for (i = BGE_STATUS_BLOCK; 1016 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1017 BGE_MEMWIN_WRITE(sc, i, 0); 1018 1019 /* Set up the PCI DMA control register. */ 1020 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1021 BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F, 4); 1022 1023 /* 1024 * Set up general mode register. 1025 */ 1026 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| 1027 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1028 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1029 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1030 BGE_MODECTL_RX_NO_PHDR_CSUM); 1031 1032 /* Get cache line size. 
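   (The PCI cache line size register counts 32-bit words, so a value
   of 8, for example, means a 32-byte line; the switch below maps the
   value to a suitable PCI write boundary.)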
 */
1033 	cachesize = pci_read_config(sc->bge_dev, BGE_PCI_CACHESZ, 1);
1034
1035 	/*
1036 	 * Avoid violating PCI spec on certain chip revs.
1037 	 */
1038 	if (pci_read_config(sc->bge_dev, BGE_PCI_CMD, 4) & PCIM_CMD_MWIEN) {
1039 		switch(cachesize) {
1040 		case 1:
1041 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1042 			    BGE_PCI_WRITE_BNDRY_16BYTES, 4);
1043 			break;
1044 		case 2:
1045 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1046 			    BGE_PCI_WRITE_BNDRY_32BYTES, 4);
1047 			break;
1048 		case 4:
1049 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1050 			    BGE_PCI_WRITE_BNDRY_64BYTES, 4);
1051 			break;
1052 		case 8:
1053 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1054 			    BGE_PCI_WRITE_BNDRY_128BYTES, 4);
1055 			break;
1056 		case 16:
1057 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1058 			    BGE_PCI_WRITE_BNDRY_256BYTES, 4);
1059 			break;
1060 		case 32:
1061 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1062 			    BGE_PCI_WRITE_BNDRY_512BYTES, 4);
1063 			break;
1064 		case 64:
1065 			PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1066 			    BGE_PCI_WRITE_BNDRY_1024BYTES, 4);
1067 			break;
1068 		default:
1069 			/* Disable PCI memory write and invalidate. */
1070 			if (bootverbose)
1071 				printf("bge%d: cache line size %d not "
1072 				    "supported; disabling PCI MWI\n",
1073 				    sc->bge_unit, cachesize);
1074 			PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1075 			    PCIM_CMD_MWIEN, 4);
1076 			break;
1077 		}
1078 	}
1079
1080 #ifdef __brokenalpha__
1081 	/*
1082 	 * Must ensure that we do not cross an 8K (bytes) boundary
1083 	 * for DMA reads. Our highest limit is 1K bytes. This is a
1084 	 * restriction on some ALPHA platforms with early revision
1085 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1086 	 */
1087 	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1088 #endif
1089
1090 	/* Set the timer prescaler (always 66MHz) */
1091 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1092
1093 	return(0);
1094 }
1095
1096 static int
1097 bge_blockinit(sc)
1098 	struct bge_softc *sc;
1099 {
1100 	struct bge_rcb *rcb;
1101 	struct bge_rcb_opaque *rcbo;
1102 	int i;
1103
1104 	/*
1105 	 * Initialize the memory window pointer register so that
1106 	 * we can access the first 32K of internal NIC RAM. This will
1107 	 * allow us to set up the TX send ring RCBs and the RX return
1108 	 * ring RCBs, plus other things which live in NIC memory.
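	 * With the window base set to 0, host address (bge_vhandle +
	 * BGE_MEMWIN_START + offset) corresponds to NIC-local address
	 * (offset); that is how the send ring and RX return ring RCBs
	 * are written directly further below.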
1109 */ 1110 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1111 1112 /* Configure mbuf memory pool */ 1113 if (sc->bge_extram) { 1114 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1115 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1116 } else { 1117 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1118 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1119 } 1120 1121 /* Configure DMA resource pool */ 1122 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1123 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1124 1125 /* Configure mbuf pool watermarks */ 1126 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1127 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1128 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1129 1130 /* Configure DMA resource watermarks */ 1131 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1132 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1133 1134 /* Enable buffer manager */ 1135 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1136 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1137 1138 /* Poll for buffer manager start indication */ 1139 for (i = 0; i < BGE_TIMEOUT; i++) { 1140 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1141 break; 1142 DELAY(10); 1143 } 1144 1145 if (i == BGE_TIMEOUT) { 1146 printf("bge%d: buffer manager failed to start\n", 1147 sc->bge_unit); 1148 return(ENXIO); 1149 } 1150 1151 /* Enable flow-through queues */ 1152 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1153 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1154 1155 /* Wait until queue initialization is complete */ 1156 for (i = 0; i < BGE_TIMEOUT; i++) { 1157 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1158 break; 1159 DELAY(10); 1160 } 1161 1162 if (i == BGE_TIMEOUT) { 1163 printf("bge%d: flow-through queue init failed\n", 1164 sc->bge_unit); 1165 return(ENXIO); 1166 } 1167 1168 /* Initialize the standard RX ring control block */ 1169 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1170 BGE_HOSTADDR(rcb->bge_hostaddr) = 1171 vtophys(&sc->bge_rdata->bge_rx_std_ring); 1172 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1173 if (sc->bge_extram) 1174 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1175 else 1176 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1177 rcb->bge_flags = 0; 1178 rcbo = (struct bge_rcb_opaque *)rcb; 1179 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0); 1180 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1); 1181 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1182 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3); 1183 1184 /* 1185 * Initialize the jumbo RX ring control block 1186 * We set the 'ring disabled' bit in the flags 1187 * field until we're actually ready to start 1188 * using this ring (i.e. once we set the MTU 1189 * high enough to require it). 
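	 * (bge_init() brings this ring up via bge_init_rx_ring_jumbo(),
	 * which clears the disabled flag, only when if_mtu is large
	 * enough to need jumbo buffers.)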
1190 	 */
1191 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1192 	BGE_HOSTADDR(rcb->bge_hostaddr) =
1193 	    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring);
1194 	rcb->bge_max_len = BGE_MAX_FRAMELEN;
1195 	if (sc->bge_extram)
1196 		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1197 	else
1198 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1199 	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1200
1201 	rcbo = (struct bge_rcb_opaque *)rcb;
1202 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
1203 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
1204 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
1205 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);
1206
1207 	/* Set up dummy disabled mini ring RCB */
1208 	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1209 	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1210 	rcbo = (struct bge_rcb_opaque *)rcb;
1211 	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
1212
1213 	/*
1214 	 * Set the BD ring replenish thresholds. The recommended
1215 	 * values are 1/8th the number of descriptors allocated to
1216 	 * each ring.
1217 	 */
1218 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1219 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1220
1221 	/*
1222 	 * Disable all unused send rings by setting the 'ring disabled'
1223 	 * bit in the flags field of all the TX send ring control blocks.
1224 	 * These are located in NIC memory.
1225 	 */
1226 	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1227 	    BGE_SEND_RING_RCB);
1228 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1229 		rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1230 		rcb->bge_max_len = 0;
1231 		rcb->bge_nicaddr = 0;
1232 		rcb++;
1233 	}
1234
1235 	/* Configure TX RCB 0 (we use only the first ring) */
1236 	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1237 	    BGE_SEND_RING_RCB);
1238 	rcb->bge_hostaddr.bge_addr_hi = 0;
1239 	BGE_HOSTADDR(rcb->bge_hostaddr) =
1240 	    vtophys(&sc->bge_rdata->bge_tx_ring);
1241 	rcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1242 	rcb->bge_max_len = BGE_TX_RING_CNT;
1243 	rcb->bge_flags = 0;
1244
1245 	/* Disable all unused RX return rings */
1246 	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1247 	    BGE_RX_RETURN_RING_RCB);
1248 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1249 		rcb->bge_hostaddr.bge_addr_hi = 0;
1250 		rcb->bge_hostaddr.bge_addr_lo = 0;
1251 		rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1252 		rcb->bge_max_len = BGE_RETURN_RING_CNT;
1253 		rcb->bge_nicaddr = 0;
1254 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1255 		    (i * (sizeof(u_int64_t))), 0);
1256 		rcb++;
1257 	}
1258
1259 	/* Initialize RX ring indexes */
1260 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1261 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1262 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1263
1264 	/*
1265 	 * Set up RX return ring 0.
1266 	 * Note that the NIC address for RX return rings is 0x00000000.
1267 	 * The return rings live entirely within the host, so the
1268 	 * nicaddr field in the RCB isn't used.
1269 	 */
1270 	rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1271 	    BGE_RX_RETURN_RING_RCB);
1272 	rcb->bge_hostaddr.bge_addr_hi = 0;
1273 	BGE_HOSTADDR(rcb->bge_hostaddr) =
1274 	    vtophys(&sc->bge_rdata->bge_rx_return_ring);
1275 	rcb->bge_nicaddr = 0x00000000;
1276 	rcb->bge_max_len = BGE_RETURN_RING_CNT;
1277 	rcb->bge_flags = 0;
1278
1279 	/* Set random backoff seed for TX */
1280 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1281 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1282 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1283 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1284 	    BGE_TX_BACKOFF_SEED_MASK);
1285
1286 	/* Set inter-packet gap */
1287 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1288
1289 	/*
1290 	 * Specify which ring to use for packets that don't match
1291 	 * any RX rules.
1292 	 */
1293 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1294
1295 	/*
1296 	 * Configure number of RX lists. One interrupt distribution
1297 	 * list, sixteen active lists, one bad frames class.
1298 	 */
1299 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1300
1301 	/* Initialize RX list placement stats mask. */
1302 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1303 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1304
1305 	/* Disable host coalescing until we get it set up */
1306 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1307
1308 	/* Poll to make sure it's shut down. */
1309 	for (i = 0; i < BGE_TIMEOUT; i++) {
1310 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1311 			break;
1312 		DELAY(10);
1313 	}
1314
1315 	if (i == BGE_TIMEOUT) {
1316 		printf("bge%d: host coalescing engine failed to idle\n",
1317 		    sc->bge_unit);
1318 		return(ENXIO);
1319 	}
1320
1321 	/* Set up host coalescing defaults */
1322 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1323 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1324 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1325 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1326 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1327 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1328 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1329 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1330 	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1331
1332 	/* Set up address of statistics block */
1333 	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1334 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1335 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1336 	    vtophys(&sc->bge_rdata->bge_info.bge_stats));
1337
1338 	/* Set up address of status block */
1339 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1340 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
1341 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1342 	    vtophys(&sc->bge_rdata->bge_status_block));
1343 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1344 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1345
1346 	/* Turn on host coalescing state machine */
1347 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1348
1349 	/* Turn on RX BD completion state machine and enable attentions */
1350 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1351 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1352
1353 	/* Turn on RX list placement state machine */
1354 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1355
1356 	/* Turn on RX list selector state machine.
*/ 1357 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1358 1359 /* Turn on DMA, clear stats */ 1360 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1361 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1362 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1363 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1364 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1365 1366 /* Set misc. local control, enable interrupts on attentions */ 1367 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1368 1369#ifdef notdef 1370 /* Assert GPIO pins for PHY reset */ 1371 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1372 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1373 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1374 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1375#endif 1376 1377 /* Turn on DMA completion state machine */ 1378 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1379 1380 /* Turn on write DMA state machine */ 1381 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1382 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1383 1384 /* Turn on read DMA state machine */ 1385 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1386 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1387 1388 /* Turn on RX data completion state machine */ 1389 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1390 1391 /* Turn on RX BD initiator state machine */ 1392 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1393 1394 /* Turn on RX data and RX BD initiator state machine */ 1395 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1396 1397 /* Turn on Mbuf cluster free state machine */ 1398 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1399 1400 /* Turn on send BD completion state machine */ 1401 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1402 1403 /* Turn on send data completion state machine */ 1404 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1405 1406 /* Turn on send data initiator state machine */ 1407 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1408 1409 /* Turn on send BD initiator state machine */ 1410 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1411 1412 /* Turn on send BD selector state machine */ 1413 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1414 1415 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1416 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1417 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1418 1419 /* init LED register */ 1420 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1421 1422 /* ack/clear link change events */ 1423 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1424 BGE_MACSTAT_CFG_CHANGED); 1425 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1426 1427 /* Enable PHY auto polling (for MII/GMII only) */ 1428 if (sc->bge_tbi) { 1429 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1430 } else { 1431 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1432 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) 1433 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1434 BGE_EVTENB_MI_INTERRUPT); 1435 } 1436 1437 /* Enable link state change attentions. */ 1438 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1439 1440 return(0); 1441} 1442 1443/* 1444 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1445 * against our list and return its name if we find a match. Note 1446 * that since the Broadcom controller contains VPD support, we 1447 * can get the device name string from the controller itself instead 1448 * of the compiled-in string. This is a little slow, but it guarantees 1449 * we'll always announce the right product name. 
1450 */ 1451static int 1452bge_probe(dev) 1453 device_t dev; 1454{ 1455 struct bge_type *t; 1456 struct bge_softc *sc; 1457 1458 t = bge_devs; 1459 1460 sc = device_get_softc(dev); 1461 bzero(sc, sizeof(struct bge_softc)); 1462 sc->bge_unit = device_get_unit(dev); 1463 sc->bge_dev = dev; 1464 1465 while(t->bge_name != NULL) { 1466 if ((pci_get_vendor(dev) == t->bge_vid) && 1467 (pci_get_device(dev) == t->bge_did)) { 1468#ifdef notdef 1469 bge_vpd_read(sc); 1470 device_set_desc(dev, sc->bge_vpd_prodname); 1471#endif 1472 device_set_desc(dev, t->bge_name); 1473 return(0); 1474 } 1475 t++; 1476 } 1477 1478 return(ENXIO); 1479} 1480 1481static int 1482bge_attach(dev) 1483 device_t dev; 1484{ 1485 int s; 1486 u_int32_t command; 1487 struct ifnet *ifp; 1488 struct bge_softc *sc; 1489 u_int32_t hwcfg = 0;
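	/*
	 * mac_addr is used below to probe NIC memory for a station
	 * address before we fall back to reading the EEPROM.
	 */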
1490 	u_int32_t mac_addr = 0;
1491 	int unit, error = 0, rid;
1492
1493 	s = splimp();
1494
1495 	sc = device_get_softc(dev);
1496 	unit = device_get_unit(dev);
1497 	sc->bge_dev = dev;
1498 	sc->bge_unit = unit;
1499
1500 	/*
1501 	 * Map control/status registers.
1502 	 */
1503 	pci_enable_busmaster(dev);
1504 	pci_enable_io(dev, SYS_RES_MEMORY);
1505 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1506
1507 	if (!(command & PCIM_CMD_MEMEN)) {
1508 		printf("bge%d: failed to enable memory mapping!\n", unit);
1509 		error = ENXIO;
1510 		goto fail;
1511 	}
1512
1513 	rid = BGE_PCI_BAR0;
1514 	sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1515 	    0, ~0, 1, RF_ACTIVE);
1516
1517 	if (sc->bge_res == NULL) {
1518 		printf("bge%d: couldn't map memory\n", unit);
1519 		error = ENXIO;
1520 		goto fail;
1521 	}
1522
1523 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1524 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1525 	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1526
1527 	/*
1528 	 * XXX FIXME: rman_get_virtual() on the alpha is currently
1529 	 * broken and returns a physical address instead of a kernel
1530 	 * virtual address. Consequently, we need to do a little
1531 	 * extra mangling of the vhandle on the alpha. This should
1532 	 * eventually be fixed! The whole idea here is to get rid
1533 	 * of platform dependencies.
1534 	 */
1535 #ifdef __alpha__
1536 	if (pci_cvt_to_bwx(sc->bge_vhandle))
1537 		sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle);
1538 	else
1539 		sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle);
1540 	sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle);
1541 #endif
1542
1543 	/* Allocate interrupt */
1544 	rid = 0;
1545
1546 	sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1547 	    RF_SHAREABLE | RF_ACTIVE);
1548
1549 	if (sc->bge_irq == NULL) {
1550 		printf("bge%d: couldn't map interrupt\n", unit);
1551 		error = ENXIO;
1552 		goto fail;
1553 	}
1554
1555 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1556 	    bge_intr, sc, &sc->bge_intrhand);
1557
1558 	if (error) {
1559 		bge_release_resources(sc);
1560 		printf("bge%d: couldn't set up irq\n", unit);
1561 		goto fail;
1562 	}
1563
1564 	sc->bge_unit = unit;
1565
1566 	/* Try to reset the chip. */
1567 	bge_reset(sc);
1568
1569 	if (bge_chipinit(sc)) {
1570 		printf("bge%d: chip initialization failed\n", sc->bge_unit);
1571 		bge_release_resources(sc);
1572 		error = ENXIO;
1573 		goto fail;
1574 	}
1575
1576 	/*
1577 	 * Get station address (from NIC memory if present, otherwise
1578 	 * from the EEPROM).
	 */
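	/*
	 * Note: the 0x484b check below tests for an ASCII 'H','K'
	 * signature, which boards that keep a valid station address
	 * in NIC memory at offset 0x0c14 appear to set.
	 */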
1579 	mac_addr = bge_readmem_ind(sc, 0x0c14);
1580 	if ((mac_addr >> 16) == 0x484b) {
1581 		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
1582 		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
1583 		mac_addr = bge_readmem_ind(sc, 0x0c18);
1584 		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
1585 		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
1586 		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
1587 		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
1588 	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1573 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1574 printf("bge%d: failed to read station address\n", unit); 1575 bge_release_resources(sc); 1576 error = ENXIO; 1577 goto fail; 1578 } 1579 1580 /* 1581 * A Broadcom chip was detected. Inform the world. 1582 */ 1583 printf("bge%d: Ethernet address: %6D\n", unit, 1584 sc->arpcom.ac_enaddr, ":"); 1585 1586 /* Allocate the general information block and ring buffers. */ 1587 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF, 1588 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1589 1590 if (sc->bge_rdata == NULL) { 1591 bge_release_resources(sc); 1592 error = ENXIO; 1593 printf("bge%d: no memory for list buffers!\n", sc->bge_unit); 1594 goto fail; 1595 } 1596 1597 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 1598 1599 /* Try to allocate memory for jumbo buffers. */ 1600 if (bge_alloc_jumbo_mem(sc)) { 1601 printf("bge%d: jumbo buffer allocation " 1602 "failed\n", sc->bge_unit); 1603 bge_release_resources(sc); 1604 error = ENXIO; 1605 goto fail; 1606 } 1607 1608 /* Set default tuneable values. */ 1609 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 1610 sc->bge_rx_coal_ticks = 150; 1611 sc->bge_tx_coal_ticks = 150; 1612 sc->bge_rx_max_coal_bds = 64; 1613 sc->bge_tx_max_coal_bds = 128; 1614 1615 /* Set up ifnet structure */ 1616 ifp = &sc->arpcom.ac_if; 1617 ifp->if_softc = sc; 1618 ifp->if_unit = sc->bge_unit; 1619 ifp->if_name = "bge"; 1620 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1621 ifp->if_ioctl = bge_ioctl; 1622 ifp->if_output = ether_output; 1623 ifp->if_start = bge_start; 1624 ifp->if_watchdog = bge_watchdog; 1625 ifp->if_init = bge_init; 1626 ifp->if_mtu = ETHERMTU; 1627 ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1; 1628 ifp->if_hwassist = BGE_CSUM_FEATURES; 1629 ifp->if_capabilities = IFCAP_HWCSUM; 1630 ifp->if_capenable = ifp->if_capabilities; 1631 1632 /* Save ASIC rev. */ 1633 1634 sc->bge_asicrev = 1635 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 1636 BGE_PCIMISCCTL_ASICREV; 1637 1638 /* Pretend all 5700s are the same */ 1639 if ((sc->bge_asicrev & 0xFF000000) == BGE_ASICREV_BCM5700) 1640 sc->bge_asicrev = BGE_ASICREV_BCM5700; 1641 1642 /* 1643 * Figure out what sort of media we have by checking the 1644 * hardware config word in the EEPROM. Note: on some BCM5700 1645 * cards, this value appears to be unset. If that's the 1646 * case, we have to rely on identifying the NIC by its PCI 1647 * subsystem ID, as we do below for the SysKonnect SK-9D41. 1648 */ 1649 bge_read_eeprom(sc, (caddr_t)&hwcfg, 1650 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 1651 if ((ntohl(hwcfg) & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 1652 sc->bge_tbi = 1; 1653 1654 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 1655 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 1656 sc->bge_tbi = 1; 1657 1658 if (sc->bge_tbi) { 1659 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 1660 bge_ifmedia_upd, bge_ifmedia_sts); 1661 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1662 ifmedia_add(&sc->bge_ifmedia, 1663 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1664 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1665 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 1666 } else { 1667 /* 1668 * Do transceiver setup. 
1669 */ 1670 if (mii_phy_probe(dev, &sc->bge_miibus, 1671 bge_ifmedia_upd, bge_ifmedia_sts)) { 1672 printf("bge%d: MII without any PHY!\n", sc->bge_unit); 1673 bge_release_resources(sc); 1674 bge_free_jumbo_mem(sc); 1675 error = ENXIO; 1676 goto fail; 1677 } 1678 } 1679 1680 /* 1681 * When using the BCM5701 in PCI-X mode, data corruption has 1682 * been observed in the first few bytes of some received packets. 1683 * Aligning the packet buffer in memory eliminates the corruption. 1684 * Unfortunately, this misaligns the packet payloads. On platforms 1685 * which do not support unaligned accesses, we will realign the 1686 * payloads by copying the received packets. 1687 */ 1688 switch (sc->bge_asicrev) { 1689 case BGE_ASICREV_BCM5701_A0: 1690 case BGE_ASICREV_BCM5701_B0: 1691 case BGE_ASICREV_BCM5701_B2: 1692 case BGE_ASICREV_BCM5701_B5: 1693 /* If in PCI-X mode, work around the alignment bug. */ 1694 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 1695 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 1696 BGE_PCISTATE_PCI_BUSSPEED) 1697 sc->bge_rx_alignment_bug = 1; 1698 break; 1699 } 1700 1701 /* 1702 * Call MI attach routine. 1703 */ 1704 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 1705 callout_handle_init(&sc->bge_stat_ch); 1706 1707fail: 1708 splx(s); 1709 1710 return(error); 1711} 1712 1713static int 1714bge_detach(dev) 1715 device_t dev; 1716{ 1717 struct bge_softc *sc; 1718 struct ifnet *ifp; 1719 int s; 1720 1721 s = splimp(); 1722 1723 sc = device_get_softc(dev); 1724 ifp = &sc->arpcom.ac_if; 1725 1726 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); 1727 bge_stop(sc); 1728 bge_reset(sc); 1729 1730 if (sc->bge_tbi) { 1731 ifmedia_removeall(&sc->bge_ifmedia); 1732 } else { 1733 bus_generic_detach(dev); 1734 device_delete_child(dev, sc->bge_miibus); 1735 } 1736 1737 bge_release_resources(sc); 1738 bge_free_jumbo_mem(sc); 1739 1740 splx(s); 1741 1742 return(0); 1743} 1744 1745static void 1746bge_release_resources(sc) 1747 struct bge_softc *sc; 1748{ 1749 device_t dev; 1750 1751 dev = sc->bge_dev; 1752 1753 if (sc->bge_vpd_prodname != NULL) 1754 free(sc->bge_vpd_prodname, M_DEVBUF); 1755 1756 if (sc->bge_vpd_readonly != NULL) 1757 free(sc->bge_vpd_readonly, M_DEVBUF); 1758 1759 if (sc->bge_intrhand != NULL) 1760 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 1761 1762 if (sc->bge_irq != NULL) 1763 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 1764 1765 if (sc->bge_res != NULL) 1766 bus_release_resource(dev, SYS_RES_MEMORY, 1767 BGE_PCI_BAR0, sc->bge_res); 1768 1769 if (sc->bge_rdata != NULL) 1770 contigfree(sc->bge_rdata, 1771 sizeof(struct bge_ring_data), M_DEVBUF); 1772 1773 return; 1774} 1775 1776static void 1777bge_reset(sc) 1778 struct bge_softc *sc; 1779{ 1780 device_t dev; 1781 u_int32_t cachesize, command, pcistate; 1782 int i, val = 0; 1783 1784 dev = sc->bge_dev; 1785 1786 /* Save some important PCI state. 
*/ 1787 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 1788 command = pci_read_config(dev, BGE_PCI_CMD, 4); 1789 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 1790 1791 pci_write_config(dev, BGE_PCI_MISC_CTL, 1792 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1793 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1794 1795 /* Issue global reset */ 1796 bge_writereg_ind(sc, BGE_MISC_CFG, 1797 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 1798 1799 DELAY(1000); 1800 1801 /* Reset some of the PCI state that got zapped by reset */ 1802 pci_write_config(dev, BGE_PCI_MISC_CTL, 1803 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1804 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1805 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 1806 pci_write_config(dev, BGE_PCI_CMD, command, 4); 1807 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 1808 1809 /* 1810 * Prevent PXE restart: write a magic number to the 1811 * general communications memory at 0xB50. 1812 */ 1813 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1814 /* 1815 * Poll the value location we just wrote until 1816 * we see the 1's complement of the magic number. 1817 * This indicates that the firmware initialization 1818 * is complete. 1819 */ 1820 for (i = 0; i < BGE_TIMEOUT; i++) { 1821 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 1822 if (val == ~BGE_MAGIC_NUMBER) 1823 break; 1824 DELAY(10); 1825 } 1826 1827 if (i == BGE_TIMEOUT) { 1828 printf("bge%d: firmware handshake timed out\n", sc->bge_unit); 1829 return; 1830 } 1831 1832 /* 1833 * XXX Wait for the value of the PCISTATE register to 1834 * return to its original pre-reset state. This is a 1835 * fairly good indicator of reset completion. If we don't 1836 * wait for the reset to fully complete, trying to read 1837 * from the device's non-PCI registers may yield garbage 1838 * results. 1839 */ 1840 for (i = 0; i < BGE_TIMEOUT; i++) { 1841 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 1842 break; 1843 DELAY(10); 1844 } 1845 1846 /* Enable memory arbiter. */ 1847 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 1848 1849 /* Fix up byte swapping */ 1850 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 1851 BGE_MODECTL_BYTESWAP_DATA); 1852 1853 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1854 1855 DELAY(10000); 1856 1857 return; 1858} 1859 1860/* 1861 * Frame reception handling. This is called if there's a frame 1862 * on the receive return list. 
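 * The RX producer index in the status block tells us how far the
 * NIC has advanced; we walk the return ring up to that point and
 * replenish the standard or jumbo ring as each descriptor's flags
 * indicate.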
1863  *
1864  * Note: we have to be able to handle two possibilities here:
1865  * 1) the frame is from the jumbo receive ring
1866  * 2) the frame is from the standard receive ring
1867  */
1868
1869 static void
1870 bge_rxeof(sc)
1871 	struct bge_softc *sc;
1872 {
1873 	struct ifnet *ifp;
1874 	int stdcnt = 0, jumbocnt = 0;
1875
1876 	ifp = &sc->arpcom.ac_if;
1877
1878 	while(sc->bge_rx_saved_considx !=
1879 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1880 		struct bge_rx_bd *cur_rx;
1881 		u_int32_t rxidx;
1882 		struct ether_header *eh;
1883 		struct mbuf *m = NULL;
1884 		u_int16_t vlan_tag = 0;
1885 		int have_tag = 0;
1886
1887 		cur_rx =
1888 	    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
1889
1890 		rxidx = cur_rx->bge_idx;
1891 		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
1892
1893 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1894 			have_tag = 1;
1895 			vlan_tag = cur_rx->bge_vlan_tag;
1896 		}
1897
1898 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1899 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1900 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1901 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1902 			jumbocnt++;
1903 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1904 				ifp->if_ierrors++;
1905 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1906 				continue;
1907 			}
1908 			if (bge_newbuf_jumbo(sc,
1909 			    sc->bge_jumbo, NULL) == ENOBUFS) {
1910 				ifp->if_ierrors++;
1911 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1912 				continue;
1913 			}
1914 		} else {
1915 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1916 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
1917 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
1918 			stdcnt++;
1919 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1920 				ifp->if_ierrors++;
1921 				bge_newbuf_std(sc, sc->bge_std, m);
1922 				continue;
1923 			}
1924 			if (bge_newbuf_std(sc, sc->bge_std,
1925 			    NULL) == ENOBUFS) {
1926 				ifp->if_ierrors++;
1927 				bge_newbuf_std(sc, sc->bge_std, m);
1928 				continue;
1929 			}
1930 		}
1931
1932 		ifp->if_ipackets++;
1933 #ifndef __i386__
1934 		/*
1935 		 * The i386 allows unaligned accesses, but for other
1936 		 * platforms we must make sure the payload is aligned.
1937 		 */
1938 		if (sc->bge_rx_alignment_bug) {
1939 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
1940 			    cur_rx->bge_len);
1941 			m->m_data += ETHER_ALIGN;
1942 		}
1943 #endif
1944 		eh = mtod(m, struct ether_header *);
1945 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
1946 		m->m_pkthdr.rcvif = ifp;
1947
1948 		/* Remove header from mbuf and pass it on. */
1949 		m_adj(m, sizeof(struct ether_header));
1950
1951 #if 0 /* currently broken for some packets, possibly related to TCP options */
1952 		if (ifp->if_hwassist) {
1953 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1954 			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
1955 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1956 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
1957 				m->m_pkthdr.csum_data =
1958 				    cur_rx->bge_tcp_udp_csum;
1959 				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1960 			}
1961 		}
1962 #endif
1963
1964 		/*
1965 		 * If we received a packet with a vlan tag, pass it
1966 		 * to vlan_input() instead of ether_input().
		 */
1968 		if (have_tag) {
1969 			VLAN_INPUT_TAG(eh, m, vlan_tag);
1970 			have_tag = vlan_tag = 0;
1971 			continue;
1972 		}
1973
1974 		ether_input(ifp, eh, m);
1975 	}
1976
1977 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
1978 	if (stdcnt)
1979 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1980 	if (jumbocnt)
1981 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1982
1983 	return;
1984 }
1985
1986 static void
1987 bge_txeof(sc)
1988 	struct bge_softc *sc;
1989 {
1990 	struct bge_tx_bd *cur_tx = NULL;
1991 	struct ifnet *ifp;
1992
1993 	ifp = &sc->arpcom.ac_if;
1994
1995 	/*
1996 	 * Go through our tx ring and free mbufs for those
1997 	 * frames that have been sent.
1998 	 */
1999 	while (sc->bge_tx_saved_considx !=
2000 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2001 		u_int32_t idx = 0;
2002
2003 		idx = sc->bge_tx_saved_considx;
2004 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2005 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2006 			ifp->if_opackets++;
2007 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2008 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2009 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2010 		}
2011 		sc->bge_txcnt--;
2012 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2013 		ifp->if_timer = 0;
2014 	}
2015
2016 	if (cur_tx != NULL)
2017 		ifp->if_flags &= ~IFF_OACTIVE;
2018
2019 	return;
2020 }
2021
2022 static void
2023 bge_intr(xsc)
2024 	void *xsc;
2025 {
2026 	struct bge_softc *sc;
2027 	struct ifnet *ifp;
2028
2029 	sc = xsc;
2030 	ifp = &sc->arpcom.ac_if;
2031
2032 #ifdef notdef
2033 	/* Avoid this for now -- checking this register is expensive. */
2034 	/* Make sure this is really our interrupt. */
2035 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2036 		return;
2037 #endif
2038 	/* Ack interrupt and stop others from occurring. */
2039 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2040
2041 	/*
2042 	 * Process link state changes.
2043 	 * Grrr. The link status word in the status block does
2044 	 * not work correctly on the BCM5700 rev AX and BX chips,
2045 	 * according to all available information. Hence, we have
2046 	 * to enable MII interrupts in order to properly obtain
2047 	 * async link changes. Unfortunately, this also means that
2048 	 * we have to read the MAC status register to detect link
2049 	 * changes, thereby adding an additional register access to
2050 	 * the interrupt handler.
2051 	 */
2052
2053 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2054 		u_int32_t status;
2055
2056 		status = CSR_READ_4(sc, BGE_MAC_STS);
2057 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2058 			sc->bge_link = 0;
2059 			untimeout(bge_tick, sc, sc->bge_stat_ch);
2060 			bge_tick(sc);
2061 			/* Clear the interrupt */
2062 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2063 			    BGE_EVTENB_MI_INTERRUPT);
2064 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2065 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2066 			    BRGPHY_INTRS);
2067 		}
2068 	} else {
2069 		if (sc->bge_rdata->bge_status_block.bge_status &
2070 		    BGE_STATFLAG_LINKSTATE_CHANGED) {
2071 			sc->bge_link = 0;
2072 			untimeout(bge_tick, sc, sc->bge_stat_ch);
2073 			bge_tick(sc);
2074 			/* Clear the interrupt */
2075 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2076 			    BGE_MACSTAT_CFG_CHANGED);
2077 		}
2078 	}
2079
2080 	if (ifp->if_flags & IFF_RUNNING) {
2081 		/* Check RX return ring producer/consumer */
2082 		bge_rxeof(sc);
2083
2084 		/* Check TX ring producer/consumer */
2085 		bge_txeof(sc);
2086 	}
2087
2088 	bge_handle_events(sc);
2089
2090 	/* Re-enable interrupts.
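	   Writing 0 to the IRQ0 mailbox unmasks the interrupt again;
	   writing 1, as at the top of this handler, masks it while we
	   work.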
*/ 2091 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2092 2093 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) 2094 bge_start(ifp); 2095 2096 return; 2097} 2098 2099static void 2100bge_tick(xsc) 2101 void *xsc; 2102{ 2103 struct bge_softc *sc; 2104 struct mii_data *mii = NULL; 2105 struct ifmedia *ifm = NULL; 2106 struct ifnet *ifp; 2107 int s; 2108 2109 sc = xsc; 2110 ifp = &sc->arpcom.ac_if; 2111 2112 s = splimp(); 2113 2114 bge_stats_update(sc); 2115 sc->bge_stat_ch = timeout(bge_tick, sc, hz); 2116 if (sc->bge_link) { 2117 splx(s); 2118 return; 2119 } 2120 2121 if (sc->bge_tbi) { 2122 ifm = &sc->bge_ifmedia; 2123 if (CSR_READ_4(sc, BGE_MAC_STS) & 2124 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2125 sc->bge_link++; 2126 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2127 printf("bge%d: gigabit link up\n", sc->bge_unit); 2128 if (ifp->if_snd.ifq_head != NULL) 2129 bge_start(ifp); 2130 } 2131 splx(s); 2132 return; 2133 } 2134 2135 mii = device_get_softc(sc->bge_miibus); 2136 mii_tick(mii); 2137 2138 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2139 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2140 sc->bge_link++; 2141 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 2142 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 2143 printf("bge%d: gigabit link up\n", 2144 sc->bge_unit); 2145 if (ifp->if_snd.ifq_head != NULL) 2146 bge_start(ifp); 2147 } 2148 2149 splx(s); 2150 2151 return; 2152} 2153 2154static void 2155bge_stats_update(sc) 2156 struct bge_softc *sc; 2157{ 2158 struct ifnet *ifp; 2159 struct bge_stats *stats; 2160 2161 ifp = &sc->arpcom.ac_if; 2162 2163 stats = (struct bge_stats *)(sc->bge_vhandle + 2164 BGE_MEMWIN_START + BGE_STATS_BLOCK); 2165 2166 ifp->if_collisions += 2167 (stats->dot3StatsSingleCollisionFrames.bge_addr_lo + 2168 stats->dot3StatsMultipleCollisionFrames.bge_addr_lo + 2169 stats->dot3StatsExcessiveCollisions.bge_addr_lo + 2170 stats->dot3StatsLateCollisions.bge_addr_lo) - 2171 ifp->if_collisions; 2172 2173#ifdef notdef 2174 ifp->if_collisions += 2175 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2176 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2177 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2178 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2179 ifp->if_collisions; 2180#endif 2181 2182 return; 2183} 2184 2185/* 2186 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2187 * pointers to descriptors. 2188 */ 2189static int 2190bge_encap(sc, m_head, txidx) 2191 struct bge_softc *sc; 2192 struct mbuf *m_head; 2193 u_int32_t *txidx; 2194{ 2195 struct bge_tx_bd *f = NULL; 2196 struct mbuf *m; 2197 u_int32_t frag, cur, cnt = 0; 2198 u_int16_t csum_flags = 0; 2199 struct ifvlan *ifv = NULL; 2200 2201 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 2202 m_head->m_pkthdr.rcvif != NULL && 2203 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 2204 ifv = m_head->m_pkthdr.rcvif->if_softc; 2205 2206 m = m_head; 2207 cur = frag = *txidx; 2208 2209 if (m_head->m_pkthdr.csum_flags) { 2210 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2211 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2212 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2213 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2214 if (m_head->m_flags & M_LASTFRAG) 2215 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2216 else if (m_head->m_flags & M_FRAG) 2217 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2218 } 2219 2220 /* 2221 * Start packing the mbufs in this chain into 2222 * the fragment pointers. 
Stop when we run out 2223 * of fragments or hit the end of the mbuf chain. 2224 */ 2225 for (m = m_head; m != NULL; m = m->m_next) { 2226 if (m->m_len != 0) { 2227 f = &sc->bge_rdata->bge_tx_ring[frag]; 2228 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2229 break; 2230 BGE_HOSTADDR(f->bge_addr) = 2231 vtophys(mtod(m, vm_offset_t)); 2232 f->bge_len = m->m_len; 2233 f->bge_flags = csum_flags; 2234 if (ifv != NULL) { 2235 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2236 f->bge_vlan_tag = ifv->ifv_tag; 2237 } else { 2238 f->bge_vlan_tag = 0; 2239 } 2240 /* 2241 * Sanity check: avoid coming within 16 descriptors 2242 * of the end of the ring. 2243 */ 2244 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2245 return(ENOBUFS); 2246 cur = frag; 2247 BGE_INC(frag, BGE_TX_RING_CNT); 2248 cnt++; 2249 } 2250 } 2251 2252 if (m != NULL) 2253 return(ENOBUFS); 2254 2255 if (frag == sc->bge_tx_saved_considx) 2256 return(ENOBUFS); 2257 2258 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2259 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2260 sc->bge_txcnt += cnt; 2261 2262 *txidx = frag; 2263 2264 return(0); 2265} 2266 2267/* 2268 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2269 * to the mbuf data regions directly in the transmit descriptors. 2270 */ 2271static void 2272bge_start(ifp) 2273 struct ifnet *ifp; 2274{ 2275 struct bge_softc *sc; 2276 struct mbuf *m_head = NULL; 2277 u_int32_t prodidx = 0; 2278 2279 sc = ifp->if_softc; 2280 2281 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2282 return; 2283 2284 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2285 2286 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2287 IF_DEQUEUE(&ifp->if_snd, m_head); 2288 if (m_head == NULL) 2289 break; 2290 2291 /* 2292 * XXX 2293 * safety overkill. If this is a fragmented packet chain 2294 * with delayed TCP/UDP checksums, then only encapsulate 2295 * it if we have enough descriptors to handle the entire 2296 * chain at once. 2297 * (paranoia -- may not actually be needed) 2298 */ 2299 if (m_head->m_flags & M_FIRSTFRAG && 2300 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2301 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2302 m_head->m_pkthdr.csum_data + 16) { 2303 IF_PREPEND(&ifp->if_snd, m_head); 2304 ifp->if_flags |= IFF_OACTIVE; 2305 break; 2306 } 2307 } 2308 2309 /* 2310 * Pack the data into the transmit ring. If we 2311 * don't have room, set the OACTIVE flag and wait 2312 * for the NIC to drain the ring. 2313 */ 2314 if (bge_encap(sc, m_head, &prodidx)) { 2315 IF_PREPEND(&ifp->if_snd, m_head); 2316 ifp->if_flags |= IFF_OACTIVE; 2317 break; 2318 } 2319 2320 /* 2321 * If there's a BPF listener, bounce a copy of this frame 2322 * to him. 2323 */ 2324 if (ifp->if_bpf) 2325 bpf_mtap(ifp, m_head); 2326 } 2327 2328 /* Transmit */ 2329 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2330 2331 /* 2332 * Set a timeout in case the chip goes out to lunch. 2333 */ 2334 ifp->if_timer = 5; 2335 2336 return; 2337} 2338 2339/* 2340 * If we have a BCM5400 or BCM5401 PHY, we need to properly 2341 * program its internal DSP. Failing to do this can result in 2342 * massive packet loss at 1Gb speeds. 
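 * The bhack[] table below is applied as (register, value) pairs:
 * each BRGPHY_MII_DSP_ADDR_REG write selects a DSP location and the
 * BRGPHY_MII_DSP_RW_PORT write that follows stores the value there.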
/*
 * If we have a BCM5400 or BCM5401 PHY, we need to properly
 * program its internal DSP. Failing to do this can result in
 * massive packet loss at 1Gb speeds.
 */
static void
bge_phy_hack(sc)
	struct bge_softc *sc;
{
	struct bge_bcom_hack bhack[] = {
	    { BRGPHY_MII_AUXCTL, 0x4C20 },
	    { BRGPHY_MII_DSP_ADDR_REG, 0x0012 },
	    { BRGPHY_MII_DSP_RW_PORT, 0x1804 },
	    { BRGPHY_MII_DSP_ADDR_REG, 0x0013 },
	    { BRGPHY_MII_DSP_RW_PORT, 0x1204 },
	    { BRGPHY_MII_DSP_ADDR_REG, 0x8006 },
	    { BRGPHY_MII_DSP_RW_PORT, 0x0132 },
	    { BRGPHY_MII_DSP_ADDR_REG, 0x8006 },
	    { BRGPHY_MII_DSP_RW_PORT, 0x0232 },
	    { BRGPHY_MII_DSP_ADDR_REG, 0x201F },
	    { BRGPHY_MII_DSP_RW_PORT, 0x0A20 },
	    { 0, 0 } };
	u_int16_t vid, did;
	int i;

	vid = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR1);
	did = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR2);

	if (MII_OUI(vid, did) == MII_OUI_xxBROADCOM &&
	    (MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5400 ||
	    MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5401)) {
		i = 0;
		while(bhack[i].reg) {
			bge_miibus_writereg(sc->bge_dev, 1, bhack[i].reg,
			    bhack[i].val);
			i++;
		}
	}

	return;
}

static void
bge_init(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp;
	u_int16_t *m;
	int s;

	s = splimp();

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return;
	}

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		printf("bge%d: initialization failure\n", sc->bge_unit);
		splx(s);
		return;
	}

	ifp = &sc->arpcom.ac_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter. */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver. */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	sc->bge_stat_ch = timeout(bge_tick, sc, hz);

	return;
}

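/*
 * Worked example for the BGE_RX_MTU setting in bge_init() above: with
 * the default 1500-byte MTU, the chip is told to accept frames of up to
 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 bytes.
 */
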
/*
 * Set media options.
 */
static int
bge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
		return(0);
	}

	mii = device_get_softc(sc->bge_miibus);
	sc->bge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	bge_phy_hack(sc);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

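/*
 * Note on the SIOCSIFCAP case in bge_ioctl() below: ifr_reqcap carries
 * the capability bits userland wants enabled, so XORing it against
 * if_capenable yields exactly the bits whose state must change; of
 * those, only IFCAP_HWCSUM is acted on here.
 */
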
static int
bge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, mask, error = 0;
	struct mii_data *mii;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > BGE_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			bge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}

static void
bge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	return;
}

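/*
 * Note: bge_init() returns early while IFF_RUNNING is set, so
 * bge_watchdog() (like the SIOCSIFMTU case above) clears the flag
 * first to force a genuine stop/reset/reinit cycle.
 */
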
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ifp = &sc->arpcom.ac_if;

	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	untimeout(bge_tick, sc, sc->bge_stat_ch);

	/*
	 * Disable all of the receiver blocks.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(dev)
	device_t dev;
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);

	bge_stop(sc);
	bge_reset(sc);

	return;
}