/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }
};
static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5787,		"unknown BCM5787" },

	{ 0, NULL }
};
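
/*
 * Convenience macros grouping ASIC revisions into feature classes.
 * These gate the code paths below that differ between the original
 * 5700-class parts and the newer 5705-and-beyond parts: register
 * layout, statistics handling, jumbo frame support and so on.
 */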
#define	BGE_IS_5705_OR_BEYOND(sc)			   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5705 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5750 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define	BGE_IS_575X_PLUS(sc)				   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5750 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define	BGE_IS_5714_FAMILY(sc)				   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)

#define	BGE_IS_JUMBO_CAPABLE(sc)			   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5700 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5701 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5703 ||	   \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define	BGE_RESET_START 1
#define	BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_fake_autoneg = 0;
static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
    "Enable fake autonegotiation for certain blade systems");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}
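
/*
 * busdma delivers physical segment addresses to a callback instead of
 * returning them, so bge_dma_map_addr() below uses a bge_dmamap_arg to
 * carry the mapped bus address (and a segment-count sanity check) back
 * to the bus_dmamap_load() caller.
 */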
/*
 * Map a single buffer address.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	return (0);
}
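
/*
 * Called by the MII layer when the negotiated media changes.  We
 * mirror the resolved link parameters into the MAC mode register:
 * GMII port mode for 1000baseT, MII otherwise, plus the half-duplex
 * bit.
 */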
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
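
	/*
	 * An extended RX buffer descriptor holds up to four DMA
	 * segments; the switch below intentionally falls through so
	 * that a mapping with N segments fills address/length slots
	 * N-1 down to 0.
	 */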
	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/*
	 * Enable or disable promiscuous mode as needed.
	 * Do not strip VLAN tag when promiscuous mode is enabled.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}
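
/*
 * Program the 128-bit multicast hash filter.  Each accepted group
 * address is hashed with ether_crc32_le() and the low 7 bits of the
 * CRC pick one bit across the four 32-bit BGE_MAR registers: e.g.,
 * h = 0x53 sets bit 0x13 of word (0x53 & 0x60) >> 5 = 2.
 */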
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this, so we only do it if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}
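
/*
 * Ask the bootcode/ASF firmware to pause: post BGE_FW_PAUSE in the
 * shared-memory mailbox, raise the CPU event bit to get the firmware's
 * attention, then poll briefly for the firmware to acknowledge by
 * clearing the event bit.
 */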
void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);

		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}
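
/*
 * Note on ring control blocks (RCBs): each RCB tells the chip where a
 * ring lives.  It holds the 64-bit host address of the ring, a packed
 * maxlen/flags word, and a NIC-memory address where applicable.  Some
 * RCBs are mapped as registers (standard/jumbo/mini RX), while the
 * send and RX return ring RCBs are structures in NIC-internal memory.
 */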
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		/* Configure mbuf memory pool */
		if (sc->bge_flags & BGE_FLAG_EXTRAM) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_OR_BEYOND(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_flags & BGE_FLAG_EXTRAM)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_flags & BGE_FLAG_EXTRAM)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
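
	/*
	 * The send and RX return ring RCBs are not registers; they are
	 * structures in the first 32K of NIC-internal memory, which the
	 * memory window register was pointed at above.  RCB_WRITE_4()
	 * therefore takes window-relative offsets computed from vrcb.
	 */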
	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}
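
	/*
	 * The host coalescing engine batches status block updates and
	 * interrupts: roughly speaking, an update fires once the
	 * programmed coalescing ticks have elapsed or once the max-BD
	 * count has accumulated, per RX and TX direction.  The values
	 * written below come from defaults stored in the softc.
	 */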
	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}
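
/*
 * The chip id is taken from the upper 16 bits of BGE_PCI_MISC_CTL (see
 * bge_probe() below).  bge_lookup_rev() first tries an exact stepping
 * match in bge_revisions[], then falls back to bge_majorrevs[] keyed
 * on BGE_ASICREV() so that unknown new steppings still get a name.
 */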
const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name. Unfortunately, this
 * is possible only later in bge_attach(), when we have established
 * access to EEPROM.
 */
static int
bge_probe(device_t dev)
{
	struct bge_type *t = bge_devs;
	struct bge_softc *sc = device_get_softc(dev);

	bzero(sc, sizeof(struct bge_softc));
	sc->bge_dev = dev;

	while (t->bge_vid != 0) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
			char buf[64];
			const struct bge_revision *br;
			const struct bge_vendor *v;
			uint32_t id;

			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
			    BGE_PCIMISCCTL_ASICREV;
			br = bge_lookup_rev(id);
			id >>= 16;
			v = bge_lookup_vendor(t->bge_vid);
			if (br == NULL)
				snprintf(buf, 64, "%s unknown ASIC (%#04x)",
				    v->v_name, id);
			else
				snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
				    v->v_name, br->br_name, id);
			device_set_desc_copy(dev, buf);
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_flags |= BGE_FLAG_NO3LED;
			return (0);
		}
		t++;
	}

	return (ENXIO);
}
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);

	/* Destroy standard RX ring. */
	if (sc->bge_cdata.bge_rx_std_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring. */
	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
	    sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring. */
	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map &&
	    sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring. */
	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block. */
	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block. */
	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	/* Destroy the parent tag. */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
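
/*
 * Build the busdma tag hierarchy: one parent tag for the device, a
 * child tag shared by RX and TX mbufs, a separate tag for jumbo mbufs,
 * and one tag per descriptor ring.  Each ring's memory is obtained
 * with bus_dmamem_alloc() and immediately loaded to learn its bus
 * address.  bge_dma_free() above tears all of this down again.
 */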
static int
bge_dma_alloc(device_t dev)
{
	struct bge_dmamap_arg ctx;
	struct bge_softc *sc;
	int i, error;

	sc = device_get_softc(dev);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bge_cdata.bge_parent_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Create tag for RX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tag for standard RX ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for standard RX ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_std_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);

	/* Load the address of the standard RX ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
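	/*
	 * Each ring below repeats the three-step busdma idiom used for
	 * the standard RX ring above: create a one-segment tag, allocate
	 * DMA'able memory with it, then load the map to learn the bus
	 * address. A condensed sketch of the pattern (illustrative only;
	 * "ring_sz", "tag", "map" and "va" are hypothetical names):
	 *
	 *	bus_dma_tag_create(parent, PAGE_SIZE, 0, BUS_SPACE_MAXADDR,
	 *	    BUS_SPACE_MAXADDR, NULL, NULL, ring_sz, 1, ring_sz, 0,
	 *	    NULL, NULL, &tag);
	 *	bus_dmamem_alloc(tag, &va, BUS_DMA_NOWAIT, &map);
	 *	bus_dmamap_load(tag, map, va, ring_sz, bge_dma_map_addr,
	 *	    &ctx, BUS_DMA_NOWAIT);
	 *	paddr = ctx.bge_busaddr;
	 */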
	/* Create tags for jumbo mbufs. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}

		/* Create tag for jumbo RX ring. */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo ring dma tag\n");
			return (ENOMEM);
		}

		/* Allocate DMA'able memory for jumbo RX ring. */
		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
		if (error)
			return (ENOMEM);

		/* Load the address of the jumbo RX ring. */
		ctx.bge_maxsegs = 1;
		ctx.sc = sc;
		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
		if (error)
			return (ENOMEM);

		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;

		/* Create DMA maps for jumbo RX buffers. */
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}
	}

	/* Create tag for RX return ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for RX return ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_return_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
	    BGE_RX_RTN_RING_SZ(sc));

	/* Load the address of the RX return ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;

	/* Create tag for TX ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_ring_tag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for TX ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_tx_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
	/* Load the address of the TX ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;

	/* Create tag for status block. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for status block. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_status_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);

	/* Load the address of the status block. */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;
	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;

	/* Create tag for statistics block. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_stats_tag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for statistics block. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_stats_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);

	/* Load the address of the statistics block. */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;
	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;

	return (0);
}
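/*
 * For reference, the bus_dmamap_load() calls above all hand their
 * result back through bge_dma_map_addr() (defined earlier in this
 * file). A minimal sketch of such a callback, assuming the
 * single-segment case only ("example_dma_map_addr" is a hypothetical
 * name, not part of the driver):
 */
#ifdef notdef
static void
example_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct bge_dmamap_arg *ctx = arg;

	if (error != 0 || nseg != 1)
		return;
	/* Record the bus address of the lone segment for the caller. */
	ctx->bge_busaddr = segs[0].ds_addr;
}
#endif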
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_tmp = 0;
	u_char eaddr[6];
	int error = 0, rid;
	int trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bge_res == NULL) {
		device_printf(sc->bge_dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * XXX: Broadcom Linux driver.  Not in specs or errata.
	 * PCI-Express?
	 */
	if (BGE_IS_5705_OR_BEYOND(sc)) {
		uint32_t v;

		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
			if ((v & 0xff) == BGE_PCIE_CAPID)
				sc->bge_flags |= BGE_FLAG_PCIE;
		}
	}

	/*
	 * PCI-X?
	 */
	if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) == 0)
		sc->bge_flags |= BGE_FLAG_PCIX;

	/* Try to reset the chip. */
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	sc->bge_asf_mode = 0;
	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
	    == BGE_MAGIC_NUMBER)) {
		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
		    & BGE_HWCFG_ASF) {
			sc->bge_asf_mode |= ASF_ENABLE;
			sc->bge_asf_mode |= ASF_STACKUP;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	/* Try to reset the chip again the nice way. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_tmp = bge_readmem_ind(sc, 0x0c14);
	if ((mac_tmp >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_tmp >> 8);
		eaddr[1] = (u_char)mac_tmp;
		mac_tmp = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_tmp >> 24);
		eaddr[3] = (u_char)(mac_tmp >> 16);
		eaddr[4] = (u_char)(mac_tmp >> 8);
		eaddr[5] = (u_char)mac_tmp;
	} else if (bge_read_eeprom(sc, eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(sc->bge_dev, "failed to read station address\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_OR_BEYOND(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(dev)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}
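	/*
	 * The interrupt coalescing defaults set below trade interrupt
	 * rate for latency: the controller posts a status block update
	 * after roughly bge_rx/tx_coal_ticks microseconds or after
	 * bge_rx/tx_max_coal_bds buffer descriptors have completed,
	 * whichever happens first.
	 */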
	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure. */
	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->bge_dev, "failed to if_alloc()\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		ifp->if_capabilities &= ~IFCAP_HWCSUM;
		ifp->if_capenable &= ~IFCAP_HWCSUM;
		ifp->if_hwassist = 0;
	}

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			bge_release_resources(sc);
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_flags |= BGE_FLAG_TBI;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_flags |= BGE_FLAG_TBI;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access to the
		 * PHY during the probe if ASF is running. Retry a
		 * couple of times if we get a conflict with the ASF
		 * firmware accessing the PHY.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

		trys = 0;
again:
		bge_asf_driver_up(sc);

		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			if (trys++ < 4) {
				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
				    BMCR_RESET);
				goto again;
			}

			device_printf(sc->bge_dev, "MII without any PHY!\n");
			bge_release_resources(sc);
			error = ENXIO;
			goto fail;
		}

		/*
		 * Now tell the firmware we are going up after probing
		 * the PHY.
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    bge_intr, sc, &sc->bge_intrhand);
	if (error) {
		bge_detach(dev);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
	}

fail:
	return (error);
}

static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);

	ether_ifdetach(ifp);

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);

	return (0);
}

static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);
}
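/*
 * Reset the chip. The sequence below mirrors the code that follows:
 * save the PCI state words clobbered by a reset, post the magic
 * number to the firmware mailbox, issue the core-clock reset,
 * restore the PCI state, and then wait for the bootcode to write
 * back the one's complement of the magic number as its "init done"
 * handshake.
 */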
static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, pcistate, reset;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | (65 << 1);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset. */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1 << 29));
			reset |= (1 << 29);
		}
	}

	/*
	 * Write the magic number to the firmware mailbox at 0xb50
	 * so that the driver can synchronize with the firmware.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/* Issue global reset. */
	bge_writereg_ind(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1 << 15), 4);
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		uint32_t val;

		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "firmware handshake timed out\n");
		return (0);
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state.  This is a
	 * fairly good indicator of reset completion.  If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}
	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_BYTESWAP_DATA);

	/* Tell the ASF firmware we are up. */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1 << 25));
	}
	DELAY(10000);

	return (0);
}
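/*
 * The receive and transmit paths below advance their ring indexes
 * with the BGE_INC() macro from if_bgereg.h, which is essentially a
 * modular increment:
 *
 *	idx = (idx + 1) % ring_cnt;
 */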
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_rx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);

	while (sc->bge_rx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (!(ifp->if_flags & IFF_PROMISC) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}

		BGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BGE_LOCK(sc);
	}

	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
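/*
 * The RX checksum test above relies on the hardware returning a
 * one's-complement sum: summing a valid IP header (including its
 * checksum field) folds to 0xffff, so XOR-ing with 0xffff yields
 * zero. An illustrative helper (hypothetical, not used by the
 * driver):
 */
#ifdef notdef
static int
example_ip_csum_ok(uint16_t hw_csum)
{

	/* 0xffff (i.e., hw_csum ^ 0xffff == 0) means "header valid". */
	return ((hw_csum ^ 0xffff) == 0);
}
#endif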
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_tx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		sc->bge_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

#ifdef DEVICE_POLLING
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint32_t statusword;

	BGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	statusword = atomic_readandclear_32(
	    &sc->bge_ldata.bge_status_block->bge_status);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	/*
	 * Note the link event; it will be processed by the
	 * POLL_AND_CHECK_STATUS command below.
	 */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */
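/*
 * Interrupt handshake used by bge_intr() below: writing 1 to the
 * BGE_MBX_IRQ0_LO mailbox acks the interrupt and keeps further ones
 * masked while we work; writing 0 on the way out re-arms the device.
 * The read of BGE_MAC_STS at the top doubles as the mandatory PCI
 * flush noted in the code.
 */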
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer. */
		bge_txeof(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}

static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s. */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		else {
			sc->bge_asf_count = 5;
			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
			    BGE_FW_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
		}
	}
}

static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;

	BGE_LOCK_ASSERT(sc);

	if (BGE_IS_5705_OR_BEYOND(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/* Don't mess with the PHY in IPMI/ASF mode. */
		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
			mii_tick(mii);
	} else {
		/*
		 * Since auto-polling can't be used in TBI mode, we poll
		 * the link status manually: register a pending link
		 * event here and trigger an interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{
| 36 37/* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring. 67 */ 68 69#ifdef HAVE_KERNEL_OPTION_HEADERS 70#include "opt_device_polling.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/endian.h> 75#include <sys/systm.h> 76#include <sys/sockio.h> 77#include <sys/mbuf.h> 78#include <sys/malloc.h> 79#include <sys/kernel.h> 80#include <sys/module.h> 81#include <sys/socket.h> 82#include <sys/sysctl.h> 83 84#include <net/if.h> 85#include <net/if_arp.h> 86#include <net/ethernet.h> 87#include <net/if_dl.h> 88#include <net/if_media.h> 89 90#include <net/bpf.h> 91 92#include <net/if_types.h> 93#include <net/if_vlan_var.h> 94 95#include <netinet/in_systm.h> 96#include <netinet/in.h> 97#include <netinet/ip.h> 98 99#include <machine/bus.h> 100#include <machine/resource.h> 101#include <sys/bus.h> 102#include <sys/rman.h> 103 104#include <dev/mii/mii.h> 105#include <dev/mii/miivar.h> 106#include "miidevs.h" 107#include <dev/mii/brgphyreg.h> 108 109#include <dev/pci/pcireg.h> 110#include <dev/pci/pcivar.h> 111 112#include <dev/bge/if_bgereg.h> 113 114#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 115#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 116 117MODULE_DEPEND(bge, pci, 1, 1, 1); 118MODULE_DEPEND(bge, ether, 1, 1, 1); 119MODULE_DEPEND(bge, miibus, 1, 1, 1); 120 121/* "device miibus" required. See GENERIC if you get errors here. */ 122#include "miibus_if.h" 123 124/* 125 * Various supported device vendors/types and their names. Note: the 126 * spec seems to indicate that the hardware still has Alteon's vendor 127 * ID burned into it, though it will always be overriden by the vendor 128 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
129 */ 130static struct bge_type { 131 uint16_t bge_vid; 132 uint16_t bge_did; 133} bge_devs[] = { 134 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, 135 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, 136 137 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, 138 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, 139 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, 140 141 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, 142 143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, 144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, 145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, 146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, 147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, 148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, 149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, 150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, 152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, 154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, 156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, 158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, 160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 191 192 { SK_VENDORID, SK_DEVICEID_ALTIMA }, 193 194 { TC_VENDORID, TC_DEVICEID_3C996 }, 195 196 { 0, 0 } 197}; 198 199static const struct bge_vendor { 200 uint16_t v_id; 201 const char *v_name; 202} bge_vendors[] = { 203 { ALTEON_VENDORID, "Alteon" }, 204 { ALTIMA_VENDORID, "Altima" }, 205 { APPLE_VENDORID, "Apple" }, 206 { BCOM_VENDORID, "Broadcom" }, 207 { SK_VENDORID, "SysKonnect" }, 208 { TC_VENDORID, "3Com" }, 209 210 { 0, NULL } 211}; 212 213static const struct bge_revision { 214 uint32_t br_chipid; 215 const char *br_name; 216} bge_revisions[] = { 217 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 218 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 219 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 220 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 221 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 222 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 223 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 224 { 
BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 225 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 226 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 227 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 228 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 229 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 230 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 231 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 232 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 233 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 234 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 235 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 236 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 237 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 238 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 239 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 240 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 241 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 242 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 243 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 244 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 245 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 246 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 247 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 248 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 249 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 250 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 251 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 252 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 253 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 254 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 255 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 256 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 257 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 258 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 259 260 { 0, NULL } 261}; 262 263/* 264 * Some defaults for major revisions, so that newer steppings 265 * that we don't know about have a shot at working. 266 */ 267static const struct bge_revision bge_majorrevs[] = { 268 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 269 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 270 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 271 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 272 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 273 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 274 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 275 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 276 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 277 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 278 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 279 { BGE_ASICREV_BCM5787, "unknown BCM5787" }, 280 281 { 0, NULL } 282}; 283 284#define BGE_IS_5705_OR_BEYOND(sc) \ 285 ((sc)->bge_asicrev == BGE_ASICREV_BCM5705 || \ 286 (sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \ 287 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \ 288 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \ 289 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \ 290 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \ 291 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \ 292 (sc)->bge_asicrev == BGE_ASICREV_BCM5787) 293 294#define BGE_IS_575X_PLUS(sc) \ 295 ((sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \ 296 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \ 297 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \ 298 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \ 299 (sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \ 300 (sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \ 301 (sc)->bge_asicrev == BGE_ASICREV_BCM5787) 302 303#define BGE_IS_5714_FAMILY(sc) \ 304 ((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \ 305 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \ 306 (sc)->bge_asicrev == BGE_ASICREV_BCM5714) 307 308#define BGE_IS_JUMBO_CAPABLE(sc) \ 309 ((sc)->bge_asicrev == BGE_ASICREV_BCM5700 || \ 310 (sc)->bge_asicrev == BGE_ASICREV_BCM5701 || 
\ 311 (sc)->bge_asicrev == BGE_ASICREV_BCM5703 || \ 312 (sc)->bge_asicrev == BGE_ASICREV_BCM5704) 313 314const struct bge_revision * bge_lookup_rev(uint32_t); 315const struct bge_vendor * bge_lookup_vendor(uint16_t); 316static int bge_probe(device_t); 317static int bge_attach(device_t); 318static int bge_detach(device_t); 319static int bge_suspend(device_t); 320static int bge_resume(device_t); 321static void bge_release_resources(struct bge_softc *); 322static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 323static int bge_dma_alloc(device_t); 324static void bge_dma_free(struct bge_softc *); 325 326static void bge_txeof(struct bge_softc *); 327static void bge_rxeof(struct bge_softc *); 328 329static void bge_asf_driver_up (struct bge_softc *); 330static void bge_tick(void *); 331static void bge_stats_update(struct bge_softc *); 332static void bge_stats_update_regs(struct bge_softc *); 333static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 334 335static void bge_intr(void *); 336static void bge_start_locked(struct ifnet *); 337static void bge_start(struct ifnet *); 338static int bge_ioctl(struct ifnet *, u_long, caddr_t); 339static void bge_init_locked(struct bge_softc *); 340static void bge_init(void *); 341static void bge_stop(struct bge_softc *); 342static void bge_watchdog(struct bge_softc *); 343static void bge_shutdown(device_t); 344static int bge_ifmedia_upd_locked(struct ifnet *); 345static int bge_ifmedia_upd(struct ifnet *); 346static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 347 348static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 349static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 350 351static void bge_setpromisc(struct bge_softc *); 352static void bge_setmulti(struct bge_softc *); 353 354static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *); 355static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 356static int bge_init_rx_ring_std(struct bge_softc *); 357static void bge_free_rx_ring_std(struct bge_softc *); 358static int bge_init_rx_ring_jumbo(struct bge_softc *); 359static void bge_free_rx_ring_jumbo(struct bge_softc *); 360static void bge_free_tx_ring(struct bge_softc *); 361static int bge_init_tx_ring(struct bge_softc *); 362 363static int bge_chipinit(struct bge_softc *); 364static int bge_blockinit(struct bge_softc *); 365 366static uint32_t bge_readmem_ind(struct bge_softc *, int); 367static void bge_writemem_ind(struct bge_softc *, int, int); 368#ifdef notdef 369static uint32_t bge_readreg_ind(struct bge_softc *, int); 370#endif 371static void bge_writereg_ind(struct bge_softc *, int, int); 372 373static int bge_miibus_readreg(device_t, int, int); 374static int bge_miibus_writereg(device_t, int, int, int); 375static void bge_miibus_statchg(device_t); 376#ifdef DEVICE_POLLING 377static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 378#endif 379 380#define BGE_RESET_START 1 381#define BGE_RESET_STOP 2 382static void bge_sig_post_reset(struct bge_softc *, int); 383static void bge_sig_legacy(struct bge_softc *, int); 384static void bge_sig_pre_reset(struct bge_softc *, int); 385static int bge_reset(struct bge_softc *); 386static void bge_link_upd(struct bge_softc *); 387 388static device_method_t bge_methods[] = { 389 /* Device interface */ 390 DEVMETHOD(device_probe, bge_probe), 391 DEVMETHOD(device_attach, bge_attach), 392 DEVMETHOD(device_detach, bge_detach), 393 DEVMETHOD(device_shutdown, bge_shutdown), 394 DEVMETHOD(device_suspend, 
bge_suspend), 395 DEVMETHOD(device_resume, bge_resume), 396 397 /* bus interface */ 398 DEVMETHOD(bus_print_child, bus_generic_print_child), 399 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 400 401 /* MII interface */ 402 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 403 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 404 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 405 406 { 0, 0 } 407}; 408 409static driver_t bge_driver = { 410 "bge", 411 bge_methods, 412 sizeof(struct bge_softc) 413}; 414 415static devclass_t bge_devclass; 416 417DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 418DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 419 420static int bge_fake_autoneg = 0; 421static int bge_allow_asf = 1; 422 423TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); 424TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 425 426SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); 427SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0, 428 "Enable fake autonegotiation for certain blade systems"); 429SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, 430 "Allow ASF mode if available"); 431 432static uint32_t 433bge_readmem_ind(struct bge_softc *sc, int off) 434{ 435 device_t dev; 436 437 dev = sc->bge_dev; 438 439 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 440 return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 441} 442 443static void 444bge_writemem_ind(struct bge_softc *sc, int off, int val) 445{ 446 device_t dev; 447 448 dev = sc->bge_dev; 449 450 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 451 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 452} 453 454#ifdef notdef 455static uint32_t 456bge_readreg_ind(struct bge_softc *sc, int off) 457{ 458 device_t dev; 459 460 dev = sc->bge_dev; 461 462 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 463 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 464} 465#endif 466 467static void 468bge_writereg_ind(struct bge_softc *sc, int off, int val) 469{ 470 device_t dev; 471 472 dev = sc->bge_dev; 473 474 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 475 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 476} 477 478/* 479 * Map a single buffer address. 480 */ 481 482static void 483bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 484{ 485 struct bge_dmamap_arg *ctx; 486 487 if (error) 488 return; 489 490 ctx = arg; 491 492 if (nseg > ctx->bge_maxsegs) { 493 ctx->bge_maxsegs = 0; 494 return; 495 } 496 497 ctx->bge_busaddr = segs->ds_addr; 498} 499 500/* 501 * Read a byte of data stored in the EEPROM at address 'addr.' The 502 * BCM570x supports both the traditional bitbang interface and an 503 * auto access interface for reading the EEPROM. We use the auto 504 * access method. 505 */ 506static uint8_t 507bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 508{ 509 int i; 510 uint32_t byte = 0; 511 512 /* 513 * Enable use of auto EEPROM access so we can avoid 514 * having to use the bitbang method. 515 */ 516 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 517 518 /* Reset the EEPROM, load the clock period. */ 519 CSR_WRITE_4(sc, BGE_EE_ADDR, 520 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 521 DELAY(20); 522 523 /* Issue the read EEPROM command. 
*/ 524 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 525 526 /* Wait for completion */ 527 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 528 DELAY(10); 529 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 530 break; 531 } 532 533 if (i == BGE_TIMEOUT) { 534 device_printf(sc->bge_dev, "EEPROM read timed out\n"); 535 return (1); 536 } 537 538 /* Get result. */ 539 byte = CSR_READ_4(sc, BGE_EE_DATA); 540 541 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 542 543 return (0); 544} 545 546/* 547 * Read a sequence of bytes from the EEPROM. 548 */ 549static int 550bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 551{ 552 int i, error = 0; 553 uint8_t byte = 0; 554 555 for (i = 0; i < cnt; i++) { 556 error = bge_eeprom_getbyte(sc, off + i, &byte); 557 if (error) 558 break; 559 *(dest + i) = byte; 560 } 561 562 return (error ? 1 : 0); 563} 564 565static int 566bge_miibus_readreg(device_t dev, int phy, int reg) 567{ 568 struct bge_softc *sc; 569 uint32_t val, autopoll; 570 int i; 571 572 sc = device_get_softc(dev); 573 574 /* 575 * Broadcom's own driver always assumes the internal 576 * PHY is at GMII address 1. On some chips, the PHY responds 577 * to accesses at all addresses, which could cause us to 578 * bogusly attach the PHY 32 times at probe type. Always 579 * restricting the lookup to address 1 is simpler than 580 * trying to figure out which chips revisions should be 581 * special-cased. 582 */ 583 if (phy != 1) 584 return (0); 585 586 /* Reading with autopolling on may trigger PCI errors */ 587 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 588 if (autopoll & BGE_MIMODE_AUTOPOLL) { 589 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 590 DELAY(40); 591 } 592 593 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 594 BGE_MIPHY(phy)|BGE_MIREG(reg)); 595 596 for (i = 0; i < BGE_TIMEOUT; i++) { 597 val = CSR_READ_4(sc, BGE_MI_COMM); 598 if (!(val & BGE_MICOMM_BUSY)) 599 break; 600 } 601 602 if (i == BGE_TIMEOUT) { 603 device_printf(sc->bge_dev, "PHY read timed out\n"); 604 val = 0; 605 goto done; 606 } 607 608 val = CSR_READ_4(sc, BGE_MI_COMM); 609 610done: 611 if (autopoll & BGE_MIMODE_AUTOPOLL) { 612 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 613 DELAY(40); 614 } 615 616 if (val & BGE_MICOMM_READFAIL) 617 return (0); 618 619 return (val & 0xFFFF); 620} 621 622static int 623bge_miibus_writereg(device_t dev, int phy, int reg, int val) 624{ 625 struct bge_softc *sc; 626 uint32_t autopoll; 627 int i; 628 629 sc = device_get_softc(dev); 630 631 /* Reading with autopolling on may trigger PCI errors */ 632 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 633 if (autopoll & BGE_MIMODE_AUTOPOLL) { 634 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 635 DELAY(40); 636 } 637 638 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 639 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 640 641 for (i = 0; i < BGE_TIMEOUT; i++) { 642 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 643 break; 644 } 645 646 if (autopoll & BGE_MIMODE_AUTOPOLL) { 647 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 648 DELAY(40); 649 } 650 651 if (i == BGE_TIMEOUT) { 652 device_printf(sc->bge_dev, "PHY read timed out\n"); 653 return (0); 654 } 655 656 return (0); 657} 658 659static void 660bge_miibus_statchg(device_t dev) 661{ 662 struct bge_softc *sc; 663 struct mii_data *mii; 664 sc = device_get_softc(dev); 665 mii = device_get_softc(sc->bge_miibus); 666 667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 669 BGE_SETBIT(sc, BGE_MAC_MODE, 
BGE_PORTMODE_GMII); 670 else 671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 672 673 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 674 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 675 else 676 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 677} 678 679/* 680 * Intialize a standard receive ring descriptor. 681 */ 682static int 683bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m) 684{ 685 struct mbuf *m_new = NULL; 686 struct bge_rx_bd *r; 687 struct bge_dmamap_arg ctx; 688 int error; 689 690 if (m == NULL) { 691 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 692 if (m_new == NULL) 693 return (ENOBUFS); 694 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 695 } else { 696 m_new = m; 697 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 698 m_new->m_data = m_new->m_ext.ext_buf; 699 } 700 701 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 702 m_adj(m_new, ETHER_ALIGN); 703 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 704 r = &sc->bge_ldata.bge_rx_std_ring[i]; 705 ctx.bge_maxsegs = 1; 706 ctx.sc = sc; 707 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 708 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 709 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 710 if (error || ctx.bge_maxsegs == 0) { 711 if (m == NULL) { 712 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 713 m_freem(m_new); 714 } 715 return (ENOMEM); 716 } 717 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr); 718 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr); 719 r->bge_flags = BGE_RXBDFLAG_END; 720 r->bge_len = m_new->m_len; 721 r->bge_idx = i; 722 723 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 724 sc->bge_cdata.bge_rx_std_dmamap[i], 725 BUS_DMASYNC_PREREAD); 726 727 return (0); 728} 729 730/* 731 * Initialize a jumbo receive ring descriptor. This allocates 732 * a jumbo buffer from the pool managed internally by the driver. 733 */ 734static int 735bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 736{ 737 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 738 struct bge_extrx_bd *r; 739 struct mbuf *m_new = NULL; 740 int nsegs; 741 int error; 742 743 if (m == NULL) { 744 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 745 if (m_new == NULL) 746 return (ENOBUFS); 747 748 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 749 if (!(m_new->m_flags & M_EXT)) { 750 m_freem(m_new); 751 return (ENOBUFS); 752 } 753 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 754 } else { 755 m_new = m; 756 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 757 m_new->m_data = m_new->m_ext.ext_buf; 758 } 759 760 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 761 m_adj(m_new, ETHER_ALIGN); 762 763 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 764 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 765 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 766 if (error) { 767 if (m == NULL) 768 m_freem(m_new); 769 return (error); 770 } 771 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 772 773 /* 774 * Fill in the extended RX buffer descriptor. 
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor. The buffer may
	 * map to as many as four DMA segments; the cases below fall
	 * through on purpose so every mapped segment is recorded.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments", __func__, nsegs);
	}

	/* Sync with the tag the map was created from (the jumbo tag). */
	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}
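/*
 * Both ring loaders above publish their work by writing the index of
 * the last initialized descriptor (i - 1) to the ring's producer
 * mailbox (BGE_MBX_RX_STD_PROD_LO / BGE_MBX_RX_JUMBO_PROD_LO); the
 * chip only consumes buffers up to the producer index it has been
 * told about, which is why the bus_dmamap_sync() of the ring must
 * happen before the mailbox write.
 */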
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/*
	 * Enable or disable promiscuous mode as needed.
	 * Do not strip VLAN tag when promiscuous mode is enabled.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
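	/*
	 * The multicast filter is a 128-bit hash table spread across
	 * the four 32-bit BGE_MAR0..BGE_MAR3 registers. For each
	 * address we keep the low 7 bits of the little-endian CRC32
	 * of the address: bits 6-5 select one of the four registers
	 * and bits 4-0 select a bit within it. For example, a CRC of
	 * 0x2a ends up as bit 10 of hashes[1].
	 */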
	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this, so we only do it if ASF
	 * is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			/* UNLOAD DONE */
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		/*
		 * Set the CPU event bit (1 << 14) to fire the event,
		 * then spin until the bit reads back clear.
		 */
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register. */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register.
*/ 1122 if (sc->bge_flags & BGE_FLAG_PCIE) { 1123 /* PCI Express bus */ 1124 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1125 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1126 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1127 } else if (sc->bge_flags & BGE_FLAG_PCIX) { 1128 /* PCI-X bus */ 1129 if (BGE_IS_5714_FAMILY(sc)) { 1130 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1131 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1132 /* XXX magic values, Broadcom-supplied Linux driver */ 1133 if (sc->bge_asicrev == BGE_ASICREV_BCM5780) 1134 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1135 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1136 else 1137 dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15); 1138 1139 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1140 /* 1141 * The 5704 uses a different encoding of read/write 1142 * watermarks. 1143 */ 1144 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1145 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1146 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1147 else 1148 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1149 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1150 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1151 (0x0F); 1152 1153 /* 1154 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1155 * for hardware bugs. 1156 */ 1157 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1158 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1159 uint32_t tmp; 1160 1161 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1162 if (tmp == 0x6 || tmp == 0x7) 1163 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1164 } 1165 } else 1166 /* Conventional PCI bus */ 1167 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1168 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1169 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1170 (0x0F); 1171 1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1173 sc->bge_asicrev == BGE_ASICREV_BCM5704 || 1174 sc->bge_asicrev == BGE_ASICREV_BCM5705) 1175 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1176 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1177 1178 /* 1179 * Set up general mode register. 1180 */ 1181 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1182 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1183 BGE_MODECTL_TX_NO_PHDR_CSUM); 1184 1185 /* 1186 * Tell the firmware the driver is running 1187 */ 1188 if (sc->bge_asf_mode & ASF_STACKUP) 1189 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 1190 1191 /* 1192 * Disable memory write invalidate. Apparently it is not supported 1193 * properly by these devices. 1194 */ 1195 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); 1196 1197#ifdef __brokenalpha__ 1198 /* 1199 * Must insure that we do not cross an 8K (bytes) boundary 1200 * for DMA reads. Our highest limit is 1K bytes. This is a 1201 * restriction on some ALPHA platforms with early revision 1202 * 21174 PCI chipsets, such as the AlphaPC 164lx 1203 */ 1204 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1205 BGE_PCI_READ_BNDRY_1024BYTES, 4); 1206#endif 1207 1208 /* Set the timer prescaler (always 66Mhz) */ 1209 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1210 1211 return (0); 1212} 1213 1214static int 1215bge_blockinit(struct bge_softc *sc) 1216{ 1217 struct bge_rcb *rcb; 1218 bus_size_t vrcb; 1219 bge_hostaddr taddr; 1220 int i; 1221 1222 /* 1223 * Initialize the memory window pointer register so that 1224 * we can access the first 32K of internal NIC RAM. This will 1225 * allow us to set up the TX send ring RCBs and the RX return 1226 * ring RCBs, plus other things which live in NIC memory. 
1227 */ 1228 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1229 1230 /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1231 1232 if (!(BGE_IS_5705_OR_BEYOND(sc))) { 1233 /* Configure mbuf memory pool */ 1234 if (sc->bge_flags & BGE_FLAG_EXTRAM) { 1235 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1236 BGE_EXT_SSRAM); 1237 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1238 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1239 else 1240 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1241 } else { 1242 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1243 BGE_BUFFPOOL_1); 1244 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1245 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1246 else 1247 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1248 } 1249 1250 /* Configure DMA resource pool */ 1251 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1252 BGE_DMA_DESCRIPTORS); 1253 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1254 } 1255 1256 /* Configure mbuf pool watermarks */ 1257 if (!(BGE_IS_5705_OR_BEYOND(sc))) { 1258 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1259 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1260 } else { 1261 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1262 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1263 } 1264 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1265 1266 /* Configure DMA resource watermarks */ 1267 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1268 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1269 1270 /* Enable buffer manager */ 1271 if (!(BGE_IS_5705_OR_BEYOND(sc))) { 1272 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1273 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1274 1275 /* Poll for buffer manager start indication */ 1276 for (i = 0; i < BGE_TIMEOUT; i++) { 1277 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1278 break; 1279 DELAY(10); 1280 } 1281 1282 if (i == BGE_TIMEOUT) { 1283 device_printf(sc->bge_dev, 1284 "buffer manager failed to start\n"); 1285 return (ENXIO); 1286 } 1287 } 1288 1289 /* Enable flow-through queues */ 1290 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1291 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1292 1293 /* Wait until queue initialization is complete */ 1294 for (i = 0; i < BGE_TIMEOUT; i++) { 1295 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1296 break; 1297 DELAY(10); 1298 } 1299 1300 if (i == BGE_TIMEOUT) { 1301 device_printf(sc->bge_dev, "flow-through queue init failed\n"); 1302 return (ENXIO); 1303 } 1304 1305 /* Initialize the standard RX ring control block */ 1306 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1307 rcb->bge_hostaddr.bge_addr_lo = 1308 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1309 rcb->bge_hostaddr.bge_addr_hi = 1310 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1311 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1312 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 1313 if (BGE_IS_5705_OR_BEYOND(sc)) 1314 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1315 else 1316 rcb->bge_maxlen_flags = 1317 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1318 if (sc->bge_flags & BGE_FLAG_EXTRAM) 1319 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1320 else 1321 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1322 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1323 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1324 1325 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1326 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1327 1328 /* 1329 * Initialize the jumbo RX ring control block 
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_flags & BGE_FLAG_EXTRAM)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB. */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
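	/*
	 * A note on the RCB encoding used throughout this function:
	 * each ring control block packs a 16-bit maximum frame length
	 * and 16-bit flags into one 32-bit word, so the macro
	 * BGE_RCB_MAXLEN_FLAGS(maxlen, flags) is essentially
	 * ((maxlen) << 16 | (flags)) (see if_bgereg.h). The jumbo RCB
	 * above uses a maxlen of 0 with BGE_RCB_FLAG_USE_EXT_RX_BD to
	 * select the four-segment extended descriptor format.
	 */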
	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring). */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes. */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX. */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap. */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up. */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down.
*/ 1463 for (i = 0; i < BGE_TIMEOUT; i++) { 1464 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1465 break; 1466 DELAY(10); 1467 } 1468 1469 if (i == BGE_TIMEOUT) { 1470 device_printf(sc->bge_dev, 1471 "host coalescing engine failed to idle\n"); 1472 return (ENXIO); 1473 } 1474 1475 /* Set up host coalescing defaults */ 1476 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1477 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1478 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1479 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1480 if (!(BGE_IS_5705_OR_BEYOND(sc))) { 1481 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1482 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1483 } 1484 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1485 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1486 1487 /* Set up address of statistics block */ 1488 if (!(BGE_IS_5705_OR_BEYOND(sc))) { 1489 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1490 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1491 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1492 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1493 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1494 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1495 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1496 } 1497 1498 /* Set up address of status block */ 1499 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1500 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1501 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1502 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1503 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1504 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 1505 1506 /* Turn on host coalescing state machine */ 1507 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1508 1509 /* Turn on RX BD completion state machine and enable attentions */ 1510 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1511 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1512 1513 /* Turn on RX list placement state machine */ 1514 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1515 1516 /* Turn on RX list selector state machine. */ 1517 if (!(BGE_IS_5705_OR_BEYOND(sc))) 1518 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1519 1520 /* Turn on DMA, clear stats */ 1521 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1522 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1523 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1524 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1525 ((sc->bge_flags & BGE_FLAG_TBI) ? 1526 BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1527 1528 /* Set misc. 
local control, enable interrupts on attentions */ 1529 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1530 1531#ifdef notdef 1532 /* Assert GPIO pins for PHY reset */ 1533 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1534 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1535 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1536 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1537#endif 1538 1539 /* Turn on DMA completion state machine */ 1540 if (!(BGE_IS_5705_OR_BEYOND(sc))) 1541 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1542 1543 /* Turn on write DMA state machine */ 1544 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1545 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1546 1547 /* Turn on read DMA state machine */ 1548 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1549 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1550 1551 /* Turn on RX data completion state machine */ 1552 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1553 1554 /* Turn on RX BD initiator state machine */ 1555 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1556 1557 /* Turn on RX data and RX BD initiator state machine */ 1558 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1559 1560 /* Turn on Mbuf cluster free state machine */ 1561 if (!(BGE_IS_5705_OR_BEYOND(sc))) 1562 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1563 1564 /* Turn on send BD completion state machine */ 1565 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1566 1567 /* Turn on send data completion state machine */ 1568 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1569 1570 /* Turn on send data initiator state machine */ 1571 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1572 1573 /* Turn on send BD initiator state machine */ 1574 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1575 1576 /* Turn on send BD selector state machine */ 1577 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1578 1579 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1580 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1581 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1582 1583 /* ack/clear link change events */ 1584 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1585 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1586 BGE_MACSTAT_LINK_CHANGED); 1587 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1588 1589 /* Enable PHY auto polling (for MII/GMII only) */ 1590 if (sc->bge_flags & BGE_FLAG_TBI) { 1591 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1592 } else { 1593 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1594 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1595 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1596 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1597 BGE_EVTENB_MI_INTERRUPT); 1598 } 1599 1600 /* 1601 * Clear any pending link state attention. 1602 * Otherwise some link state change events may be lost until attention 1603 * is cleared by bge_intr() -> bge_link_upd() sequence. 1604 * It's not necessary on newer BCM chips - perhaps enabling link 1605 * state change attentions implies clearing pending attention. 1606 */ 1607 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1608 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1609 BGE_MACSTAT_LINK_CHANGED); 1610 1611 /* Enable link state change attentions. 
*/ 1612 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1613 1614 return (0); 1615} 1616 1617const struct bge_revision * 1618bge_lookup_rev(uint32_t chipid) 1619{ 1620 const struct bge_revision *br; 1621 1622 for (br = bge_revisions; br->br_name != NULL; br++) { 1623 if (br->br_chipid == chipid) 1624 return (br); 1625 } 1626 1627 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1628 if (br->br_chipid == BGE_ASICREV(chipid)) 1629 return (br); 1630 } 1631 1632 return (NULL); 1633} 1634 1635const struct bge_vendor * 1636bge_lookup_vendor(uint16_t vid) 1637{ 1638 const struct bge_vendor *v; 1639 1640 for (v = bge_vendors; v->v_name != NULL; v++) 1641 if (v->v_id == vid) 1642 return (v); 1643 1644 panic("%s: unknown vendor %d", __func__, vid); 1645 return (NULL); 1646} 1647 1648/* 1649 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1650 * against our list and return its name if we find a match. 1651 * 1652 * Note that since the Broadcom controller contains VPD support, we 1653 * can get the device name string from the controller itself instead 1654 * of the compiled-in string. This is a little slow, but it guarantees 1655 * we'll always announce the right product name. Unfortunately, this 1656 * is possible only later in bge_attach(), when we have established 1657 * access to EEPROM. 1658 */ 1659static int 1660bge_probe(device_t dev) 1661{ 1662 struct bge_type *t = bge_devs; 1663 struct bge_softc *sc = device_get_softc(dev); 1664 1665 bzero(sc, sizeof(struct bge_softc)); 1666 sc->bge_dev = dev; 1667 1668 while(t->bge_vid != 0) { 1669 if ((pci_get_vendor(dev) == t->bge_vid) && 1670 (pci_get_device(dev) == t->bge_did)) { 1671 char buf[64]; 1672 const struct bge_revision *br; 1673 const struct bge_vendor *v; 1674 uint32_t id; 1675 1676 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 1677 BGE_PCIMISCCTL_ASICREV; 1678 br = bge_lookup_rev(id); 1679 id >>= 16; 1680 v = bge_lookup_vendor(t->bge_vid); 1681 if (br == NULL) 1682 snprintf(buf, 64, "%s unknown ASIC (%#04x)", 1683 v->v_name, id); 1684 else 1685 snprintf(buf, 64, "%s %s, ASIC rev. %#04x", 1686 v->v_name, br->br_name, id); 1687 device_set_desc_copy(dev, buf); 1688 if (pci_get_subvendor(dev) == DELL_VENDORID) 1689 sc->bge_flags |= BGE_FLAG_NO3LED; 1690 return (0); 1691 } 1692 t++; 1693 } 1694 1695 return (ENXIO); 1696} 1697 1698static void 1699bge_dma_free(struct bge_softc *sc) 1700{ 1701 int i; 1702 1703 /* Destroy DMA maps for RX buffers. */ 1704 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1705 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 1706 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1707 sc->bge_cdata.bge_rx_std_dmamap[i]); 1708 } 1709 1710 /* Destroy DMA maps for jumbo RX buffers. */ 1711 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1712 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1713 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1714 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1715 } 1716 1717 /* Destroy DMA maps for TX buffers. */ 1718 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1719 if (sc->bge_cdata.bge_tx_dmamap[i]) 1720 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1721 sc->bge_cdata.bge_tx_dmamap[i]); 1722 } 1723 1724 if (sc->bge_cdata.bge_mtag) 1725 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); 1726 1727 1728 /* Destroy standard RX ring. 
*/ 1729 if (sc->bge_cdata.bge_rx_std_ring_map) 1730 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1731 sc->bge_cdata.bge_rx_std_ring_map); 1732 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1733 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1734 sc->bge_ldata.bge_rx_std_ring, 1735 sc->bge_cdata.bge_rx_std_ring_map); 1736 1737 if (sc->bge_cdata.bge_rx_std_ring_tag) 1738 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1739 1740 /* Destroy jumbo RX ring. */ 1741 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1742 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1743 sc->bge_cdata.bge_rx_jumbo_ring_map); 1744 1745 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1746 sc->bge_ldata.bge_rx_jumbo_ring) 1747 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1748 sc->bge_ldata.bge_rx_jumbo_ring, 1749 sc->bge_cdata.bge_rx_jumbo_ring_map); 1750 1751 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1752 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1753 1754 /* Destroy RX return ring. */ 1755 if (sc->bge_cdata.bge_rx_return_ring_map) 1756 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1757 sc->bge_cdata.bge_rx_return_ring_map); 1758 1759 if (sc->bge_cdata.bge_rx_return_ring_map && 1760 sc->bge_ldata.bge_rx_return_ring) 1761 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1762 sc->bge_ldata.bge_rx_return_ring, 1763 sc->bge_cdata.bge_rx_return_ring_map); 1764 1765 if (sc->bge_cdata.bge_rx_return_ring_tag) 1766 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1767 1768 /* Destroy TX ring. */ 1769 if (sc->bge_cdata.bge_tx_ring_map) 1770 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1771 sc->bge_cdata.bge_tx_ring_map); 1772 1773 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1774 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1775 sc->bge_ldata.bge_tx_ring, 1776 sc->bge_cdata.bge_tx_ring_map); 1777 1778 if (sc->bge_cdata.bge_tx_ring_tag) 1779 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1780 1781 /* Destroy status block. */ 1782 if (sc->bge_cdata.bge_status_map) 1783 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1784 sc->bge_cdata.bge_status_map); 1785 1786 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1787 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1788 sc->bge_ldata.bge_status_block, 1789 sc->bge_cdata.bge_status_map); 1790 1791 if (sc->bge_cdata.bge_status_tag) 1792 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1793 1794 /* Destroy statistics block. */ 1795 if (sc->bge_cdata.bge_stats_map) 1796 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1797 sc->bge_cdata.bge_stats_map); 1798 1799 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 1800 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1801 sc->bge_ldata.bge_stats, 1802 sc->bge_cdata.bge_stats_map); 1803 1804 if (sc->bge_cdata.bge_stats_tag) 1805 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1806 1807 /* Destroy the parent tag. */ 1808 if (sc->bge_cdata.bge_parent_tag) 1809 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1810} 1811 1812static int 1813bge_dma_alloc(device_t dev) 1814{ 1815 struct bge_dmamap_arg ctx; 1816 struct bge_softc *sc; 1817 int i, error; 1818 1819 sc = device_get_softc(dev); 1820 1821 /* 1822 * Allocate the parent bus DMA tag appropriate for PCI. 
 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bge_cdata.bge_parent_tag);

	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Create tag for RX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tag for standard RX ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for standard RX ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_std_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);

	/* Load the address of the standard RX ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;

	/* Create tags for jumbo mbufs. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}

		/* Create tag for jumbo RX ring.
*/ 1922 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1923 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1924 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1925 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1926 1927 if (error) { 1928 device_printf(sc->bge_dev, 1929 "could not allocate jumbo ring dma tag\n"); 1930 return (ENOMEM); 1931 } 1932 1933 /* Allocate DMA'able memory for jumbo RX ring. */ 1934 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1935 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1936 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1937 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1938 if (error) 1939 return (ENOMEM); 1940 1941 /* Load the address of the jumbo RX ring. */ 1942 ctx.bge_maxsegs = 1; 1943 ctx.sc = sc; 1944 1945 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1946 sc->bge_cdata.bge_rx_jumbo_ring_map, 1947 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 1948 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1949 1950 if (error) 1951 return (ENOMEM); 1952 1953 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 1954 1955 /* Create DMA maps for jumbo RX buffers. */ 1956 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1957 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 1958 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1959 if (error) { 1960 device_printf(sc->bge_dev, 1961 "can't create DMA map for jumbo RX\n"); 1962 return (ENOMEM); 1963 } 1964 } 1965 1966 } 1967 1968 /* Create tag for RX return ring. */ 1969 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1970 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1971 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 1972 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 1973 1974 if (error) { 1975 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1976 return (ENOMEM); 1977 } 1978 1979 /* Allocate DMA'able memory for RX return ring. */ 1980 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 1981 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 1982 &sc->bge_cdata.bge_rx_return_ring_map); 1983 if (error) 1984 return (ENOMEM); 1985 1986 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 1987 BGE_RX_RTN_RING_SZ(sc)); 1988 1989 /* Load the address of the RX return ring. */ 1990 ctx.bge_maxsegs = 1; 1991 ctx.sc = sc; 1992 1993 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 1994 sc->bge_cdata.bge_rx_return_ring_map, 1995 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 1996 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1997 1998 if (error) 1999 return (ENOMEM); 2000 2001 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2002 2003 /* Create tag for TX ring. */ 2004 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2005 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2006 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2007 &sc->bge_cdata.bge_tx_ring_tag); 2008 2009 if (error) { 2010 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2011 return (ENOMEM); 2012 } 2013 2014 /* Allocate DMA'able memory for TX ring. */ 2015 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2016 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2017 &sc->bge_cdata.bge_tx_ring_map); 2018 if (error) 2019 return (ENOMEM); 2020 2021 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2022 2023 /* Load the address of the TX ring. 
*/ 2024 ctx.bge_maxsegs = 1; 2025 ctx.sc = sc; 2026 2027 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2028 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2029 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2030 2031 if (error) 2032 return (ENOMEM); 2033 2034 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2035 2036 /* Create tag for status block. */ 2037 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2038 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2039 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 2040 NULL, NULL, &sc->bge_cdata.bge_status_tag); 2041 2042 if (error) { 2043 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2044 return (ENOMEM); 2045 } 2046 2047 /* Allocate DMA'able memory for status block. */ 2048 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2049 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2050 &sc->bge_cdata.bge_status_map); 2051 if (error) 2052 return (ENOMEM); 2053 2054 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 2055 2056 /* Load the address of the status block. */ 2057 ctx.sc = sc; 2058 ctx.bge_maxsegs = 1; 2059 2060 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2061 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2062 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2063 2064 if (error) 2065 return (ENOMEM); 2066 2067 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2068 2069 /* Create tag for statistics block. */ 2070 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2071 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2072 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2073 &sc->bge_cdata.bge_stats_tag); 2074 2075 if (error) { 2076 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2077 return (ENOMEM); 2078 } 2079 2080 /* Allocate DMA'able memory for statistics block. */ 2081 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2082 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2083 &sc->bge_cdata.bge_stats_map); 2084 if (error) 2085 return (ENOMEM); 2086 2087 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2088 2089 /* Load the address of the statstics block. */ 2090 ctx.sc = sc; 2091 ctx.bge_maxsegs = 1; 2092 2093 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2094 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2095 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2096 2097 if (error) 2098 return (ENOMEM); 2099 2100 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2101 2102 return (0); 2103} 2104 2105static int 2106bge_attach(device_t dev) 2107{ 2108 struct ifnet *ifp; 2109 struct bge_softc *sc; 2110 uint32_t hwcfg = 0; 2111 uint32_t mac_tmp = 0; 2112 u_char eaddr[6]; 2113 int error = 0, rid; 2114 int trys; 2115 2116 sc = device_get_softc(dev); 2117 sc->bge_dev = dev; 2118 2119 /* 2120 * Map control/status registers. 2121 */ 2122 pci_enable_busmaster(dev); 2123 2124 rid = BGE_PCI_BAR0; 2125 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2126 RF_ACTIVE|PCI_RF_DENSE); 2127 2128 if (sc->bge_res == NULL) { 2129 device_printf (sc->bge_dev, "couldn't map memory\n"); 2130 error = ENXIO; 2131 goto fail; 2132 } 2133 2134 sc->bge_btag = rman_get_bustag(sc->bge_res); 2135 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2136 2137 /* Allocate interrupt. 
 */
	rid = 0;

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * XXX: Broadcom Linux driver. Not in specs or errata.
	 * PCI-Express?
	 */
	if (BGE_IS_5705_OR_BEYOND(sc)) {
		uint32_t v;

		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
			if ((v & 0xff) == BGE_PCIE_CAPID)
				sc->bge_flags |= BGE_FLAG_PCIE;
		}
	}

	/* PCI-X? */
	if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) == 0)
		sc->bge_flags |= BGE_FLAG_PCIX;

	/* Try to reset the chip. */
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	sc->bge_asf_mode = 0;
	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
	    == BGE_MAGIC_NUMBER)) {
		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
		    & BGE_HWCFG_ASF) {
			sc->bge_asf_mode |= ASF_ENABLE;
			sc->bge_asf_mode |= ASF_STACKUP;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5750)
				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
		}
	}

	/* Try to reset the chip again the nice way. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get the station address, either from NIC memory (if the
	 * 0x484b ("HK") signature is present) or from the EEPROM.
	 */
	mac_tmp = bge_readmem_ind(sc, 0x0c14);
	if ((mac_tmp >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_tmp >> 8);
		eaddr[1] = (u_char)mac_tmp;
		mac_tmp = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_tmp >> 24);
		eaddr[3] = (u_char)(mac_tmp >> 16);
		eaddr[4] = (u_char)(mac_tmp >> 8);
		eaddr[5] = (u_char)mac_tmp;
	} else if (bge_read_eeprom(sc, eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(sc->bge_dev, "failed to read station address\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_OR_BEYOND(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(dev)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
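	/*
	 * A note on the coalescing defaults that follow: the host
	 * coalescing engine (programmed in bge_blockinit()) holds off
	 * status block updates and interrupts until either the tick
	 * counter or the buffer descriptor counter for a direction
	 * crosses its threshold, whichever happens first. So these
	 * values mean, roughly, "interrupt after 150 coalescing ticks,
	 * or after 64 received / 128 transmitted descriptors, whichever
	 * comes first"; raising them trades interrupt rate for latency.
	 */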
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure. */
	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->bge_dev, "failed to if_alloc()\n");
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		ifp->if_capabilities &= ~IFCAP_HWCSUM;
		ifp->if_capenable &= ~IFCAP_HWCSUM;
		ifp->if_hwassist = 0;
	}

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			bge_release_resources(sc);
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_flags |= BGE_FLAG_TBI;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_flags |= BGE_FLAG_TBI;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to access the PHY
		 * during the probe even if ASF is running. Retry a
		 * couple of times if we get a conflict with the ASF
		 * firmware accessing the PHY.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
		trys = 0;
again:
		bge_asf_driver_up(sc);

		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			if (trys++ < 4) {
				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
				    BMCR_RESET);
				goto again;
			}

			device_printf(sc->bge_dev, "MII without any PHY!\n");
			bge_release_resources(sc);
			error = ENXIO;
			goto fail;
		}

		/*
		 * Now tell the firmware we are going up after probing
		 * the PHY.
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    bge_intr, sc, &sc->bge_intrhand);

	if (error) {
		bge_detach(dev);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
	}

fail:
	return (error);
}

static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);

	ether_ifdetach(ifp);

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);

	return (0);
}

static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);
}

static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, pcistate, reset;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state.
*/ 2479 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2480 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2481 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2482 2483 pci_write_config(dev, BGE_PCI_MISC_CTL, 2484 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2485 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2486 2487 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2488 2489 /* XXX: Broadcom Linux driver. */ 2490 if (sc->bge_flags & BGE_FLAG_PCIE) { 2491 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2492 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2493 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2494 /* Prevent PCIE link training during global reset */ 2495 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2496 reset |= (1<<29); 2497 } 2498 } 2499 2500 /* 2501 * Write the magic number to the firmware mailbox at 0xb50 2502 * so that the driver can synchronize with the firmware. 2503 */ 2504 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2505 2506 /* Issue global reset */ 2507 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 2508 2509 DELAY(1000); 2510 2511 /* XXX: Broadcom Linux driver. */ 2512 if (sc->bge_flags & BGE_FLAG_PCIE) { 2513 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2514 uint32_t v; 2515 2516 DELAY(500000); /* wait for link training to complete */ 2517 v = pci_read_config(dev, 0xc4, 4); 2518 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2519 } 2520 /* Set PCIE max payload size and clear error status. */ 2521 pci_write_config(dev, 0xd8, 0xf5000, 4); 2522 } 2523 2524 /* Reset some of the PCI state that got zapped by reset. */ 2525 pci_write_config(dev, BGE_PCI_MISC_CTL, 2526 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2527 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2528 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2529 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2530 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2531 2532 /* Enable memory arbiter. */ 2533 if (BGE_IS_5714_FAMILY(sc)) { 2534 uint32_t val; 2535 2536 val = CSR_READ_4(sc, BGE_MARB_MODE); 2537 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2538 } else 2539 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2540 2541 /* 2542 * Poll the value location we just wrote until 2543 * we see the 1's complement of the magic number. 2544 * This indicates that the firmware initialization 2545 * is complete. 2546 */ 2547 for (i = 0; i < BGE_TIMEOUT; i++) { 2548 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2549 if (val == ~BGE_MAGIC_NUMBER) 2550 break; 2551 DELAY(10); 2552 } 2553 2554 if (i == BGE_TIMEOUT) { 2555 device_printf(sc->bge_dev, "firmware handshake timed out\n"); 2556 return(0); 2557 } 2558 2559 /* 2560 * XXX Wait for the value of the PCISTATE register to 2561 * return to its original pre-reset state. This is a 2562 * fairly good indicator of reset completion. If we don't 2563 * wait for the reset to fully complete, trying to read 2564 * from the device's non-PCI registers may yield garbage 2565 * results. 2566 */ 2567 for (i = 0; i < BGE_TIMEOUT; i++) { 2568 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2569 break; 2570 DELAY(10); 2571 } 2572 2573 /* Fix up byte swapping. 
	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_BYTESWAP_DATA);

	/* Tell the ASF firmware we are up. */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}
	DELAY(10000);

	return (0);
}

/*
 * Frame reception handling.  This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_rx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);

	while (sc->bge_rx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (!(ifp->if_flags & IFF_PROMISC) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
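			/*
			 * Frame came in on the standard RX ring: recycle
			 * the old mbuf on error, otherwise pass it up and
			 * replace it with a fresh buffer.
			 */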
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}

		BGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BGE_LOCK(sc);
	}

	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}

static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_tx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
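	/*
	 * A frame can span several TX descriptors; only the descriptor
	 * flagged BGE_TXBDFLAG_END marks the end of a packet, which is
	 * why if_opackets is bumped only there in the loop below.
	 */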
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		sc->bge_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

#ifdef DEVICE_POLLING
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint32_t statusword;

	BGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	statusword = atomic_readandclear_32(
	    &sc->bge_ldata.bge_status_block->bge_status);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	/*
	 * Note the link event; it will be handled by the
	 * POLL_AND_CHECK_STATUS command.
	 */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer. */
		bge_txeof(sc);
	}
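	/*
	 * The write of 1 to the IRQ0 mailbox above masked further
	 * interrupts while the rings were serviced; writing 0 below
	 * re-arms the mailbox so the chip can interrupt again.
	 */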
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}

static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send an ASF heartbeat approximately every 2 seconds. */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		else {
			sc->bge_asf_count = 5;
			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
			    BGE_FW_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
		}
	}
}

static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;

	BGE_LOCK_ASSERT(sc);

	if (BGE_IS_5705_OR_BEYOND(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/* Don't mess with the PHY in IPMI/ASF mode. */
		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
			mii_tick(mii);
	} else {
		/*
		 * Since auto-polling can't be used in TBI mode, we have to
		 * poll link status manually.  Here we register a pending
		 * link event and trigger an interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{