36 37/* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring. 
67 */ 68 69#ifdef HAVE_KERNEL_OPTION_HEADERS 70#include "opt_device_polling.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/endian.h> 75#include <sys/systm.h> 76#include <sys/sockio.h> 77#include <sys/mbuf.h> 78#include <sys/malloc.h> 79#include <sys/kernel.h> 80#include <sys/module.h> 81#include <sys/socket.h> 82#include <sys/sysctl.h> 83 84#include <net/if.h> 85#include <net/if_arp.h> 86#include <net/ethernet.h> 87#include <net/if_dl.h> 88#include <net/if_media.h> 89 90#include <net/bpf.h> 91 92#include <net/if_types.h> 93#include <net/if_vlan_var.h> 94 95#include <netinet/in_systm.h> 96#include <netinet/in.h> 97#include <netinet/ip.h> 98 99#include <machine/bus.h> 100#include <machine/resource.h> 101#include <sys/bus.h> 102#include <sys/rman.h> 103 104#include <dev/mii/mii.h> 105#include <dev/mii/miivar.h> 106#include "miidevs.h" 107#include <dev/mii/brgphyreg.h> 108 109#ifdef __sparc64__ 110#include <dev/ofw/ofw_bus.h> 111#include <dev/ofw/openfirm.h> 112#include <machine/ofw_machdep.h> 113#include <machine/ver.h> 114#endif 115 116#include <dev/pci/pcireg.h> 117#include <dev/pci/pcivar.h> 118 119#include <dev/bge/if_bgereg.h> 120 121#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 122#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 123 124MODULE_DEPEND(bge, pci, 1, 1, 1); 125MODULE_DEPEND(bge, ether, 1, 1, 1); 126MODULE_DEPEND(bge, miibus, 1, 1, 1); 127 128/* "device miibus" required. See GENERIC if you get errors here. */ 129#include "miibus_if.h" 130 131/* 132 * Various supported device vendors/types and their names. Note: the 133 * spec seems to indicate that the hardware still has Alteon's vendor 134 * ID burned into it, though it will always be overriden by the vendor 135 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
 */
/* PCI vendor/device ID pairs this driver attaches to. */
static struct bge_type {
	uint16_t	bge_vid;	/* PCI vendor ID */
	uint16_t	bge_did;	/* PCI device ID */
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }		/* terminator */
};

/* Vendor ID to human-readable name map, used when printing the probe line. */
static const struct bge_vendor {
	uint16_t	v_id;		/* PCI vendor ID */
	const char	*v_name;	/* vendor name for probe message */
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }		/* terminator */
};

/* Exact chip ID (ASIC revision + stepping) to name map. */
static const struct bge_revision {
	uint32_t	br_chipid;	/* full chip ID */
	const char	*br_name;	/* chip/stepping name */
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },

	{ 0, NULL }		/* terminator */
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },

	{ 0, NULL }		/* terminator */
};

/* Convenience tests on the chip-family flags set at attach time. */
#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);

/* Device interface methods. */
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

/* ifnet entry points and interrupt/ioctl handlers. */
static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);

/* Receive/transmit ring management. */
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

/* Low-level register/EEPROM access helpers. */
static int bge_has_eeprom(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

/* MII bus interface. */
static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

/* ASF firmware handshake direction for the bge_sig_* helpers. */
#define	BGE_RESET_START 1
#define	BGE_RESET_STOP 2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

/* Loader tunables / sysctls controlling autoneg workaround and ASF. */
static int bge_fake_autoneg = 0;
static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.fake_autoneg",
    &bge_fake_autoneg);
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
    "Enable fake autonegotiation for certain blade systems");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");

/* OFW model strings / device paths of sun4u blades with EEPROM-less BGEs. */
#define	SPARC64_BLADE_1500_MODEL	"SUNW,Sun-Blade-1500"
#define	SPARC64_BLADE_1500_PATH_BGE	"/pci@1f,700000/network@2"
#define	SPARC64_BLADE_2500_MODEL	"SUNW,Sun-Blade-2500"
#define	SPARC64_BLADE_2500_PATH_BGE	"/pci@1c,600000/network@3"
#define	SPARC64_OFW_SUBVENDOR		"subsystem-vendor-id"

/*
 * Return 1 if this device is fitted with an EEPROM, 0 if not.
 * On everything but sun4u this is unconditionally 1.
 */
static int
bge_has_eeprom(struct bge_softc *sc)
{
#ifdef __sparc64__
	char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
	device_t dev;
	uint32_t subvendor;

	dev = sc->bge_dev;

	/*
	 * The on-board BGEs found in sun4u machines aren't fitted with
	 * an EEPROM which means that we have to obtain the MAC address
	 * via OFW and that some tests will always fail.  We distinguish
	 * such BGEs by the subvendor ID, which also has to be obtained
	 * from OFW instead of the PCI configuration space as the latter
	 * indicates Broadcom as the subvendor of the netboot interface.
	 * For early Blade 1500 and 2500 we even have to check the OFW
	 * device path as the subvendor ID always defaults to Broadcom
	 * there.
	 */
	if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
	    &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
	    subvendor == SUN_VENDORID)
		return (0);
	memset(buf, 0, sizeof(buf));
	if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
		if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
			return (0);
		if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
			return (0);
	}
#endif
	return (1);
}

/*
 * Read a NIC-internal memory word indirectly through the PCI memory
 * window registers; the window base is restored to 0 afterwards.
 */
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

/* Write a NIC-internal memory word through the PCI memory window. */
static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
/* Read a chip register indirectly through the PCI register window. */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

/* Write a chip register indirectly through the PCI register window. */
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/* Write a chip register directly through the memory-mapped BAR. */
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	/* Flag failure to the caller by zeroing bge_maxsegs. */
	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.  Returns 0 on success, 1 on timeout.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result; the data register holds a whole 32-bit word. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	/* Extract the byte for this address from the word. */
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 * Returns 0 on success, 1 if any byte read fails.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * MII bus read method.  Returns the 16-bit register value, or 0 on
 * timeout/read failure (MII has no other way to signal an error).
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe type.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chips revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll until the BUSY bit clears. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopolling if we had to turn it off above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

/* MII bus write method; always returns 0 per the miibus contract. */
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	/* Poll until the BUSY bit clears. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	/* Restore autopolling if we had to turn it off above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}


	return (0);
}

/*
 * MII status-change callback: program the MAC's port mode and duplex
 * to match what the PHY negotiated.
 */
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
745 */ 746static int 747bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m) 748{ 749 struct mbuf *m_new = NULL; 750 struct bge_rx_bd *r; 751 struct bge_dmamap_arg ctx; 752 int error; 753 754 if (m == NULL) { 755 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 756 if (m_new == NULL) 757 return (ENOBUFS); 758 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 759 } else { 760 m_new = m; 761 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 762 m_new->m_data = m_new->m_ext.ext_buf; 763 } 764 765 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 766 m_adj(m_new, ETHER_ALIGN); 767 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 768 r = &sc->bge_ldata.bge_rx_std_ring[i]; 769 ctx.bge_maxsegs = 1; 770 ctx.sc = sc; 771 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 772 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 773 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 774 if (error || ctx.bge_maxsegs == 0) { 775 if (m == NULL) { 776 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 777 m_freem(m_new); 778 } 779 return (ENOMEM); 780 } 781 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr); 782 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr); 783 r->bge_flags = BGE_RXBDFLAG_END; 784 r->bge_len = m_new->m_len; 785 r->bge_idx = i; 786 787 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 788 sc->bge_cdata.bge_rx_std_dmamap[i], 789 BUS_DMASYNC_PREREAD); 790 791 return (0); 792} 793 794/* 795 * Initialize a jumbo receive ring descriptor. This allocates 796 * a jumbo buffer from the pool managed internally by the driver. 
797 */ 798static int 799bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 800{ 801 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 802 struct bge_extrx_bd *r; 803 struct mbuf *m_new = NULL; 804 int nsegs; 805 int error; 806 807 if (m == NULL) { 808 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 809 if (m_new == NULL) 810 return (ENOBUFS); 811 812 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 813 if (!(m_new->m_flags & M_EXT)) { 814 m_freem(m_new); 815 return (ENOBUFS); 816 } 817 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 818 } else { 819 m_new = m; 820 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 821 m_new->m_data = m_new->m_ext.ext_buf; 822 } 823 824 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 825 m_adj(m_new, ETHER_ALIGN); 826 827 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 828 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 829 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 830 if (error) { 831 if (m == NULL) 832 m_freem(m_new); 833 return (error); 834 } 835 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 836 837 /* 838 * Fill in the extended RX buffer descriptor. 
839 */ 840 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 841 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 842 r->bge_idx = i; 843 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 844 switch (nsegs) { 845 case 4: 846 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 847 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 848 r->bge_len3 = segs[3].ds_len; 849 case 3: 850 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 851 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 852 r->bge_len2 = segs[2].ds_len; 853 case 2: 854 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 855 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 856 r->bge_len1 = segs[1].ds_len; 857 case 1: 858 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 859 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 860 r->bge_len0 = segs[0].ds_len; 861 break; 862 default: 863 panic("%s: %d segments\n", __func__, nsegs); 864 } 865 866 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 867 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 868 BUS_DMASYNC_PREREAD); 869 870 return (0); 871} 872 873/* 874 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 875 * that's 1MB or memory, which is a lot. For now, we fill only the first 876 * 256 ring entries and hope that our CPU is fast enough to keep up with 877 * the NIC. 
878 */ 879static int 880bge_init_rx_ring_std(struct bge_softc *sc) 881{ 882 int i; 883 884 for (i = 0; i < BGE_SSLOTS; i++) { 885 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 886 return (ENOBUFS); 887 }; 888 889 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 890 sc->bge_cdata.bge_rx_std_ring_map, 891 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 892 893 sc->bge_std = i - 1; 894 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 895 896 return (0); 897} 898 899static void 900bge_free_rx_ring_std(struct bge_softc *sc) 901{ 902 int i; 903 904 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 905 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 906 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 907 sc->bge_cdata.bge_rx_std_dmamap[i], 908 BUS_DMASYNC_POSTREAD); 909 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 910 sc->bge_cdata.bge_rx_std_dmamap[i]); 911 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 912 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 913 } 914 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 915 sizeof(struct bge_rx_bd)); 916 } 917} 918 919static int 920bge_init_rx_ring_jumbo(struct bge_softc *sc) 921{ 922 struct bge_rcb *rcb; 923 int i; 924 925 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 926 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 927 return (ENOBUFS); 928 }; 929 930 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 931 sc->bge_cdata.bge_rx_jumbo_ring_map, 932 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 933 934 sc->bge_jumbo = i - 1; 935 936 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 937 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 938 BGE_RCB_FLAG_USE_EXT_RX_BD); 939 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 940 941 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 942 943 return (0); 944} 945 946static void 947bge_free_rx_ring_jumbo(struct bge_softc *sc) 948{ 949 int i; 950 951 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 952 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 953 
bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 954 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 955 BUS_DMASYNC_POSTREAD); 956 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 957 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 958 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 959 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 960 } 961 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 962 sizeof(struct bge_extrx_bd)); 963 } 964} 965 966static void 967bge_free_tx_ring(struct bge_softc *sc) 968{ 969 int i; 970 971 if (sc->bge_ldata.bge_tx_ring == NULL) 972 return; 973 974 for (i = 0; i < BGE_TX_RING_CNT; i++) { 975 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 976 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 977 sc->bge_cdata.bge_tx_dmamap[i], 978 BUS_DMASYNC_POSTWRITE); 979 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 980 sc->bge_cdata.bge_tx_dmamap[i]); 981 m_freem(sc->bge_cdata.bge_tx_chain[i]); 982 sc->bge_cdata.bge_tx_chain[i] = NULL; 983 } 984 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 985 sizeof(struct bge_tx_bd)); 986 } 987} 988 989static int 990bge_init_tx_ring(struct bge_softc *sc) 991{ 992 sc->bge_txcnt = 0; 993 sc->bge_tx_saved_considx = 0; 994 995 /* Initialize transmit producer index for host-memory send ring. */ 996 sc->bge_tx_prodidx = 0; 997 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 998 999 /* 5700 b2 errata */ 1000 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1001 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1002 1003 /* NIC-memory send ring not used; initialize to zero. */ 1004 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1005 /* 5700 b2 errata */ 1006 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1007 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1008 1009 return (0); 1010} 1011 1012static void 1013bge_setpromisc(struct bge_softc *sc) 1014{ 1015 struct ifnet *ifp; 1016 1017 BGE_LOCK_ASSERT(sc); 1018 1019 ifp = sc->bge_ifp; 1020 1021 /* Enable or disable promiscuous mode as needed. 
	 */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

/*
 * Program the chip's four multicast hash registers from the interface's
 * multicast address list.
 */
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Accept everything when allmulti or promiscuous is requested. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones: 7-bit CRC hash into a 128-bit table. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Signal the ASF firmware before a chip reset.  'type' is BGE_RESET_START
 * or BGE_RESET_STOP.
 */
static void
bge_sig_pre_reset(sc, type)
	struct bge_softc *sc;
	int type;
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

/* Signal the ASF firmware after a chip reset (new handshake only). */
static void
bge_sig_post_reset(sc, type)
	struct bge_softc *sc;
	int type;
{
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

/* Legacy (pre-new-handshake) ASF reset signalling. */
static void
bge_sig_legacy(sc, type)
	struct bge_softc *sc;
	int type;
{
	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
/*
 * Ask the ASF firmware to pause and wait (bounded) for it to acknowledge
 * by clearing the CPU event bit.
 */
void
bge_stop_fw(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++ ) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed. Skip this check when there's no
	 * EEPROM fitted, since in that case it will always
	 * fail.
	 */
	if ((sc->bge_flags & BGE_FLAG_EEPROM) &&
	    CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);
| 36 37/* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring. 
67 */ 68 69#ifdef HAVE_KERNEL_OPTION_HEADERS 70#include "opt_device_polling.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/endian.h> 75#include <sys/systm.h> 76#include <sys/sockio.h> 77#include <sys/mbuf.h> 78#include <sys/malloc.h> 79#include <sys/kernel.h> 80#include <sys/module.h> 81#include <sys/socket.h> 82#include <sys/sysctl.h> 83 84#include <net/if.h> 85#include <net/if_arp.h> 86#include <net/ethernet.h> 87#include <net/if_dl.h> 88#include <net/if_media.h> 89 90#include <net/bpf.h> 91 92#include <net/if_types.h> 93#include <net/if_vlan_var.h> 94 95#include <netinet/in_systm.h> 96#include <netinet/in.h> 97#include <netinet/ip.h> 98 99#include <machine/bus.h> 100#include <machine/resource.h> 101#include <sys/bus.h> 102#include <sys/rman.h> 103 104#include <dev/mii/mii.h> 105#include <dev/mii/miivar.h> 106#include "miidevs.h" 107#include <dev/mii/brgphyreg.h> 108 109#ifdef __sparc64__ 110#include <dev/ofw/ofw_bus.h> 111#include <dev/ofw/openfirm.h> 112#include <machine/ofw_machdep.h> 113#include <machine/ver.h> 114#endif 115 116#include <dev/pci/pcireg.h> 117#include <dev/pci/pcivar.h> 118 119#include <dev/bge/if_bgereg.h> 120 121#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 122#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 123 124MODULE_DEPEND(bge, pci, 1, 1, 1); 125MODULE_DEPEND(bge, ether, 1, 1, 1); 126MODULE_DEPEND(bge, miibus, 1, 1, 1); 127 128/* "device miibus" required. See GENERIC if you get errors here. */ 129#include "miibus_if.h" 130 131/* 132 * Various supported device vendors/types and their names. Note: the 133 * spec seems to indicate that the hardware still has Alteon's vendor 134 * ID burned into it, though it will always be overriden by the vendor 135 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
136 */ 137static struct bge_type { 138 uint16_t bge_vid; 139 uint16_t bge_did; 140} bge_devs[] = { 141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, 142 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, 143 144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, 145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, 146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, 147 148 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, 149 150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, 152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, 154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, 156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, 158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, 160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, 162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, 164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, 166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, 168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 198 199 { SK_VENDORID, SK_DEVICEID_ALTIMA }, 200 201 { TC_VENDORID, TC_DEVICEID_3C996 }, 202 203 { 0, 0 } 204}; 205 206static const struct bge_vendor { 207 uint16_t v_id; 208 const char *v_name; 209} bge_vendors[] = { 210 { ALTEON_VENDORID, "Alteon" }, 211 { ALTIMA_VENDORID, "Altima" }, 212 { APPLE_VENDORID, "Apple" }, 213 { BCOM_VENDORID, "Broadcom" }, 214 { SK_VENDORID, "SysKonnect" }, 215 { TC_VENDORID, "3Com" }, 216 217 { 0, NULL } 218}; 219 220static const struct bge_revision { 221 uint32_t br_chipid; 222 const char *br_name; 223} bge_revisions[] = { 224 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 225 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 226 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 227 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 228 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 229 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 230 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 231 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 232 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 233 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 234 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 235 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 236 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 237 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 238 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 239 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 240 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 241 { 
BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 242 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 243 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 244 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 245 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 246 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 247 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 248 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 249 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 250 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 251 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 252 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 253 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 254 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 255 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 256 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 257 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 258 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 259 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 260 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 261 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 262 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 263 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 264 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 265 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 266 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 267 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 268 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 269 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 270 /* 5754 and 5787 share the same ASIC ID */ 271 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 272 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 273 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 274 275 { 0, NULL } 276}; 277 278/* 279 * Some defaults for major revisions, so that newer steppings 280 * that we don't know about have a shot at working. 
281 */ 282static const struct bge_revision bge_majorrevs[] = { 283 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 284 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 285 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 286 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 287 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 288 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 289 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 290 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 291 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 292 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 293 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 294 /* 5754 and 5787 share the same ASIC ID */ 295 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 296 297 { 0, NULL } 298}; 299 300#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 301#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 302#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 303#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 304#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 305 306const struct bge_revision * bge_lookup_rev(uint32_t); 307const struct bge_vendor * bge_lookup_vendor(uint16_t); 308static int bge_probe(device_t); 309static int bge_attach(device_t); 310static int bge_detach(device_t); 311static int bge_suspend(device_t); 312static int bge_resume(device_t); 313static void bge_release_resources(struct bge_softc *); 314static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 315static int bge_dma_alloc(device_t); 316static void bge_dma_free(struct bge_softc *); 317 318static void bge_txeof(struct bge_softc *); 319static void bge_rxeof(struct bge_softc *); 320 321static void bge_asf_driver_up (struct bge_softc *); 322static void bge_tick(void *); 323static void bge_stats_update(struct bge_softc *); 324static void bge_stats_update_regs(struct bge_softc *); 325static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 326 327static void 
bge_intr(void *); 328static void bge_start_locked(struct ifnet *); 329static void bge_start(struct ifnet *); 330static int bge_ioctl(struct ifnet *, u_long, caddr_t); 331static void bge_init_locked(struct bge_softc *); 332static void bge_init(void *); 333static void bge_stop(struct bge_softc *); 334static void bge_watchdog(struct bge_softc *); 335static void bge_shutdown(device_t); 336static int bge_ifmedia_upd_locked(struct ifnet *); 337static int bge_ifmedia_upd(struct ifnet *); 338static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 339 340static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 341static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 342 343static void bge_setpromisc(struct bge_softc *); 344static void bge_setmulti(struct bge_softc *); 345 346static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *); 347static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 348static int bge_init_rx_ring_std(struct bge_softc *); 349static void bge_free_rx_ring_std(struct bge_softc *); 350static int bge_init_rx_ring_jumbo(struct bge_softc *); 351static void bge_free_rx_ring_jumbo(struct bge_softc *); 352static void bge_free_tx_ring(struct bge_softc *); 353static int bge_init_tx_ring(struct bge_softc *); 354 355static int bge_chipinit(struct bge_softc *); 356static int bge_blockinit(struct bge_softc *); 357 358static int bge_has_eeprom(struct bge_softc *); 359static uint32_t bge_readmem_ind(struct bge_softc *, int); 360static void bge_writemem_ind(struct bge_softc *, int, int); 361#ifdef notdef 362static uint32_t bge_readreg_ind(struct bge_softc *, int); 363#endif 364static void bge_writemem_direct(struct bge_softc *, int, int); 365static void bge_writereg_ind(struct bge_softc *, int, int); 366 367static int bge_miibus_readreg(device_t, int, int); 368static int bge_miibus_writereg(device_t, int, int, int); 369static void bge_miibus_statchg(device_t); 370#ifdef DEVICE_POLLING 371static void 
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 372#endif 373 374#define BGE_RESET_START 1 375#define BGE_RESET_STOP 2 376static void bge_sig_post_reset(struct bge_softc *, int); 377static void bge_sig_legacy(struct bge_softc *, int); 378static void bge_sig_pre_reset(struct bge_softc *, int); 379static int bge_reset(struct bge_softc *); 380static void bge_link_upd(struct bge_softc *); 381 382/* 383 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may 384 * leak information to untrusted users. It is also known to cause alignment 385 * traps on certain architectures. 386 */ 387#ifdef BGE_REGISTER_DEBUG 388static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 389static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); 390static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); 391#endif 392static void bge_add_sysctls(struct bge_softc *); 393static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); 394 395static device_method_t bge_methods[] = { 396 /* Device interface */ 397 DEVMETHOD(device_probe, bge_probe), 398 DEVMETHOD(device_attach, bge_attach), 399 DEVMETHOD(device_detach, bge_detach), 400 DEVMETHOD(device_shutdown, bge_shutdown), 401 DEVMETHOD(device_suspend, bge_suspend), 402 DEVMETHOD(device_resume, bge_resume), 403 404 /* bus interface */ 405 DEVMETHOD(bus_print_child, bus_generic_print_child), 406 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 407 408 /* MII interface */ 409 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 410 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 411 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 412 413 { 0, 0 } 414}; 415 416static driver_t bge_driver = { 417 "bge", 418 bge_methods, 419 sizeof(struct bge_softc) 420}; 421 422static devclass_t bge_devclass; 423 424DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 425DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 426 427static int bge_fake_autoneg = 0; 428static int bge_allow_asf = 1; 429 430TUNABLE_INT("hw.bge.fake_autoneg", 
&bge_fake_autoneg); 431TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 432 433SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); 434SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0, 435 "Enable fake autonegotiation for certain blade systems"); 436SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, 437 "Allow ASF mode if available"); 438 439#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" 440#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" 441#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" 442#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" 443#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" 444 445static int 446bge_has_eeprom(struct bge_softc *sc) 447{ 448#ifdef __sparc64__ 449 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; 450 device_t dev; 451 uint32_t subvendor; 452 453 dev = sc->bge_dev; 454 455 /* 456 * The on-board BGEs found in sun4u machines aren't fitted with 457 * an EEPROM which means that we have to obtain the MAC address 458 * via OFW and that some tests will always fail. We distinguish 459 * such BGEs by the subvendor ID, which also has to be obtained 460 * from OFW instead of the PCI configuration space as the latter 461 * indicates Broadcom as the subvendor of the netboot interface. 462 * For early Blade 1500 and 2500 we even have to check the OFW 463 * device path as the subvendor ID always defaults to Broadcom 464 * there. 
465 */ 466 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, 467 &subvendor, sizeof(subvendor)) == sizeof(subvendor) && 468 subvendor == SUN_VENDORID) 469 return (0); 470 memset(buf, 0, sizeof(buf)); 471 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { 472 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && 473 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) 474 return (0); 475 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && 476 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) 477 return (0); 478 } 479#endif 480 return (1); 481} 482 483static uint32_t 484bge_readmem_ind(struct bge_softc *sc, int off) 485{ 486 device_t dev; 487 uint32_t val; 488 489 dev = sc->bge_dev; 490 491 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 492 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 493 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 494 return (val); 495} 496 497static void 498bge_writemem_ind(struct bge_softc *sc, int off, int val) 499{ 500 device_t dev; 501 502 dev = sc->bge_dev; 503 504 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 505 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 506 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 507} 508 509#ifdef notdef 510static uint32_t 511bge_readreg_ind(struct bge_softc *sc, int off) 512{ 513 device_t dev; 514 515 dev = sc->bge_dev; 516 517 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 518 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 519} 520#endif 521 522static void 523bge_writereg_ind(struct bge_softc *sc, int off, int val) 524{ 525 device_t dev; 526 527 dev = sc->bge_dev; 528 529 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 530 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 531} 532 533static void 534bge_writemem_direct(struct bge_softc *sc, int off, int val) 535{ 536 CSR_WRITE_4(sc, off, val); 537} 538 539/* 540 * Map a single buffer address. 
541 */ 542 543static void 544bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 545{ 546 struct bge_dmamap_arg *ctx; 547 548 if (error) 549 return; 550 551 ctx = arg; 552 553 if (nseg > ctx->bge_maxsegs) { 554 ctx->bge_maxsegs = 0; 555 return; 556 } 557 558 ctx->bge_busaddr = segs->ds_addr; 559} 560 561/* 562 * Read a byte of data stored in the EEPROM at address 'addr.' The 563 * BCM570x supports both the traditional bitbang interface and an 564 * auto access interface for reading the EEPROM. We use the auto 565 * access method. 566 */ 567static uint8_t 568bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 569{ 570 int i; 571 uint32_t byte = 0; 572 573 /* 574 * Enable use of auto EEPROM access so we can avoid 575 * having to use the bitbang method. 576 */ 577 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 578 579 /* Reset the EEPROM, load the clock period. */ 580 CSR_WRITE_4(sc, BGE_EE_ADDR, 581 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 582 DELAY(20); 583 584 /* Issue the read EEPROM command. */ 585 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 586 587 /* Wait for completion */ 588 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 589 DELAY(10); 590 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 591 break; 592 } 593 594 if (i == BGE_TIMEOUT * 10) { 595 device_printf(sc->bge_dev, "EEPROM read timed out\n"); 596 return (1); 597 } 598 599 /* Get result. */ 600 byte = CSR_READ_4(sc, BGE_EE_DATA); 601 602 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 603 604 return (0); 605} 606 607/* 608 * Read a sequence of bytes from the EEPROM. 609 */ 610static int 611bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 612{ 613 int i, error = 0; 614 uint8_t byte = 0; 615 616 for (i = 0; i < cnt; i++) { 617 error = bge_eeprom_getbyte(sc, off + i, &byte); 618 if (error) 619 break; 620 *(dest + i) = byte; 621 } 622 623 return (error ? 
1 : 0); 624} 625 626static int 627bge_miibus_readreg(device_t dev, int phy, int reg) 628{ 629 struct bge_softc *sc; 630 uint32_t val, autopoll; 631 int i; 632 633 sc = device_get_softc(dev); 634 635 /* 636 * Broadcom's own driver always assumes the internal 637 * PHY is at GMII address 1. On some chips, the PHY responds 638 * to accesses at all addresses, which could cause us to 639 * bogusly attach the PHY 32 times at probe type. Always 640 * restricting the lookup to address 1 is simpler than 641 * trying to figure out which chips revisions should be 642 * special-cased. 643 */ 644 if (phy != 1) 645 return (0); 646 647 /* Reading with autopolling on may trigger PCI errors */ 648 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 649 if (autopoll & BGE_MIMODE_AUTOPOLL) { 650 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 651 DELAY(40); 652 } 653 654 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 655 BGE_MIPHY(phy) | BGE_MIREG(reg)); 656 657 for (i = 0; i < BGE_TIMEOUT; i++) { 658 DELAY(10); 659 val = CSR_READ_4(sc, BGE_MI_COMM); 660 if (!(val & BGE_MICOMM_BUSY)) 661 break; 662 } 663 664 if (i == BGE_TIMEOUT) { 665 device_printf(sc->bge_dev, "PHY read timed out\n"); 666 val = 0; 667 goto done; 668 } 669 670 val = CSR_READ_4(sc, BGE_MI_COMM); 671 672done: 673 if (autopoll & BGE_MIMODE_AUTOPOLL) { 674 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 675 DELAY(40); 676 } 677 678 if (val & BGE_MICOMM_READFAIL) 679 return (0); 680 681 return (val & 0xFFFF); 682} 683 684static int 685bge_miibus_writereg(device_t dev, int phy, int reg, int val) 686{ 687 struct bge_softc *sc; 688 uint32_t autopoll; 689 int i; 690 691 sc = device_get_softc(dev); 692 693 /* Reading with autopolling on may trigger PCI errors */ 694 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 695 if (autopoll & BGE_MIMODE_AUTOPOLL) { 696 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 697 DELAY(40); 698 } 699 700 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 701 BGE_MIPHY(phy) | 
BGE_MIREG(reg) | val); 702 703 for (i = 0; i < BGE_TIMEOUT; i++) { 704 DELAY(10); 705 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 706 break; 707 } 708 709 if (i == BGE_TIMEOUT) { 710 device_printf(sc->bge_dev, "PHY write timed out\n"); 711 return (0); 712 } 713 714 if (autopoll & BGE_MIMODE_AUTOPOLL) { 715 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 716 DELAY(40); 717 } 718 719 720 return (0); 721} 722 723static void 724bge_miibus_statchg(device_t dev) 725{ 726 struct bge_softc *sc; 727 struct mii_data *mii; 728 sc = device_get_softc(dev); 729 mii = device_get_softc(sc->bge_miibus); 730 731 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 732 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 733 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 734 else 735 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 736 737 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 738 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 739 else 740 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 741} 742 743/* 744 * Intialize a standard receive ring descriptor. 
745 */ 746static int 747bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m) 748{ 749 struct mbuf *m_new = NULL; 750 struct bge_rx_bd *r; 751 struct bge_dmamap_arg ctx; 752 int error; 753 754 if (m == NULL) { 755 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 756 if (m_new == NULL) 757 return (ENOBUFS); 758 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 759 } else { 760 m_new = m; 761 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 762 m_new->m_data = m_new->m_ext.ext_buf; 763 } 764 765 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 766 m_adj(m_new, ETHER_ALIGN); 767 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 768 r = &sc->bge_ldata.bge_rx_std_ring[i]; 769 ctx.bge_maxsegs = 1; 770 ctx.sc = sc; 771 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 772 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 773 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 774 if (error || ctx.bge_maxsegs == 0) { 775 if (m == NULL) { 776 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 777 m_freem(m_new); 778 } 779 return (ENOMEM); 780 } 781 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr); 782 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr); 783 r->bge_flags = BGE_RXBDFLAG_END; 784 r->bge_len = m_new->m_len; 785 r->bge_idx = i; 786 787 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 788 sc->bge_cdata.bge_rx_std_dmamap[i], 789 BUS_DMASYNC_PREREAD); 790 791 return (0); 792} 793 794/* 795 * Initialize a jumbo receive ring descriptor. This allocates 796 * a jumbo buffer from the pool managed internally by the driver. 
797 */ 798static int 799bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 800{ 801 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 802 struct bge_extrx_bd *r; 803 struct mbuf *m_new = NULL; 804 int nsegs; 805 int error; 806 807 if (m == NULL) { 808 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 809 if (m_new == NULL) 810 return (ENOBUFS); 811 812 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 813 if (!(m_new->m_flags & M_EXT)) { 814 m_freem(m_new); 815 return (ENOBUFS); 816 } 817 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 818 } else { 819 m_new = m; 820 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 821 m_new->m_data = m_new->m_ext.ext_buf; 822 } 823 824 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 825 m_adj(m_new, ETHER_ALIGN); 826 827 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 828 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 829 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 830 if (error) { 831 if (m == NULL) 832 m_freem(m_new); 833 return (error); 834 } 835 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 836 837 /* 838 * Fill in the extended RX buffer descriptor. 
839 */ 840 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 841 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 842 r->bge_idx = i; 843 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 844 switch (nsegs) { 845 case 4: 846 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 847 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 848 r->bge_len3 = segs[3].ds_len; 849 case 3: 850 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 851 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 852 r->bge_len2 = segs[2].ds_len; 853 case 2: 854 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 855 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 856 r->bge_len1 = segs[1].ds_len; 857 case 1: 858 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 859 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 860 r->bge_len0 = segs[0].ds_len; 861 break; 862 default: 863 panic("%s: %d segments\n", __func__, nsegs); 864 } 865 866 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 867 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 868 BUS_DMASYNC_PREREAD); 869 870 return (0); 871} 872 873/* 874 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 875 * that's 1MB or memory, which is a lot. For now, we fill only the first 876 * 256 ring entries and hope that our CPU is fast enough to keep up with 877 * the NIC. 
878 */ 879static int 880bge_init_rx_ring_std(struct bge_softc *sc) 881{ 882 int i; 883 884 for (i = 0; i < BGE_SSLOTS; i++) { 885 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 886 return (ENOBUFS); 887 }; 888 889 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 890 sc->bge_cdata.bge_rx_std_ring_map, 891 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 892 893 sc->bge_std = i - 1; 894 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 895 896 return (0); 897} 898 899static void 900bge_free_rx_ring_std(struct bge_softc *sc) 901{ 902 int i; 903 904 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 905 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 906 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 907 sc->bge_cdata.bge_rx_std_dmamap[i], 908 BUS_DMASYNC_POSTREAD); 909 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 910 sc->bge_cdata.bge_rx_std_dmamap[i]); 911 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 912 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 913 } 914 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 915 sizeof(struct bge_rx_bd)); 916 } 917} 918 919static int 920bge_init_rx_ring_jumbo(struct bge_softc *sc) 921{ 922 struct bge_rcb *rcb; 923 int i; 924 925 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 926 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 927 return (ENOBUFS); 928 }; 929 930 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 931 sc->bge_cdata.bge_rx_jumbo_ring_map, 932 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 933 934 sc->bge_jumbo = i - 1; 935 936 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 937 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 938 BGE_RCB_FLAG_USE_EXT_RX_BD); 939 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 940 941 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 942 943 return (0); 944} 945 946static void 947bge_free_rx_ring_jumbo(struct bge_softc *sc) 948{ 949 int i; 950 951 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 952 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 953 
bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 954 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 955 BUS_DMASYNC_POSTREAD); 956 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 957 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 958 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 959 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 960 } 961 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 962 sizeof(struct bge_extrx_bd)); 963 } 964} 965 966static void 967bge_free_tx_ring(struct bge_softc *sc) 968{ 969 int i; 970 971 if (sc->bge_ldata.bge_tx_ring == NULL) 972 return; 973 974 for (i = 0; i < BGE_TX_RING_CNT; i++) { 975 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 976 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 977 sc->bge_cdata.bge_tx_dmamap[i], 978 BUS_DMASYNC_POSTWRITE); 979 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 980 sc->bge_cdata.bge_tx_dmamap[i]); 981 m_freem(sc->bge_cdata.bge_tx_chain[i]); 982 sc->bge_cdata.bge_tx_chain[i] = NULL; 983 } 984 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 985 sizeof(struct bge_tx_bd)); 986 } 987} 988 989static int 990bge_init_tx_ring(struct bge_softc *sc) 991{ 992 sc->bge_txcnt = 0; 993 sc->bge_tx_saved_considx = 0; 994 995 /* Initialize transmit producer index for host-memory send ring. */ 996 sc->bge_tx_prodidx = 0; 997 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 998 999 /* 5700 b2 errata */ 1000 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1001 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1002 1003 /* NIC-memory send ring not used; initialize to zero. */ 1004 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1005 /* 5700 b2 errata */ 1006 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1007 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1008 1009 return (0); 1010} 1011 1012static void 1013bge_setpromisc(struct bge_softc *sc) 1014{ 1015 struct ifnet *ifp; 1016 1017 BGE_LOCK_ASSERT(sc); 1018 1019 ifp = sc->bge_ifp; 1020 1021 /* Enable or disable promiscuous mode as needed. 
*/ 1022 if (ifp->if_flags & IFF_PROMISC) 1023 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 1024 else 1025 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 1026} 1027 1028static void 1029bge_setmulti(struct bge_softc *sc) 1030{ 1031 struct ifnet *ifp; 1032 struct ifmultiaddr *ifma; 1033 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1034 int h, i; 1035 1036 BGE_LOCK_ASSERT(sc); 1037 1038 ifp = sc->bge_ifp; 1039 1040 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1041 for (i = 0; i < 4; i++) 1042 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1043 return; 1044 } 1045 1046 /* First, zot all the existing filters. */ 1047 for (i = 0; i < 4; i++) 1048 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1049 1050 /* Now program new ones. */ 1051 IF_ADDR_LOCK(ifp); 1052 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1053 if (ifma->ifma_addr->sa_family != AF_LINK) 1054 continue; 1055 h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1056 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 1057 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1058 } 1059 IF_ADDR_UNLOCK(ifp); 1060 1061 for (i = 0; i < 4; i++) 1062 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1063} 1064 1065static void 1066bge_sig_pre_reset(sc, type) 1067 struct bge_softc *sc; 1068 int type; 1069{ 1070 /* 1071 * Some chips don't like this so only do this if ASF is enabled 1072 */ 1073 if (sc->bge_asf_mode) 1074 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1075 1076 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1077 switch (type) { 1078 case BGE_RESET_START: 1079 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 1080 break; 1081 case BGE_RESET_STOP: 1082 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 1083 break; 1084 } 1085 } 1086} 1087 1088static void 1089bge_sig_post_reset(sc, type) 1090 struct bge_softc *sc; 1091 int type; 1092{ 1093 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1094 switch (type) { 1095 case BGE_RESET_START: 1096 bge_writemem_ind(sc, BGE_SDI_STATUS, 
0x80000001); 1097 /* START DONE */ 1098 break; 1099 case BGE_RESET_STOP: 1100 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 1101 break; 1102 } 1103 } 1104} 1105 1106static void 1107bge_sig_legacy(sc, type) 1108 struct bge_softc *sc; 1109 int type; 1110{ 1111 if (sc->bge_asf_mode) { 1112 switch (type) { 1113 case BGE_RESET_START: 1114 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 1115 break; 1116 case BGE_RESET_STOP: 1117 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 1118 break; 1119 } 1120 } 1121} 1122 1123void bge_stop_fw(struct bge_softc *); 1124void 1125bge_stop_fw(sc) 1126 struct bge_softc *sc; 1127{ 1128 int i; 1129 1130 if (sc->bge_asf_mode) { 1131 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 1132 CSR_WRITE_4(sc, BGE_CPU_EVENT, 1133 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 1134 1135 for (i = 0; i < 100; i++ ) { 1136 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 1137 break; 1138 DELAY(10); 1139 } 1140 } 1141} 1142 1143/* 1144 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1145 * self-test results. 1146 */ 1147static int 1148bge_chipinit(struct bge_softc *sc) 1149{ 1150 uint32_t dma_rw_ctl; 1151 int i; 1152 1153 /* Set endianness before we access any non-PCI registers. */ 1154 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 1155 1156 /* 1157 * Check the 'ROM failed' bit on the RX CPU to see if 1158 * self-tests passed. Skip this check when there's no 1159 * EEPROM fitted, since in that case it will always 1160 * fail. 1161 */ 1162 if ((sc->bge_flags & BGE_FLAG_EEPROM) && 1163 CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1164 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n"); 1165 return (ENODEV); 1166 } 1167 1168 /* Clear the MAC control register */ 1169 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1170 1171 /* 1172 * Clear the MAC statistics block in the NIC's 1173 * internal memory. 
1174 */ 1175 for (i = BGE_STATS_BLOCK; 1176 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1177 BGE_MEMWIN_WRITE(sc, i, 0); 1178 1179 for (i = BGE_STATUS_BLOCK; 1180 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1181 BGE_MEMWIN_WRITE(sc, i, 0); 1182
|
1237 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1238 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1239 1240 /* 1241 * Set up general mode register. 1242 */ 1243 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 1244 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 1245 BGE_MODECTL_TX_NO_PHDR_CSUM); 1246 1247 /* 1248 * Tell the firmware the driver is running 1249 */ 1250 if (sc->bge_asf_mode & ASF_STACKUP) 1251 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 1252 1253 /* 1254 * Disable memory write invalidate. Apparently it is not supported 1255 * properly by these devices. 1256 */ 1257 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); 1258 1259 /* Set the timer prescaler (always 66Mhz) */ 1260 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 1261 1262 return (0); 1263} 1264 1265static int 1266bge_blockinit(struct bge_softc *sc) 1267{ 1268 struct bge_rcb *rcb; 1269 bus_size_t vrcb; 1270 bge_hostaddr taddr; 1271 uint32_t val; 1272 int i; 1273 1274 /* 1275 * Initialize the memory window pointer register so that 1276 * we can access the first 32K of internal NIC RAM. This will 1277 * allow us to set up the TX send ring RCBs and the RX return 1278 * ring RCBs, plus other things which live in NIC memory. 1279 */ 1280 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1281 1282 /* Note: the BCM5704 has a smaller mbuf space than other chips. 
*/ 1283 1284 if (!(BGE_IS_5705_PLUS(sc))) { 1285 /* Configure mbuf memory pool */ 1286 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1287 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1288 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1289 else 1290 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1291 1292 /* Configure DMA resource pool */ 1293 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1294 BGE_DMA_DESCRIPTORS); 1295 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1296 } 1297 1298 /* Configure mbuf pool watermarks */ 1299 if (!(BGE_IS_5705_PLUS(sc))) { 1300 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1301 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1302 } else { 1303 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1304 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1305 } 1306 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1307 1308 /* Configure DMA resource watermarks */ 1309 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1310 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1311 1312 /* Enable buffer manager */ 1313 if (!(BGE_IS_5705_PLUS(sc))) { 1314 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1315 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 1316 1317 /* Poll for buffer manager start indication */ 1318 for (i = 0; i < BGE_TIMEOUT; i++) { 1319 DELAY(10); 1320 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1321 break; 1322 } 1323 1324 if (i == BGE_TIMEOUT) { 1325 device_printf(sc->bge_dev, 1326 "buffer manager failed to start\n"); 1327 return (ENXIO); 1328 } 1329 } 1330 1331 /* Enable flow-through queues */ 1332 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1333 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1334 1335 /* Wait until queue initialization is complete */ 1336 for (i = 0; i < BGE_TIMEOUT; i++) { 1337 DELAY(10); 1338 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1339 break; 1340 } 1341 1342 if (i == BGE_TIMEOUT) { 1343 device_printf(sc->bge_dev, "flow-through queue init failed\n"); 1344 return (ENXIO); 
1345 } 1346 1347 /* Initialize the standard RX ring control block */ 1348 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1349 rcb->bge_hostaddr.bge_addr_lo = 1350 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1351 rcb->bge_hostaddr.bge_addr_hi = 1352 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1353 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1354 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 1355 if (BGE_IS_5705_PLUS(sc)) 1356 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1357 else 1358 rcb->bge_maxlen_flags = 1359 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1360 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1361 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1362 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1363 1364 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1365 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1366 1367 /* 1368 * Initialize the jumbo RX ring control block 1369 * We set the 'ring disabled' bit in the flags 1370 * field until we're actually ready to start 1371 * using this ring (i.e. once we set the MTU 1372 * high enough to require it). 
1373 */ 1374 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1375 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1376 1377 rcb->bge_hostaddr.bge_addr_lo = 1378 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1379 rcb->bge_hostaddr.bge_addr_hi = 1380 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1381 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1382 sc->bge_cdata.bge_rx_jumbo_ring_map, 1383 BUS_DMASYNC_PREREAD); 1384 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1385 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 1386 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1387 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1388 rcb->bge_hostaddr.bge_addr_hi); 1389 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1390 rcb->bge_hostaddr.bge_addr_lo); 1391 1392 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1393 rcb->bge_maxlen_flags); 1394 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1395 1396 /* Set up dummy disabled mini ring RCB */ 1397 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; 1398 rcb->bge_maxlen_flags = 1399 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1400 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1401 rcb->bge_maxlen_flags); 1402 } 1403 1404 /* 1405 * Set the BD ring replentish thresholds. The recommended 1406 * values are 1/8th the number of descriptors allocated to 1407 * each ring. 1408 * XXX The 5754 requires a lower threshold, so it might be a 1409 * requirement of all 575x family chips. The Linux driver sets 1410 * the lower threshold for all 5705 family chips as well, but there 1411 * are reports that it might not need to be so strict. 1412 */ 1413 if (BGE_IS_5705_PLUS(sc)) 1414 val = 8; 1415 else 1416 val = BGE_STD_RX_RING_CNT / 8; 1417 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1418 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1419 1420 /* 1421 * Disable all unused send rings by setting the 'ring disabled' 1422 * bit in the flags field of all the TX send ring control blocks. 
1423 * These are located in NIC memory. 1424 */ 1425 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1426 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1427 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1428 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1429 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1430 vrcb += sizeof(struct bge_rcb); 1431 } 1432 1433 /* Configure TX RCB 0 (we use only the first ring) */ 1434 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1435 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1436 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1437 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1438 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1439 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1440 if (!(BGE_IS_5705_PLUS(sc))) 1441 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1442 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1443 1444 /* Disable all unused RX return rings */ 1445 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1446 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1447 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1448 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1449 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1450 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1451 BGE_RCB_FLAG_RING_DISABLED)); 1452 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1453 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1454 (i * (sizeof(uint64_t))), 0); 1455 vrcb += sizeof(struct bge_rcb); 1456 } 1457 1458 /* Initialize RX ring indexes */ 1459 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1460 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1461 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1462 1463 /* 1464 * Set up RX return ring 0 1465 * Note that the NIC address for RX return rings is 0x00000000. 1466 * The return rings live entirely within the host, so the 1467 * nicaddr field in the RCB isn't used. 
1468 */ 1469 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1470 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1471 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1472 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1473 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); 1474 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1475 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1476 1477 /* Set random backoff seed for TX */ 1478 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1479 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 1480 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 1481 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + 1482 BGE_TX_BACKOFF_SEED_MASK); 1483 1484 /* Set inter-packet gap */ 1485 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1486 1487 /* 1488 * Specify which ring to use for packets that don't match 1489 * any RX rules. 1490 */ 1491 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1492 1493 /* 1494 * Configure number of RX lists. One interrupt distribution 1495 * list, sixteen active lists, one bad frames class. 1496 */ 1497 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1498 1499 /* Inialize RX list placement stats mask. */ 1500 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1501 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1502 1503 /* Disable host coalescing until we get it set up */ 1504 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1505 1506 /* Poll to make sure it's shut down. 
*/ 1507 for (i = 0; i < BGE_TIMEOUT; i++) { 1508 DELAY(10); 1509 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1510 break; 1511 } 1512 1513 if (i == BGE_TIMEOUT) { 1514 device_printf(sc->bge_dev, 1515 "host coalescing engine failed to idle\n"); 1516 return (ENXIO); 1517 } 1518 1519 /* Set up host coalescing defaults */ 1520 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1521 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1522 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1523 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1524 if (!(BGE_IS_5705_PLUS(sc))) { 1525 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1526 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1527 } 1528 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 1529 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 1530 1531 /* Set up address of statistics block */ 1532 if (!(BGE_IS_5705_PLUS(sc))) { 1533 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1534 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1535 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1536 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1537 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1538 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1539 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1540 } 1541 1542 /* Set up address of status block */ 1543 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1544 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1545 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1546 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1547 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1548 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 1549 1550 /* Turn on host coalescing state machine */ 1551 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1552 1553 /* Turn on RX BD completion state machine and enable attentions */ 1554 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1555 BGE_RBDCMODE_ENABLE | 
BGE_RBDCMODE_ATTN); 1556 1557 /* Turn on RX list placement state machine */ 1558 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1559 1560 /* Turn on RX list selector state machine. */ 1561 if (!(BGE_IS_5705_PLUS(sc))) 1562 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1563 1564 /* Turn on DMA, clear stats */ 1565 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB | 1566 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | 1567 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | 1568 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB | 1569 ((sc->bge_flags & BGE_FLAG_TBI) ? 1570 BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1571 1572 /* Set misc. local control, enable interrupts on attentions */ 1573 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1574 1575#ifdef notdef 1576 /* Assert GPIO pins for PHY reset */ 1577 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 1578 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 1579 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 1580 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 1581#endif 1582 1583 /* Turn on DMA completion state machine */ 1584 if (!(BGE_IS_5705_PLUS(sc))) 1585 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1586 1587 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 1588 1589 /* Enable host coalescing bug fix. 
*/ 1590 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 1591 sc->bge_asicrev == BGE_ASICREV_BCM5787) 1592 val |= 1 << 29; 1593 1594 /* Turn on write DMA state machine */ 1595 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1596 1597 /* Turn on read DMA state machine */ 1598 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1599 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS); 1600 1601 /* Turn on RX data completion state machine */ 1602 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1603 1604 /* Turn on RX BD initiator state machine */ 1605 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1606 1607 /* Turn on RX data and RX BD initiator state machine */ 1608 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1609 1610 /* Turn on Mbuf cluster free state machine */ 1611 if (!(BGE_IS_5705_PLUS(sc))) 1612 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1613 1614 /* Turn on send BD completion state machine */ 1615 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1616 1617 /* Turn on send data completion state machine */ 1618 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1619 1620 /* Turn on send data initiator state machine */ 1621 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1622 1623 /* Turn on send BD initiator state machine */ 1624 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1625 1626 /* Turn on send BD selector state machine */ 1627 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1628 1629 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1630 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1631 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 1632 1633 /* ack/clear link change events */ 1634 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 1635 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 1636 BGE_MACSTAT_LINK_CHANGED); 1637 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1638 1639 /* Enable PHY auto polling (for MII/GMII only) */ 1640 if (sc->bge_flags & BGE_FLAG_TBI) { 1641 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1642 } else { 1643 BGE_SETBIT(sc, 
BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 1644 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1645 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1646 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1647 BGE_EVTENB_MI_INTERRUPT); 1648 } 1649 1650 /* 1651 * Clear any pending link state attention. 1652 * Otherwise some link state change events may be lost until attention 1653 * is cleared by bge_intr() -> bge_link_upd() sequence. 1654 * It's not necessary on newer BCM chips - perhaps enabling link 1655 * state change attentions implies clearing pending attention. 1656 */ 1657 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 1658 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 1659 BGE_MACSTAT_LINK_CHANGED); 1660 1661 /* Enable link state change attentions. */ 1662 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1663 1664 return (0); 1665} 1666 1667const struct bge_revision * 1668bge_lookup_rev(uint32_t chipid) 1669{ 1670 const struct bge_revision *br; 1671 1672 for (br = bge_revisions; br->br_name != NULL; br++) { 1673 if (br->br_chipid == chipid) 1674 return (br); 1675 } 1676 1677 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1678 if (br->br_chipid == BGE_ASICREV(chipid)) 1679 return (br); 1680 } 1681 1682 return (NULL); 1683} 1684 1685const struct bge_vendor * 1686bge_lookup_vendor(uint16_t vid) 1687{ 1688 const struct bge_vendor *v; 1689 1690 for (v = bge_vendors; v->v_name != NULL; v++) 1691 if (v->v_id == vid) 1692 return (v); 1693 1694 panic("%s: unknown vendor %d", __func__, vid); 1695 return (NULL); 1696} 1697 1698/* 1699 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1700 * against our list and return its name if we find a match. 1701 * 1702 * Note that since the Broadcom controller contains VPD support, we 1703 * try to get the device name string from the controller itself instead 1704 * of the compiled-in string. It guarantees we'll always announce the 1705 * right product name. 
We fall back to the compiled-in string when 1706 * VPD is unavailable or corrupt. 1707 */ 1708static int 1709bge_probe(device_t dev) 1710{ 1711 struct bge_type *t = bge_devs; 1712 struct bge_softc *sc = device_get_softc(dev); 1713 uint16_t vid, did; 1714 1715 sc->bge_dev = dev; 1716 vid = pci_get_vendor(dev); 1717 did = pci_get_device(dev); 1718 while(t->bge_vid != 0) { 1719 if ((vid == t->bge_vid) && (did == t->bge_did)) { 1720 char model[64], buf[96]; 1721 const struct bge_revision *br; 1722 const struct bge_vendor *v; 1723 uint32_t id; 1724 1725 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 1726 BGE_PCIMISCCTL_ASICREV; 1727 br = bge_lookup_rev(id); 1728 v = bge_lookup_vendor(vid); 1729 { 1730#if __FreeBSD_version > 700024 1731 const char *pname; 1732 1733 if (pci_get_vpd_ident(dev, &pname) == 0) 1734 snprintf(model, 64, "%s", pname); 1735 else 1736#endif 1737 snprintf(model, 64, "%s %s", 1738 v->v_name, 1739 br != NULL ? br->br_name : 1740 "NetXtreme Ethernet Controller"); 1741 } 1742 snprintf(buf, 96, "%s, %sASIC rev. %#04x", model, 1743 br != NULL ? "" : "unknown ", id >> 16); 1744 device_set_desc_copy(dev, buf); 1745 if (pci_get_subvendor(dev) == DELL_VENDORID) 1746 sc->bge_flags |= BGE_FLAG_NO_3LED; 1747 if (did == BCOM_DEVICEID_BCM5755M) 1748 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; 1749 return (0); 1750 } 1751 t++; 1752 } 1753 1754 return (ENXIO); 1755} 1756 1757static void 1758bge_dma_free(struct bge_softc *sc) 1759{ 1760 int i; 1761 1762 /* Destroy DMA maps for RX buffers. */ 1763 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1764 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 1765 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1766 sc->bge_cdata.bge_rx_std_dmamap[i]); 1767 } 1768 1769 /* Destroy DMA maps for jumbo RX buffers. 
*/ 1770 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1771 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1772 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1773 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1774 } 1775 1776 /* Destroy DMA maps for TX buffers. */ 1777 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1778 if (sc->bge_cdata.bge_tx_dmamap[i]) 1779 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1780 sc->bge_cdata.bge_tx_dmamap[i]); 1781 } 1782 1783 if (sc->bge_cdata.bge_mtag) 1784 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); 1785 1786 1787 /* Destroy standard RX ring. */ 1788 if (sc->bge_cdata.bge_rx_std_ring_map) 1789 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1790 sc->bge_cdata.bge_rx_std_ring_map); 1791 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1792 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1793 sc->bge_ldata.bge_rx_std_ring, 1794 sc->bge_cdata.bge_rx_std_ring_map); 1795 1796 if (sc->bge_cdata.bge_rx_std_ring_tag) 1797 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1798 1799 /* Destroy jumbo RX ring. */ 1800 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1801 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1802 sc->bge_cdata.bge_rx_jumbo_ring_map); 1803 1804 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1805 sc->bge_ldata.bge_rx_jumbo_ring) 1806 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1807 sc->bge_ldata.bge_rx_jumbo_ring, 1808 sc->bge_cdata.bge_rx_jumbo_ring_map); 1809 1810 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1811 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1812 1813 /* Destroy RX return ring. 
*/ 1814 if (sc->bge_cdata.bge_rx_return_ring_map) 1815 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1816 sc->bge_cdata.bge_rx_return_ring_map); 1817 1818 if (sc->bge_cdata.bge_rx_return_ring_map && 1819 sc->bge_ldata.bge_rx_return_ring) 1820 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1821 sc->bge_ldata.bge_rx_return_ring, 1822 sc->bge_cdata.bge_rx_return_ring_map); 1823 1824 if (sc->bge_cdata.bge_rx_return_ring_tag) 1825 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1826 1827 /* Destroy TX ring. */ 1828 if (sc->bge_cdata.bge_tx_ring_map) 1829 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1830 sc->bge_cdata.bge_tx_ring_map); 1831 1832 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1833 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1834 sc->bge_ldata.bge_tx_ring, 1835 sc->bge_cdata.bge_tx_ring_map); 1836 1837 if (sc->bge_cdata.bge_tx_ring_tag) 1838 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1839 1840 /* Destroy status block. */ 1841 if (sc->bge_cdata.bge_status_map) 1842 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1843 sc->bge_cdata.bge_status_map); 1844 1845 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1846 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1847 sc->bge_ldata.bge_status_block, 1848 sc->bge_cdata.bge_status_map); 1849 1850 if (sc->bge_cdata.bge_status_tag) 1851 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1852 1853 /* Destroy statistics block. */ 1854 if (sc->bge_cdata.bge_stats_map) 1855 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1856 sc->bge_cdata.bge_stats_map); 1857 1858 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 1859 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1860 sc->bge_ldata.bge_stats, 1861 sc->bge_cdata.bge_stats_map); 1862 1863 if (sc->bge_cdata.bge_stats_tag) 1864 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1865 1866 /* Destroy the parent tag. 
*/ 1867 if (sc->bge_cdata.bge_parent_tag) 1868 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1869} 1870 1871static int 1872bge_dma_alloc(device_t dev) 1873{ 1874 struct bge_dmamap_arg ctx; 1875 struct bge_softc *sc; 1876 int i, error; 1877 1878 sc = device_get_softc(dev); 1879 1880 /* 1881 * Allocate the parent bus DMA tag appropriate for PCI. 1882 */ 1883 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), /* parent */ 1884 1, 0, /* alignment, boundary */ 1885 BUS_SPACE_MAXADDR, /* lowaddr */ 1886 BUS_SPACE_MAXADDR, /* highaddr */ 1887 NULL, NULL, /* filter, filterarg */ 1888 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1889 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1890 0, /* flags */ 1891 NULL, NULL, /* lockfunc, lockarg */ 1892 &sc->bge_cdata.bge_parent_tag); 1893 1894 if (error != 0) { 1895 device_printf(sc->bge_dev, 1896 "could not allocate parent dma tag\n"); 1897 return (ENOMEM); 1898 } 1899 1900 /* 1901 * Create tag for RX mbufs. 1902 */ 1903 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1904 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1905 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 1906 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); 1907 1908 if (error) { 1909 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1910 return (ENOMEM); 1911 } 1912 1913 /* Create DMA maps for RX buffers. */ 1914 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1915 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1916 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1917 if (error) { 1918 device_printf(sc->bge_dev, 1919 "can't create DMA map for RX\n"); 1920 return (ENOMEM); 1921 } 1922 } 1923 1924 /* Create DMA maps for TX buffers. 
*/ 1925 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1926 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1927 &sc->bge_cdata.bge_tx_dmamap[i]); 1928 if (error) { 1929 device_printf(sc->bge_dev, 1930 "can't create DMA map for RX\n"); 1931 return (ENOMEM); 1932 } 1933 } 1934 1935 /* Create tag for standard RX ring. */ 1936 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1937 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1938 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1939 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1940 1941 if (error) { 1942 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1943 return (ENOMEM); 1944 } 1945 1946 /* Allocate DMA'able memory for standard RX ring. */ 1947 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1948 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1949 &sc->bge_cdata.bge_rx_std_ring_map); 1950 if (error) 1951 return (ENOMEM); 1952 1953 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1954 1955 /* Load the address of the standard RX ring. */ 1956 ctx.bge_maxsegs = 1; 1957 ctx.sc = sc; 1958 1959 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1960 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 1961 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1962 1963 if (error) 1964 return (ENOMEM); 1965 1966 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1967 1968 /* Create tags for jumbo mbufs. */ 1969 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1970 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1971 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1972 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1973 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1974 if (error) { 1975 device_printf(sc->bge_dev, 1976 "could not allocate jumbo dma tag\n"); 1977 return (ENOMEM); 1978 } 1979 1980 /* Create tag for jumbo RX ring. 
*/ 1981 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1982 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1983 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1984 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1985 1986 if (error) { 1987 device_printf(sc->bge_dev, 1988 "could not allocate jumbo ring dma tag\n"); 1989 return (ENOMEM); 1990 } 1991 1992 /* Allocate DMA'able memory for jumbo RX ring. */ 1993 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1994 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1995 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1996 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1997 if (error) 1998 return (ENOMEM); 1999 2000 /* Load the address of the jumbo RX ring. */ 2001 ctx.bge_maxsegs = 1; 2002 ctx.sc = sc; 2003 2004 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2005 sc->bge_cdata.bge_rx_jumbo_ring_map, 2006 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2007 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2008 2009 if (error) 2010 return (ENOMEM); 2011 2012 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2013 2014 /* Create DMA maps for jumbo RX buffers. */ 2015 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2016 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2017 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2018 if (error) { 2019 device_printf(sc->bge_dev, 2020 "can't create DMA map for jumbo RX\n"); 2021 return (ENOMEM); 2022 } 2023 } 2024 2025 } 2026 2027 /* Create tag for RX return ring. */ 2028 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2029 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2030 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2031 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2032 2033 if (error) { 2034 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2035 return (ENOMEM); 2036 } 2037 2038 /* Allocate DMA'able memory for RX return ring. 
*/ 2039 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2040 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2041 &sc->bge_cdata.bge_rx_return_ring_map); 2042 if (error) 2043 return (ENOMEM); 2044 2045 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2046 BGE_RX_RTN_RING_SZ(sc)); 2047 2048 /* Load the address of the RX return ring. */ 2049 ctx.bge_maxsegs = 1; 2050 ctx.sc = sc; 2051 2052 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2053 sc->bge_cdata.bge_rx_return_ring_map, 2054 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2055 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2056 2057 if (error) 2058 return (ENOMEM); 2059 2060 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2061 2062 /* Create tag for TX ring. */ 2063 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2064 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2065 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2066 &sc->bge_cdata.bge_tx_ring_tag); 2067 2068 if (error) { 2069 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2070 return (ENOMEM); 2071 } 2072 2073 /* Allocate DMA'able memory for TX ring. */ 2074 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2075 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2076 &sc->bge_cdata.bge_tx_ring_map); 2077 if (error) 2078 return (ENOMEM); 2079 2080 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2081 2082 /* Load the address of the TX ring. */ 2083 ctx.bge_maxsegs = 1; 2084 ctx.sc = sc; 2085 2086 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2087 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2088 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2089 2090 if (error) 2091 return (ENOMEM); 2092 2093 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2094 2095 /* Create tag for status block. 
*/ 2096 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2097 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2098 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 2099 NULL, NULL, &sc->bge_cdata.bge_status_tag); 2100 2101 if (error) { 2102 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2103 return (ENOMEM); 2104 } 2105 2106 /* Allocate DMA'able memory for status block. */ 2107 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2108 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2109 &sc->bge_cdata.bge_status_map); 2110 if (error) 2111 return (ENOMEM); 2112 2113 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 2114 2115 /* Load the address of the status block. */ 2116 ctx.sc = sc; 2117 ctx.bge_maxsegs = 1; 2118 2119 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2120 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2121 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2122 2123 if (error) 2124 return (ENOMEM); 2125 2126 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2127 2128 /* Create tag for statistics block. */ 2129 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2130 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2131 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2132 &sc->bge_cdata.bge_stats_tag); 2133 2134 if (error) { 2135 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2136 return (ENOMEM); 2137 } 2138 2139 /* Allocate DMA'able memory for statistics block. */ 2140 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2141 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2142 &sc->bge_cdata.bge_stats_map); 2143 if (error) 2144 return (ENOMEM); 2145 2146 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2147 2148 /* Load the address of the statstics block. 
 */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;

	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	/* Bus address recorded by the bge_dma_map_addr() callback. */
	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;

	return (0);
}

#if __FreeBSD_version > 602105
/*
 * Return true if this device has more than one port, i.e. if any other
 * PCI function at the same bus/slot answers to a probe.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int b, s, f, fscan;

	b = pci_get_bus(dev);
	s = pci_get_slot(dev);
	f = pci_get_function(dev);
	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
		if (fscan != f && pci_find_bsf(b, s, fscan) != NULL)
			return (1);
	return (0);
}

/*
 * Return true if MSI can be used with this device.  ASIC revisions not
 * listed below fall through the switch and default to 0 (no MSI).
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when this chip is configured
		 * in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		/* The early 5750 A/B steppings are excluded. */
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5780:
		can_use_msi = 1;
		break;
	}
	return (can_use_msi);
}
#endif

/*
 * Device attach routine: map the register BAR, identify the ASIC,
 * reset and initialize the chip, obtain the station address, allocate
 * the DMA rings and register the network interface and interrupt
 * handler.  Failures unwind through the "fail" label via
 * bge_release_resources().
 */
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	int error, reg, rid, trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	/*
	 * Map control/status registers.
 */
	pci_enable_busmaster(dev);

	/* BAR0 is the memory-mapped register window. */
	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);

	if (sc->bge_res == NULL) {
		device_printf (sc->bge_dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save ASIC rev. */

	/* The chip id lives in the upper bits of the PCI misc-control reg. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	if (bge_has_eeprom(sc))
		sc->bge_flags |= BGE_FLAG_EEPROM;

	/*
	 * Save chipset family.  Note the deliberate case fallthroughs:
	 * the 5714 family accumulates the 575X_PLUS and 5705_PLUS flags,
	 * and the 575X group accumulates 5705_PLUS.
	 */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
		/* FALLTHRU */
	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5787:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* FALLTHRU */
	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	/* Set various bug flags.
 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_flags |= BGE_FLAG_CRC_BUG;
	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_flags |= BGE_FLAG_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
	if (BGE_IS_5705_PLUS(sc) &&
	    !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787)
			sc->bge_flags |= BGE_FLAG_JITTER_BUG;
		else
			sc->bge_flags |= BGE_FLAG_BER_BUG;
	}

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
#if __FreeBSD_version > 602101
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		/*
		 * Found a PCI Express capabilities register, this
		 * must be a PCI Express device.
		 */
		if (reg != 0)
			sc->bge_flags |= BGE_FLAG_PCIE;
	} else if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
		if (reg != 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}

#else
	if (BGE_IS_5705_PLUS(sc)) {
		/* Older kernels: probe the capability register directly. */
		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
		if ((reg & 0xFF) == BGE_PCIE_CAPID)
			sc->bge_flags |= BGE_FLAG_PCIE;
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}
#endif

#if __FreeBSD_version > 602105
	{
		int msicount;

		/*
		 * Allocate the interrupt, using MSI if possible.  These devices
		 * support 8 MSI messages, but only the first one is used in
		 * normal operation.
 */
		if (bge_can_use_msi(sc)) {
			msicount = pci_msi_count(dev);
			if (msicount > 1)
				msicount = 1;
		} else
			msicount = 0;
		/* MSI vectors are numbered from rid 1; legacy INTx is rid 0. */
		if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
			rid = 1;
			sc->bge_flags |= BGE_FLAG_MSI;
		} else
			rid = 0;
	}
#else
	rid = 0;
#endif

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* Try to reset the chip. */
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Detect ASF management firmware: the magic number in shared
	 * memory indicates live bootcode whose NIC config word may have
	 * the ASF bit set.
	 */
	sc->bge_asf_mode = 0;
	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
	    == BGE_MAGIC_NUMBER)) {
		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
		    & BGE_HWCFG_ASF) {
			sc->bge_asf_mode |= ASF_ENABLE;
			sc->bge_asf_mode |= ASF_STACKUP;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	/* Try to reset the chip again the nice way.
 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

#ifdef __sparc64__
	/* No EEPROM fitted: fall back to the OpenFirmware MAC address. */
	if ((sc->bge_flags & BGE_FLAG_EEPROM) == 0)
		OF_getetheraddr(dev, eaddr);
	else
#endif
	{
		/*
		 * Read the station address from NIC SRAM; 0x484B is the
		 * ASCII signature "HK" left by the bootcode, followed by
		 * the 6-byte MAC.  Otherwise fall back to the EEPROM.
		 */
		mac_tmp = bge_readmem_ind(sc, 0x0C14);
		if ((mac_tmp >> 16) == 0x484B) {
			eaddr[0] = (u_char)(mac_tmp >> 8);
			eaddr[1] = (u_char)mac_tmp;
			mac_tmp = bge_readmem_ind(sc, 0x0C18);
			eaddr[2] = (u_char)(mac_tmp >> 24);
			eaddr[3] = (u_char)(mac_tmp >> 16);
			eaddr[4] = (u_char)(mac_tmp >> 8);
			eaddr[5] = (u_char)mac_tmp;
		} else if (bge_read_eeprom(sc, eaddr,
		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
			device_printf(sc->bge_dev,
			    "failed to read station address\n");
			error = ENXIO;
			goto fail;
		}
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(dev)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values.
*/ 2442 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2443 sc->bge_rx_coal_ticks = 150; 2444 sc->bge_tx_coal_ticks = 150; 2445 sc->bge_rx_max_coal_bds = 10; 2446 sc->bge_tx_max_coal_bds = 10; 2447 2448 /* Set up ifnet structure */ 2449 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2450 if (ifp == NULL) { 2451 device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2452 error = ENXIO; 2453 goto fail; 2454 } 2455 ifp->if_softc = sc; 2456 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2457 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2458 ifp->if_ioctl = bge_ioctl; 2459 ifp->if_start = bge_start; 2460 ifp->if_init = bge_init; 2461 ifp->if_mtu = ETHERMTU; 2462 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2463 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2464 IFQ_SET_READY(&ifp->if_snd); 2465 ifp->if_hwassist = BGE_CSUM_FEATURES; 2466 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 2467 IFCAP_VLAN_MTU; 2468#ifdef IFCAP_VLAN_HWCSUM 2469 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2470#endif 2471 ifp->if_capenable = ifp->if_capabilities; 2472#ifdef DEVICE_POLLING 2473 ifp->if_capabilities |= IFCAP_POLLING; 2474#endif 2475 2476 /* 2477 * 5700 B0 chips do not support checksumming correctly due 2478 * to hardware bugs. 2479 */ 2480 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2481 ifp->if_capabilities &= ~IFCAP_HWCSUM; 2482 ifp->if_capenable &= IFCAP_HWCSUM; 2483 ifp->if_hwassist = 0; 2484 } 2485 2486 /* 2487 * Figure out what sort of media we have by checking the 2488 * hardware config word in the first 32k of NIC internal memory, 2489 * or fall back to examining the EEPROM if necessary. 2490 * Note: on some BCM5700 cards, this value appears to be unset. 2491 * If that's the case, we have to rely on identifying the NIC 2492 * by its PCI subsystem ID, as we do below for the SysKonnect 2493 * SK-9D41. 
 */
	/* Prefer the live NIC config word; else read it from the EEPROM. */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else if (sc->bge_flags & BGE_FLAG_EEPROM) {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		/* EEPROM contents are big-endian. */
		hwcfg = ntohl(hwcfg);
	}

	/* Fiber media => TBI (ten-bit interface), i.e. no copper PHY. */
	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_flags |= BGE_FLAG_TBI;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_flags |= BGE_FLAG_TBI;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* Fiber: manage media ourselves instead of via miibus. */
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access the
		 * probe if ASF is running.  Retry a couple of times
		 * if we get a conflict with the ASF firmware accessing
		 * the PHY.
2530 */ 2531 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2532again: 2533 bge_asf_driver_up(sc); 2534 2535 trys = 0; 2536 if (mii_phy_probe(dev, &sc->bge_miibus, 2537 bge_ifmedia_upd, bge_ifmedia_sts)) { 2538 if (trys++ < 4) { 2539 device_printf(sc->bge_dev, "Try again\n"); 2540 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 2541 BMCR_RESET); 2542 goto again; 2543 } 2544 2545 device_printf(sc->bge_dev, "MII without any PHY!\n"); 2546 error = ENXIO; 2547 goto fail; 2548 } 2549 2550 /* 2551 * Now tell the firmware we are going up after probing the PHY 2552 */ 2553 if (sc->bge_asf_mode & ASF_STACKUP) 2554 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2555 } 2556 2557 /* 2558 * When using the BCM5701 in PCI-X mode, data corruption has 2559 * been observed in the first few bytes of some received packets. 2560 * Aligning the packet buffer in memory eliminates the corruption. 2561 * Unfortunately, this misaligns the packet payloads. On platforms 2562 * which do not support unaligned accesses, we will realign the 2563 * payloads by copying the received packets. 2564 */ 2565 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2566 sc->bge_flags & BGE_FLAG_PCIX) 2567 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2568 2569 /* 2570 * Call MI attach routine. 2571 */ 2572 ether_ifattach(ifp, eaddr); 2573 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 2574 2575 /* 2576 * Hookup IRQ last. 
2577 */ 2578#if __FreeBSD_version > 700030 2579 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2580 NULL, bge_intr, sc, &sc->bge_intrhand); 2581#else 2582 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2583 bge_intr, sc, &sc->bge_intrhand); 2584#endif 2585 2586 if (error) { 2587 bge_detach(dev); 2588 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2589 } 2590 2591 bge_add_sysctls(sc); 2592 2593 return (0); 2594 2595fail: 2596 bge_release_resources(sc); 2597 2598 return (error); 2599} 2600 2601static int 2602bge_detach(device_t dev) 2603{ 2604 struct bge_softc *sc; 2605 struct ifnet *ifp; 2606 2607 sc = device_get_softc(dev); 2608 ifp = sc->bge_ifp; 2609 2610#ifdef DEVICE_POLLING 2611 if (ifp->if_capenable & IFCAP_POLLING) 2612 ether_poll_deregister(ifp); 2613#endif 2614 2615 BGE_LOCK(sc); 2616 bge_stop(sc); 2617 bge_reset(sc); 2618 BGE_UNLOCK(sc); 2619 2620 callout_drain(&sc->bge_stat_ch); 2621 2622 ether_ifdetach(ifp); 2623 2624 if (sc->bge_flags & BGE_FLAG_TBI) { 2625 ifmedia_removeall(&sc->bge_ifmedia); 2626 } else { 2627 bus_generic_detach(dev); 2628 device_delete_child(dev, sc->bge_miibus); 2629 } 2630 2631 bge_release_resources(sc); 2632 2633 return (0); 2634} 2635 2636static void 2637bge_release_resources(struct bge_softc *sc) 2638{ 2639 device_t dev; 2640 2641 dev = sc->bge_dev; 2642 2643 if (sc->bge_intrhand != NULL) 2644 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2645 2646 if (sc->bge_irq != NULL) 2647 bus_release_resource(dev, SYS_RES_IRQ, 2648 sc->bge_flags & BGE_FLAG_MSI ? 
		    1 : 0, sc->bge_irq);

#if __FreeBSD_version > 602105
	if (sc->bge_flags & BGE_FLAG_MSI)
		pci_release_msi(dev);
#endif

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);
}

/*
 * Perform a global chip reset: preserve critical PCI config registers
 * across the reset, restore them afterwards, then wait for the on-chip
 * bootcode to signal completion via the magic-number handshake.
 */
static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, pcistate, reset;
	void (*write_op)(struct bge_softc *, int, int);
	int i, val = 0;

	dev = sc->bge_dev;

	/*
	 * Pick the register write method that stays usable while the
	 * chip is in reset (direct for PCIe, indirect otherwise).
	 */
	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
		if (bootverbose)
			device_printf(sc->bge_dev, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_MAGIC_NUMBER to the same location.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;

	/* XXX: Broadcom Linux driver.
 */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (CSR_READ_4(sc, 0x7E2C) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7E2C, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= 1 << 29;
		}
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc))
		reset |= 0x04000000;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xC4, 4);
			pci_write_config(dev, 0xC4, v | (1 << 15), 4);
		}
		/*
		 * Set PCIE max payload size to 128 bytes and clear error
		 * status.
		 */
		pci_write_config(dev, 0xD8, 0xF5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	/* Re-enable MSI, if neccesary, and enable the memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		uint32_t val;

		/* This chip disables MSI on reset.
 */
		if (sc->bge_flags & BGE_FLAG_MSI) {
			/* Re-enable MSI both in PCI config and MAC space. */
			val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
			pci_write_config(dev, BGE_PCI_MSI_CTL,
			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Poll until we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization is complete.
	 * We expect this to fail if no EEPROM is fitted though.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
	}

	/* Only complain when an EEPROM (and hence bootcode) is present. */
	if ((sc->bge_flags & BGE_FLAG_EEPROM) && i == BGE_TIMEOUT)
		device_printf(sc->bge_dev, "firmware handshake timed out, "
		    "found 0x%08x\n", val);

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state.  This is a
	 * fairly good indicator of reset completion.  If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	if (sc->bge_flags & BGE_FLAG_PCIE) {
		reset = bge_readmem_ind(sc, 0x7C00);
		bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
	}

	/* Fix up byte swapping.
 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_BYTESWAP_DATA);

	/* Tell the ASF firmware we are up */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7C00);
		CSR_WRITE_4(sc, 0x7C00, v | (1 << 25));
	}
	/* Let the chip settle before callers touch it again. */
	DELAY(10000);

	return(0);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do.
 */
	/* The status block's producer index tells us how far to consume. */
	if (sc->bge_rx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
		return;

	ifp = sc->bge_ifp;

	/* Pull the rings out of DMA before the CPU reads them. */
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);

	while(sc->bge_rx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/*
			 * On a bad descriptor, or when a fresh mbuf can't
			 * be had, recycle the old mbuf back into the ring
			 * and count an input error.
			 */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo,
				    m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		/* Chip includes the CRC in bge_len; strip it. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			/* Skip runt frames that were padded by the sender. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		/* Only the last descriptor of a frame carries the END flag. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	/* Descriptors freed: allow the transmit path to queue again. */
	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/* Ring fully drained: disarm the watchdog. */
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
}

#ifdef DEVICE_POLLING
/*
 * Polling-mode entry point: service RX/TX without interrupts, with a
 * packet budget of "count" per call.
 */
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint32_t statusword;

	BGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	/* Atomically fetch and clear the status word we just synced in. */
	statusword = atomic_readandclear_32(
	    &sc->bge_ldata.bge_status_block->bge_status);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS.
 */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler (legacy INTx or MSI): ack the chip, update link
 * state if needed, then service the RX return and TX rings.
 */
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	/* Polling active: the poll routine owns RX/TX servicing. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts.  We used to have races
	 * instead of the necessary complications.  Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack.  We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling require
	 * the status check).  So toggling would probably be a pessimization
	 * even with MSI.
It would only be needed for using a task queue. 3156 */ 3157 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3158 3159 /* 3160 * Do the mandatory PCI flush as well as get the link status. 3161 */ 3162 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 3163 3164 /* Make sure the descriptor ring indexes are coherent. */ 3165 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3166 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 3167 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3168 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 3169 3170 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 3171 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3172 statusword || sc->bge_link_evt) 3173 bge_link_upd(sc); 3174 3175 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3176 /* Check RX return ring producer/consumer. */ 3177 bge_rxeof(sc); 3178 3179 /* Check TX ring producer/consumer. */ 3180 bge_txeof(sc); 3181 } 3182 3183 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3184 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3185 bge_start_locked(ifp); 3186 3187 BGE_UNLOCK(sc); 3188} 3189 3190static void 3191bge_asf_driver_up(struct bge_softc *sc) 3192{ 3193 if (sc->bge_asf_mode & ASF_STACKUP) { 3194 /* Send ASF heartbeat aprox. every 2s */ 3195 if (sc->bge_asf_count) 3196 sc->bge_asf_count --; 3197 else { 3198 sc->bge_asf_count = 5; 3199 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 3200 BGE_FW_DRV_ALIVE); 3201 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 3202 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 3203 CSR_WRITE_4(sc, BGE_CPU_EVENT, 3204 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 3205 } 3206 } 3207} 3208 3209static void 3210bge_tick(void *xsc) 3211{ 3212 struct bge_softc *sc = xsc; 3213 struct mii_data *mii = NULL; 3214 3215 BGE_LOCK_ASSERT(sc); 3216 3217 /* Synchronize with possible callout reset/stop. 
 */
	/* Bail if the callout was rescheduled or stopped while we waited. */
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
		return;

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/* Don't mess with the PHY in IPMI/ASF mode */
		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
			mii_tick(mii);
	} else {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

/*
 * Statistics update for 5705-and-newer chips, which expose counters as
 * plain registers rather than a DMA'd statistics block.
 */
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->bge_ifp;

	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));

	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
}

/*
 * Statistics update for older chips: read the cumulative counters out of
 * the NIC's statistics block through the memory window and accumulate
 * the delta since the last read into the ifnet counters. The uint32_t
 * delta arithmetic handles counter wraparound.
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef READ_STAT
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 *
 * Returns 0 on success or ENOBUFS if an extra mbuf could not be allocated.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * On success *txidx is advanced past the descriptors consumed; on failure
 * the mbuf may have been freed and *m_head set to NULL (the caller must
 * check). Called with the softc lock held.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	struct bge_tx_bd *d;
	struct mbuf *m = *m_head;
	uint32_t idx = *txidx;
	uint16_t csum_flags;
	int nsegs, i, error;

	/* Translate requested checksum offloads into descriptor flags. */
	csum_flags = 0;
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			/* Zero-pad runts; see bge_cksum_pad() for why. */
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
		if (m->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(m, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
 */
	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment. */
	for (i = 0; ; i++) {
		d = &sc->bge_ldata.bge_tx_ring[idx];
		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		if (i == nsegs - 1)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}

	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/* ... and put VLAN tag into first segment. */
	d = &sc->bge_ldata.bge_tx_ring[*txidx];
#if __FreeBSD_version > 700022
	if (m->m_flags & M_VLANTAG) {
		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
	} else
		d->bge_vlan_tag = 0;
#else
	{
		struct m_tag *mtag;

		if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
			d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
		} else
			d->bge_vlan_tag = 0;
	}
#endif

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain (bge_txeof() unloads the map found at the
	 * END descriptor's index).
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 * Dequeues frames until the ring fills or the send queue empties, then
 * kicks the chip once with the new producer index. Softc lock held.
 */
static void
bge_start_locked(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int count = 0;

	sc = ifp->if_softc;

	/* Nothing to do without link or queued frames. */
	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	prodidx = sc->bge_tx_prodidx;

	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * The code inside the if() block is never reached since we
		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
		 * requests to checksum TCP/UDP in a fragmented packet.
		 *
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			/* m_head == NULL means encap freed the mbuf. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		++count;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#ifdef ETHER_BPF_MTAP
		ETHER_BPF_MTAP(ifp, m_head);
#else
		BPF_MTAP(ifp, m_head);
#endif
	}

	if (count == 0)
		/* No packets were dequeued. */
		return;

	/* Transmit. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->bge_timer = 5;
}

/*
 * Locked wrapper around bge_start_locked(); this is the if_start entry
 * point registered with the ifnet.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;
	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}

/*
 * Bring the interface up: stop/reset the chip, reprogram all blocks and
 * rings, load the station address and RX filters, and enable (or mask,
 * when polling) interrupts. Softc lock must be held.
 */
static void
bge_init_locked(struct bge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t *m;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		device_printf(sc->bge_dev, "initialization failure\n");
		return;
	}

	ifp = sc->bge_ifp;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address (as three big-endian 16-bit words). */
	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Program promiscuous mode. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring.
 */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			device_printf (sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter. */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver. */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
		    BGE_PCIMISCCTL_MASK_PCI_INTR);
		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
	} else
#endif

	/* Enable host interrupts. */
	{
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
	}

	bge_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the 1 Hz housekeeping timer. */
	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

/*
 * Locked wrapper around bge_init_locked(); registered as the ifnet
 * if_init entry point.
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;

	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	int res;

	BGE_LOCK(sc);
	res = bge_ifmedia_upd_locked(ifp);
	BGE_UNLOCK(sc);

	return (res);
}

/*
 * Set media options with the softc lock already held. For TBI (fiber)
 * NICs the MAC is programmed directly; for copper NICs the request is
 * delegated to the PHY via miibus. Returns 0 or EINVAL for unsupported
 * media selections.
 */
static int
bge_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	BGE_LOCK_ASSERT(sc);

	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
 */
			if (bge_fake_autoneg == 0 &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
				    BGE_SGDIGCFG_PAUSE_CAP |
				    BGE_SGDIGCFG_ASYM_PAUSE;
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
				    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	/* Copper: reset each PHY instance and let mii handle the change. */
	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	BGE_LOCK(sc);

	/* TBI: derive status from the MAC; copper: ask the PHY. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BGE_UNLOCK(sc);
}

/*
 * Ioctl entry point: MTU changes, interface flags, multicast filter
 * updates, media selection, and capability (polling/checksum) toggles.
 * Anything else is passed to ether_ioctl().
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/* Jumbo MTU only on jumbo-capable chips. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two. Similarly for ALLMULTI.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				/* Mask hardware interrupts while polling. */
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
				ifp->if_capenable |= IFCAP_POLLING;
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				BGE_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
#ifdef VLAN_CAPABILITIES
			VLAN_CAPABILITIES(ifp);
#endif
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Per-second TX watchdog, driven from bge_tick(). When the timer set by
 * bge_start_locked() counts down to zero without bge_txeof() clearing it,
 * the chip is assumed wedged and the interface is reinitialized.
 */
static void
bge_watchdog(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Timer not armed, or armed and not yet expired. */
	if (sc->bge_timer == 0 || --sc->bge_timer)
		return;

	ifp = sc->bge_ifp;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	bge_init_locked(sc);

	ifp->if_oerrors++;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
		mii = device_get_softc(sc->bge_miibus);

	callout_stop(&sc->bge_stat_ch);

	/*
	 * Disable all of the receiver blocks.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		/*
		 * If we are called from bge_detach(), mii is already NULL.
		 */
		if (mii != NULL) {
			/* Temporarily force IFM_NONE, then restore media. */
			ifm = mii->mii_media.ifm_cur;
			mtmp = ifm->ifm_media;
			ifm->ifm_media = IFM_ETHER | IFM_NONE;
			mii_mediachg(mii);
			ifm->ifm_media = mtmp;
		}
		ifp->if_flags = itmp;
	}

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);
}

/*
 * Device suspend method: quiesce the chip. State is rebuilt from scratch
 * in bge_resume().
 */
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}

/*
 * Device resume method: reinitialize and restart transmission if the
 * interface was up before suspend.
 */
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
	if (ifp->if_flags & IFF_UP) {
		bge_init_locked(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);

	return (0);
}

/*
 * Update the driver's notion of link state and report transitions.
 * Three strategies depending on hardware: MII interrupts for pre-B2
 * BCM5700, the MAC status register for TBI (fiber), and the MI status
 * register (auto-polling) for everything else. Softc lock held.
 */
static void
bge_link_upd(struct bge_softc *sc)
{
	struct mii_data *mii;
	uint32_t link, status;

	BGE_LOCK_ASSERT(sc);

	/* Clear 'pending link event' flag. */
	sc->bge_link_evt = 0;

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/* Clear the interrupt. */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
		 * in status word always set. Workaround this bug by reading
		 * PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * Discard link events for MII/GMII controllers
		 * if MI auto-polling is disabled.
		 */
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

/*
 * Convenience wrapper for registering one read-only statistics sysctl
 * backed by an offset into the NIC's statistics block.
 */
#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
	    sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
	    desc)

static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *schildren;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
	    "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
	    "Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
	    "Memory Read");

#endif

	tree = SYSCTL_ADD_NODE(ctx,
children, OID_AUTO, "stats", CTLFLAG_RD, 4262 NULL, "BGE Statistics"); 4263 schildren = children = SYSCTL_CHILDREN(tree); 4264 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", 4265 children, COSFramesDroppedDueToFilters, 4266 "FramesDroppedDueToFilters"); 4267 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", 4268 children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); 4269 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", 4270 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); 4271 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", 4272 children, nicNoMoreRxBDs, "NoMoreRxBDs"); 4273 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", 4274 children, ifInDiscards, "InputDiscards"); 4275 BGE_SYSCTL_STAT(sc, ctx, "Input Errors", 4276 children, ifInErrors, "InputErrors"); 4277 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", 4278 children, nicRecvThresholdHit, "RecvThresholdHit"); 4279 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", 4280 children, nicDmaReadQueueFull, "DmaReadQueueFull"); 4281 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", 4282 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); 4283 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", 4284 children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); 4285 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", 4286 children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); 4287 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", 4288 children, nicRingStatusUpdate, "RingStatusUpdate"); 4289 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", 4290 children, nicInterrupts, "Interrupts"); 4291 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", 4292 children, nicAvoidedInterrupts, "AvoidedInterrupts"); 4293 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", 4294 children, nicSendThresholdHit, "SendThresholdHit"); 4295 4296 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, 4297 NULL, "BGE RX 
Statistics"); 4298 children = SYSCTL_CHILDREN(tree); 4299 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", 4300 children, rxstats.ifHCInOctets, "Octets"); 4301 BGE_SYSCTL_STAT(sc, ctx, "Fragments", 4302 children, rxstats.etherStatsFragments, "Fragments"); 4303 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", 4304 children, rxstats.ifHCInUcastPkts, "UcastPkts"); 4305 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", 4306 children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); 4307 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", 4308 children, rxstats.dot3StatsFCSErrors, "FCSErrors"); 4309 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", 4310 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); 4311 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", 4312 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); 4313 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", 4314 children, rxstats.xoffPauseFramesReceived, 4315 "xoffPauseFramesReceived"); 4316 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", 4317 children, rxstats.macControlFramesReceived, 4318 "ControlFramesReceived"); 4319 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", 4320 children, rxstats.xoffStateEntered, "xoffStateEntered"); 4321 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", 4322 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); 4323 BGE_SYSCTL_STAT(sc, ctx, "Jabbers", 4324 children, rxstats.etherStatsJabbers, "Jabbers"); 4325 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", 4326 children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); 4327 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", 4328 children, rxstats.inRangeLengthError, "inRangeLengthError"); 4329 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", 4330 children, rxstats.outRangeLengthError, "outRangeLengthError"); 4331 4332 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, 4333 NULL, "BGE TX Statistics"); 4334 children = SYSCTL_CHILDREN(tree); 4335 BGE_SYSCTL_STAT(sc, ctx, 
"Outbound Octets", 4336 children, txstats.ifHCOutOctets, "Octets"); 4337 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", 4338 children, txstats.etherStatsCollisions, "Collisions"); 4339 BGE_SYSCTL_STAT(sc, ctx, "XON Sent", 4340 children, txstats.outXonSent, "XonSent"); 4341 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", 4342 children, txstats.outXoffSent, "XoffSent"); 4343 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", 4344 children, txstats.flowControlDone, "flowControlDone"); 4345 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", 4346 children, txstats.dot3StatsInternalMacTransmitErrors, 4347 "InternalMacTransmitErrors"); 4348 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", 4349 children, txstats.dot3StatsSingleCollisionFrames, 4350 "SingleCollisionFrames"); 4351 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", 4352 children, txstats.dot3StatsMultipleCollisionFrames, 4353 "MultipleCollisionFrames"); 4354 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", 4355 children, txstats.dot3StatsDeferredTransmissions, 4356 "DeferredTransmissions"); 4357 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", 4358 children, txstats.dot3StatsExcessiveCollisions, 4359 "ExcessiveCollisions"); 4360 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", 4361 children, txstats.dot3StatsLateCollisions, 4362 "LateCollisions"); 4363 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", 4364 children, txstats.ifHCOutUcastPkts, "UcastPkts"); 4365 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", 4366 children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); 4367 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", 4368 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); 4369 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", 4370 children, txstats.dot3StatsCarrierSenseErrors, 4371 "CarrierSenseErrors"); 4372 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", 4373 children, txstats.ifOutDiscards, "Discards"); 4374 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", 4375 children, txstats.ifOutErrors, "Errors"); 
4376} 4377 4378static int 4379bge_sysctl_stats(SYSCTL_HANDLER_ARGS) 4380{ 4381 struct bge_softc *sc; 4382 uint32_t result; 4383 int base, offset; 4384 4385 sc = (struct bge_softc *)arg1; 4386 offset = arg2; 4387 if (BGE_IS_5705_PLUS(sc)) 4388 base = BGE_MAC_STATS; 4389 else 4390 base = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4391 result = CSR_READ_4(sc, base + offset + offsetof(bge_hostaddr, 4392 bge_addr_lo)); 4393 return (sysctl_handle_int(oidp, &result, sizeof(result), req)); 4394} 4395 4396#ifdef BGE_REGISTER_DEBUG 4397static int 4398bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 4399{ 4400 struct bge_softc *sc; 4401 uint16_t *sbdata; 4402 int error; 4403 int result; 4404 int i, j; 4405 4406 result = -1; 4407 error = sysctl_handle_int(oidp, &result, 0, req); 4408 if (error || (req->newptr == NULL)) 4409 return (error); 4410 4411 if (result == 1) { 4412 sc = (struct bge_softc *)arg1; 4413 4414 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; 4415 printf("Status Block:\n"); 4416 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { 4417 printf("%06x:", i); 4418 for (j = 0; j < 8; j++) { 4419 printf(" %04x", sbdata[i]); 4420 i += 4; 4421 } 4422 printf("\n"); 4423 } 4424 4425 printf("Registers:\n"); 4426 for (i = 0x800; i < 0xA00; ) { 4427 printf("%06x:", i); 4428 for (j = 0; j < 8; j++) { 4429 printf(" %08x", CSR_READ_4(sc, i)); 4430 i += 4; 4431 } 4432 printf("\n"); 4433 } 4434 4435 printf("Hardware Flags:\n"); 4436 if (BGE_IS_575X_PLUS(sc)) 4437 printf(" - 575X Plus\n"); 4438 if (BGE_IS_5705_PLUS(sc)) 4439 printf(" - 5705 Plus\n"); 4440 if (BGE_IS_5714_FAMILY(sc)) 4441 printf(" - 5714 Family\n"); 4442 if (BGE_IS_5700_FAMILY(sc)) 4443 printf(" - 5700 Family\n"); 4444 if (sc->bge_flags & BGE_FLAG_JUMBO) 4445 printf(" - Supports Jumbo Frames\n"); 4446 if (sc->bge_flags & BGE_FLAG_PCIX) 4447 printf(" - PCI-X Bus\n"); 4448 if (sc->bge_flags & BGE_FLAG_PCIE) 4449 printf(" - PCI Express Bus\n"); 4450 if (sc->bge_flags & BGE_FLAG_NO_3LED) 4451 printf(" - No 3 LEDs\n"); 4452 if 
(sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) 4453 printf(" - RX Alignment Bug\n"); 4454 } 4455 4456 return (error); 4457} 4458 4459static int 4460bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 4461{ 4462 struct bge_softc *sc; 4463 int error; 4464 uint16_t result; 4465 uint32_t val; 4466 4467 result = -1; 4468 error = sysctl_handle_int(oidp, &result, 0, req); 4469 if (error || (req->newptr == NULL)) 4470 return (error); 4471 4472 if (result < 0x8000) { 4473 sc = (struct bge_softc *)arg1; 4474 val = CSR_READ_4(sc, result); 4475 printf("reg 0x%06X = 0x%08X\n", result, val); 4476 } 4477 4478 return (error); 4479} 4480 4481static int 4482bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) 4483{ 4484 struct bge_softc *sc; 4485 int error; 4486 uint16_t result; 4487 uint32_t val; 4488 4489 result = -1; 4490 error = sysctl_handle_int(oidp, &result, 0, req); 4491 if (error || (req->newptr == NULL)) 4492 return (error); 4493 4494 if (result < 0x8000) { 4495 sc = (struct bge_softc *)arg1; 4496 val = bge_readmem_ind(sc, result); 4497 printf("mem 0x%06X = 0x%08X\n", result, val); 4498 } 4499 4500 return (error); 4501} 4502#endif
| /* NOTE(review): continuation token -- this completes a line of
   * bge_chipinit() begun before this chunk; verify against full file. */
	dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	return (0);
}

/*
 * Program the chip's internal blocks for operation: buffer manager and
 * mbuf pools, RX/TX ring control blocks (host and NIC-memory resident),
 * host coalescing, the various DMA/completion state machines, and link
 * attention handling.  Called from bge_init_locked() after reset and
 * bge_chipinit().  Returns 0 on success or ENXIO if a hardware block
 * fails to come up within its timeout.
 */
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_PLUS(sc))) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			DELAY(10);
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replentish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 * XXX The 5754 requires a lower threshold, so it might be a
	 * requirement of all 575x family chips.  The Linux driver sets
	 * the lower threshold for all 5705 family chips as well, but there
	 * are reports that it might not need to be so strict.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_PLUS(sc)))
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
	    BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
	    BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
	    BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
	    BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
	    BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= 1 << 29;

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		/*
		 * 10 << 16 presumably selects the MI auto-poll interval
		 * field -- confirm against the BCM570x register spec.
		 */
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

/*
 * Map a chip id to its revision name: try an exact chip-id match in
 * bge_revisions[] first, then fall back to the ASIC major-revision
 * table.  Returns NULL for an unknown id.
 */
const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

/*
 * Map a PCI vendor id to its entry in bge_vendors[].  An unknown vendor
 * is treated as a programming error (the probe routine matched the
 * device table first), hence the panic; the return after it is never
 * reached.
 */
const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * try to get the device name string from the controller itself instead
 * of the compiled-in string. It guarantees we'll always announce the
 * right product name.
We fall back to the compiled-in string when
 * VPD is unavailable or corrupt.
 */
static int
bge_probe(device_t dev)
{
	struct bge_type *t = bge_devs;
	struct bge_softc *sc = device_get_softc(dev);
	uint16_t vid, did;

	sc->bge_dev = dev;
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	while(t->bge_vid != 0) {
		if ((vid == t->bge_vid) && (did == t->bge_did)) {
			char model[64], buf[96];
			const struct bge_revision *br;
			const struct bge_vendor *v;
			uint32_t id;

			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
			    BGE_PCIMISCCTL_ASICREV;
			br = bge_lookup_rev(id);
			v = bge_lookup_vendor(vid);
			{
#if __FreeBSD_version > 700024
				/* Prefer the VPD product name if readable. */
				const char *pname;

				if (pci_get_vpd_ident(dev, &pname) == 0)
					snprintf(model, 64, "%s", pname);
				else
#endif
				snprintf(model, 64, "%s %s",
				    v->v_name,
				    br != NULL ? br->br_name :
				    "NetXtreme Ethernet Controller");
			}
			snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
			    br != NULL ? "" : "unknown ", id >> 16);
			device_set_desc_copy(dev, buf);
			/* Dell boards lack the third (traffic) LED. */
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_flags |= BGE_FLAG_NO_3LED;
			if (did == BCOM_DEVICEID_BCM5755M)
				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
			return (0);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Release every bus_dma resource bge_dma_alloc() may have created:
 * per-buffer maps, ring memory/maps/tags, status and statistics blocks,
 * and finally the parent tag.  Each resource is checked before it is
 * destroyed, so this is safe to call on a partially initialized softc
 * (e.g. from an attach failure path).
 */
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);


	/* Destroy standard RX ring. */
	if (sc->bge_cdata.bge_rx_std_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring. */
	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
	    sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring. */
	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map &&
	    sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring. */
	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block. */
	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block. */
	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	/* Destroy the parent tag. */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}

/*
 * Create all bus_dma tags, maps and ring memory the driver needs and
 * record the bus addresses in the softc.  On any failure ENOMEM is
 * returned with earlier allocations left in place; the caller is
 * presumably expected to invoke bge_dma_free() to clean up -- confirm
 * against the attach path.
 */
static int
bge_dma_alloc(device_t dev)
{
	struct bge_dmamap_arg ctx;
	struct bge_softc *sc;
	int i, error;

	sc = device_get_softc(dev);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bge_cdata.bge_parent_tag);

	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Create tag for RX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers.
*/ 1925 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1926 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1927 &sc->bge_cdata.bge_tx_dmamap[i]); 1928 if (error) { 1929 device_printf(sc->bge_dev, 1930 "can't create DMA map for RX\n"); 1931 return (ENOMEM); 1932 } 1933 } 1934 1935 /* Create tag for standard RX ring. */ 1936 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1937 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1938 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1939 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1940 1941 if (error) { 1942 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1943 return (ENOMEM); 1944 } 1945 1946 /* Allocate DMA'able memory for standard RX ring. */ 1947 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1948 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1949 &sc->bge_cdata.bge_rx_std_ring_map); 1950 if (error) 1951 return (ENOMEM); 1952 1953 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1954 1955 /* Load the address of the standard RX ring. */ 1956 ctx.bge_maxsegs = 1; 1957 ctx.sc = sc; 1958 1959 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1960 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 1961 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1962 1963 if (error) 1964 return (ENOMEM); 1965 1966 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1967 1968 /* Create tags for jumbo mbufs. */ 1969 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1970 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1971 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1972 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1973 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1974 if (error) { 1975 device_printf(sc->bge_dev, 1976 "could not allocate jumbo dma tag\n"); 1977 return (ENOMEM); 1978 } 1979 1980 /* Create tag for jumbo RX ring. 
 */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);

		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo ring dma tag\n");
			return (ENOMEM);
		}

		/* Allocate DMA'able memory for jumbo RX ring. */
		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
		if (error)
			return (ENOMEM);

		/* Load the address of the jumbo RX ring. */
		ctx.bge_maxsegs = 1;
		ctx.sc = sc;

		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

		if (error)
			return (ENOMEM);

		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;

		/* Create DMA maps for jumbo RX buffers. */
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}

	}

	/* Create tag for RX return ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for RX return ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_return_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
	    BGE_RX_RTN_RING_SZ(sc));

	/* Load the address of the RX return ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;

	/* Create tag for TX ring. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_ring_tag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for TX ring. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_tx_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);

	/* Load the address of the TX ring. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;

	/* Create tag for status block. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_status_tag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for status block. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_status_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);

	/* Load the address of the status block. */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;

	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;

	/* Create tag for statistics block. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_stats_tag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for statistics block. */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_stats_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);

	/* Load the address of the statistics block. */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;

	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;

	return (0);
}

#if __FreeBSD_version > 602105
/*
 * Return true if this device has more than one port.
 * Scans the other functions at the same bus/slot.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int b, s, f, fscan;

	b = pci_get_bus(dev);
	s = pci_get_slot(dev);
	f = pci_get_function(dev);
	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
		if (fscan != f && pci_find_bsf(b, s, fscan) != NULL)
			return (1);
	return (0);
}

/*
 * Return true if MSI can be used with this device.
 * Only whitelisted ASIC revisions are known to work.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when this chip is configured
		 * in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5780:
		can_use_msi = 1;
		break;
	}
	return (can_use_msi);
}
#endif

/*
 * Device attach: map registers, identify the chip, reset and initialize
 * it, read the station address, allocate DMA resources, set up the ifnet
 * and media, and hook up the interrupt.
 */
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	int error, reg, rid, trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);

	if (sc->bge_res == NULL) {
		device_printf (sc->bge_dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	if (bge_has_eeprom(sc))
		sc->bge_flags |= BGE_FLAG_EEPROM;

	/*
	 * Save chipset family.  Note the deliberate fall-throughs:
	 * 5714-family chips are also 575X_PLUS and 5705_PLUS, and
	 * 575X chips are also 5705_PLUS.
	 */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
		/* FALLTHRU */
	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5787:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* FALLTHRU */
	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	/* Set various bug flags. */
	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_flags |= BGE_FLAG_CRC_BUG;
	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_flags |= BGE_FLAG_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
	if (BGE_IS_5705_PLUS(sc) &&
	    !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787)
			sc->bge_flags |= BGE_FLAG_JITTER_BUG;
		else
			sc->bge_flags |= BGE_FLAG_BER_BUG;
	}

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
#if __FreeBSD_version > 602101
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		/*
		 * Found a PCI Express capabilities register, this
		 * must be a PCI Express device.
		 */
		if (reg != 0)
			sc->bge_flags |= BGE_FLAG_PCIE;
	} else if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
		if (reg != 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}

#else
	if (BGE_IS_5705_PLUS(sc)) {
		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
		if ((reg & 0xFF) == BGE_PCIE_CAPID)
			sc->bge_flags |= BGE_FLAG_PCIE;
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}
#endif

#if __FreeBSD_version > 602105
	{
		int msicount;

	/*
	 * Allocate the interrupt, using MSI if possible. These devices
	 * support 8 MSI messages, but only the first one is used in
	 * normal operation.
	 */
		if (bge_can_use_msi(sc)) {
			msicount = pci_msi_count(dev);
			if (msicount > 1)
				msicount = 1;
		} else
			msicount = 0;
		if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
			rid = 1;	/* MSI resources start at rid 1 */
			sc->bge_flags |= BGE_FLAG_MSI;
		} else
			rid = 0;	/* legacy INTx */
	}
#else
	rid = 0;
#endif

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* Try to reset the chip. */
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Detect ASF firmware by looking for the magic number in the
	 * shared-memory signature word; if found, arrange to keep the
	 * firmware handshake alive across resets.
	 */
	sc->bge_asf_mode = 0;
	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
	    == BGE_MAGIC_NUMBER)) {
		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
		    & BGE_HWCFG_ASF) {
			sc->bge_asf_mode |= ASF_ENABLE;
			sc->bge_asf_mode |= ASF_STACKUP;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	/* Try to reset the chip again the nice way. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get the station address: from Open Firmware on sparc64 boxes
	 * without an EEPROM, otherwise from NIC shared memory (checked
	 * for the "HK" 0x484B signature), falling back to the EEPROM.
	 */
#ifdef __sparc64__
	if ((sc->bge_flags & BGE_FLAG_EEPROM) == 0)
		OF_getetheraddr(dev, eaddr);
	else
#endif
	{
		mac_tmp = bge_readmem_ind(sc, 0x0C14);
		if ((mac_tmp >> 16) == 0x484B) {
			eaddr[0] = (u_char)(mac_tmp >> 8);
			eaddr[1] = (u_char)mac_tmp;
			mac_tmp = bge_readmem_ind(sc, 0x0C18);
			eaddr[2] = (u_char)(mac_tmp >> 24);
			eaddr[3] = (u_char)(mac_tmp >> 16);
			eaddr[4] = (u_char)(mac_tmp >> 8);
			eaddr[5] = (u_char)mac_tmp;
		} else if (bge_read_eeprom(sc, eaddr,
		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
			device_printf(sc->bge_dev,
			    "failed to read station address\n");
			error = ENXIO;
			goto fail;
		}
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(dev)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values.
*/ 2442 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2443 sc->bge_rx_coal_ticks = 150; 2444 sc->bge_tx_coal_ticks = 150; 2445 sc->bge_rx_max_coal_bds = 10; 2446 sc->bge_tx_max_coal_bds = 10; 2447 2448 /* Set up ifnet structure */ 2449 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2450 if (ifp == NULL) { 2451 device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2452 error = ENXIO; 2453 goto fail; 2454 } 2455 ifp->if_softc = sc; 2456 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2457 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2458 ifp->if_ioctl = bge_ioctl; 2459 ifp->if_start = bge_start; 2460 ifp->if_init = bge_init; 2461 ifp->if_mtu = ETHERMTU; 2462 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2463 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2464 IFQ_SET_READY(&ifp->if_snd); 2465 ifp->if_hwassist = BGE_CSUM_FEATURES; 2466 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 2467 IFCAP_VLAN_MTU; 2468#ifdef IFCAP_VLAN_HWCSUM 2469 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2470#endif 2471 ifp->if_capenable = ifp->if_capabilities; 2472#ifdef DEVICE_POLLING 2473 ifp->if_capabilities |= IFCAP_POLLING; 2474#endif 2475 2476 /* 2477 * 5700 B0 chips do not support checksumming correctly due 2478 * to hardware bugs. 2479 */ 2480 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2481 ifp->if_capabilities &= ~IFCAP_HWCSUM; 2482 ifp->if_capenable &= IFCAP_HWCSUM; 2483 ifp->if_hwassist = 0; 2484 } 2485 2486 /* 2487 * Figure out what sort of media we have by checking the 2488 * hardware config word in the first 32k of NIC internal memory, 2489 * or fall back to examining the EEPROM if necessary. 2490 * Note: on some BCM5700 cards, this value appears to be unset. 2491 * If that's the case, we have to rely on identifying the NIC 2492 * by its PCI subsystem ID, as we do below for the SysKonnect 2493 * SK-9D41. 
 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else if (sc->bge_flags & BGE_FLAG_EEPROM) {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		/* EEPROM contents are big-endian. */
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_flags |= BGE_FLAG_TBI;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_flags |= BGE_FLAG_TBI;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* Fiber (TBI): no PHY; manage media directly via ifmedia. */
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access the
		 * probe if ASF is running. Retry a couple of times
		 * if we get a conflict with the ASF firmware accessing
		 * the PHY.
2530 */ 2531 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2532again: 2533 bge_asf_driver_up(sc); 2534 2535 trys = 0; 2536 if (mii_phy_probe(dev, &sc->bge_miibus, 2537 bge_ifmedia_upd, bge_ifmedia_sts)) { 2538 if (trys++ < 4) { 2539 device_printf(sc->bge_dev, "Try again\n"); 2540 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 2541 BMCR_RESET); 2542 goto again; 2543 } 2544 2545 device_printf(sc->bge_dev, "MII without any PHY!\n"); 2546 error = ENXIO; 2547 goto fail; 2548 } 2549 2550 /* 2551 * Now tell the firmware we are going up after probing the PHY 2552 */ 2553 if (sc->bge_asf_mode & ASF_STACKUP) 2554 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2555 } 2556 2557 /* 2558 * When using the BCM5701 in PCI-X mode, data corruption has 2559 * been observed in the first few bytes of some received packets. 2560 * Aligning the packet buffer in memory eliminates the corruption. 2561 * Unfortunately, this misaligns the packet payloads. On platforms 2562 * which do not support unaligned accesses, we will realign the 2563 * payloads by copying the received packets. 2564 */ 2565 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2566 sc->bge_flags & BGE_FLAG_PCIX) 2567 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2568 2569 /* 2570 * Call MI attach routine. 2571 */ 2572 ether_ifattach(ifp, eaddr); 2573 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 2574 2575 /* 2576 * Hookup IRQ last. 
2577 */ 2578#if __FreeBSD_version > 700030 2579 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2580 NULL, bge_intr, sc, &sc->bge_intrhand); 2581#else 2582 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2583 bge_intr, sc, &sc->bge_intrhand); 2584#endif 2585 2586 if (error) { 2587 bge_detach(dev); 2588 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2589 } 2590 2591 bge_add_sysctls(sc); 2592 2593 return (0); 2594 2595fail: 2596 bge_release_resources(sc); 2597 2598 return (error); 2599} 2600 2601static int 2602bge_detach(device_t dev) 2603{ 2604 struct bge_softc *sc; 2605 struct ifnet *ifp; 2606 2607 sc = device_get_softc(dev); 2608 ifp = sc->bge_ifp; 2609 2610#ifdef DEVICE_POLLING 2611 if (ifp->if_capenable & IFCAP_POLLING) 2612 ether_poll_deregister(ifp); 2613#endif 2614 2615 BGE_LOCK(sc); 2616 bge_stop(sc); 2617 bge_reset(sc); 2618 BGE_UNLOCK(sc); 2619 2620 callout_drain(&sc->bge_stat_ch); 2621 2622 ether_ifdetach(ifp); 2623 2624 if (sc->bge_flags & BGE_FLAG_TBI) { 2625 ifmedia_removeall(&sc->bge_ifmedia); 2626 } else { 2627 bus_generic_detach(dev); 2628 device_delete_child(dev, sc->bge_miibus); 2629 } 2630 2631 bge_release_resources(sc); 2632 2633 return (0); 2634} 2635 2636static void 2637bge_release_resources(struct bge_softc *sc) 2638{ 2639 device_t dev; 2640 2641 dev = sc->bge_dev; 2642 2643 if (sc->bge_intrhand != NULL) 2644 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2645 2646 if (sc->bge_irq != NULL) 2647 bus_release_resource(dev, SYS_RES_IRQ, 2648 sc->bge_flags & BGE_FLAG_MSI ? 
1 : 0, sc->bge_irq); 2649 2650#if __FreeBSD_version > 602105 2651 if (sc->bge_flags & BGE_FLAG_MSI) 2652 pci_release_msi(dev); 2653#endif 2654 2655 if (sc->bge_res != NULL) 2656 bus_release_resource(dev, SYS_RES_MEMORY, 2657 BGE_PCI_BAR0, sc->bge_res); 2658 2659 if (sc->bge_ifp != NULL) 2660 if_free(sc->bge_ifp); 2661 2662 bge_dma_free(sc); 2663 2664 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 2665 BGE_LOCK_DESTROY(sc); 2666} 2667 2668static int 2669bge_reset(struct bge_softc *sc) 2670{ 2671 device_t dev; 2672 uint32_t cachesize, command, pcistate, reset; 2673 void (*write_op)(struct bge_softc *, int, int); 2674 int i, val = 0; 2675 2676 dev = sc->bge_dev; 2677 2678 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) { 2679 if (sc->bge_flags & BGE_FLAG_PCIE) 2680 write_op = bge_writemem_direct; 2681 else 2682 write_op = bge_writemem_ind; 2683 } else 2684 write_op = bge_writereg_ind; 2685 2686 /* Save some important PCI state. */ 2687 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2688 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2689 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2690 2691 pci_write_config(dev, BGE_PCI_MISC_CTL, 2692 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 2693 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 2694 2695 /* Disable fastboot on controllers that support it. */ 2696 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2697 sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2698 sc->bge_asicrev == BGE_ASICREV_BCM5787) { 2699 if (bootverbose) 2700 device_printf(sc->bge_dev, "Disabling fastboot\n"); 2701 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2702 } 2703 2704 /* 2705 * Write the magic number to SRAM at offset 0xB50. 2706 * When firmware finishes its initialization it will 2707 * write ~BGE_MAGIC_NUMBER to the same location. 2708 */ 2709 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2710 2711 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 2712 2713 /* XXX: Broadcom Linux driver. 
*/ 2714 if (sc->bge_flags & BGE_FLAG_PCIE) { 2715 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ 2716 CSR_WRITE_4(sc, 0x7E2C, 0x20); 2717 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2718 /* Prevent PCIE link training during global reset */ 2719 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2720 reset |= 1 << 29; 2721 } 2722 } 2723 2724 /* 2725 * Set GPHY Power Down Override to leave GPHY 2726 * powered up in D0 uninitialized. 2727 */ 2728 if (BGE_IS_5705_PLUS(sc)) 2729 reset |= 0x04000000; 2730 2731 /* Issue global reset */ 2732 write_op(sc, BGE_MISC_CFG, reset); 2733 2734 DELAY(1000); 2735 2736 /* XXX: Broadcom Linux driver. */ 2737 if (sc->bge_flags & BGE_FLAG_PCIE) { 2738 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2739 uint32_t v; 2740 2741 DELAY(500000); /* wait for link training to complete */ 2742 v = pci_read_config(dev, 0xC4, 4); 2743 pci_write_config(dev, 0xC4, v | (1 << 15), 4); 2744 } 2745 /* 2746 * Set PCIE max payload size to 128 bytes and clear error 2747 * status. 2748 */ 2749 pci_write_config(dev, 0xD8, 0xF5000, 4); 2750 } 2751 2752 /* Reset some of the PCI state that got zapped by reset. */ 2753 pci_write_config(dev, BGE_PCI_MISC_CTL, 2754 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 2755 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 2756 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2757 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2758 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2759 2760 /* Re-enable MSI, if neccesary, and enable the memory arbiter. */ 2761 if (BGE_IS_5714_FAMILY(sc)) { 2762 uint32_t val; 2763 2764 /* This chip disables MSI on reset. 
*/ 2765 if (sc->bge_flags & BGE_FLAG_MSI) { 2766 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2); 2767 pci_write_config(dev, BGE_PCI_MSI_CTL, 2768 val | PCIM_MSICTRL_MSI_ENABLE, 2); 2769 val = CSR_READ_4(sc, BGE_MSI_MODE); 2770 CSR_WRITE_4(sc, BGE_MSI_MODE, 2771 val | BGE_MSIMODE_ENABLE); 2772 } 2773 val = CSR_READ_4(sc, BGE_MARB_MODE); 2774 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2775 } else 2776 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2777 2778 /* 2779 * Poll until we see the 1's complement of the magic number. 2780 * This indicates that the firmware initialization is complete. 2781 * We expect this to fail if no EEPROM is fitted though. 2782 */ 2783 for (i = 0; i < BGE_TIMEOUT; i++) { 2784 DELAY(10); 2785 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2786 if (val == ~BGE_MAGIC_NUMBER) 2787 break; 2788 } 2789 2790 if ((sc->bge_flags & BGE_FLAG_EEPROM) && i == BGE_TIMEOUT) 2791 device_printf(sc->bge_dev, "firmware handshake timed out, " 2792 "found 0x%08x\n", val); 2793 2794 /* 2795 * XXX Wait for the value of the PCISTATE register to 2796 * return to its original pre-reset state. This is a 2797 * fairly good indicator of reset completion. If we don't 2798 * wait for the reset to fully complete, trying to read 2799 * from the device's non-PCI registers may yield garbage 2800 * results. 2801 */ 2802 for (i = 0; i < BGE_TIMEOUT; i++) { 2803 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2804 break; 2805 DELAY(10); 2806 } 2807 2808 if (sc->bge_flags & BGE_FLAG_PCIE) { 2809 reset = bge_readmem_ind(sc, 0x7C00); 2810 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); 2811 } 2812 2813 /* Fix up byte swapping. 
*/ 2814 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 2815 BGE_MODECTL_BYTESWAP_DATA); 2816 2817 /* Tell the ASF firmware we are up */ 2818 if (sc->bge_asf_mode & ASF_STACKUP) 2819 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2820 2821 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2822 2823 /* 2824 * The 5704 in TBI mode apparently needs some special 2825 * adjustment to insure the SERDES drive level is set 2826 * to 1.2V. 2827 */ 2828 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2829 sc->bge_flags & BGE_FLAG_TBI) { 2830 uint32_t serdescfg; 2831 2832 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2833 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2834 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2835 } 2836 2837 /* XXX: Broadcom Linux driver. */ 2838 if (sc->bge_flags & BGE_FLAG_PCIE && 2839 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2840 uint32_t v; 2841 2842 v = CSR_READ_4(sc, 0x7C00); 2843 CSR_WRITE_4(sc, 0x7C00, v | (1 << 25)); 2844 } 2845 DELAY(10000); 2846 2847 return(0); 2848} 2849 2850/* 2851 * Frame reception handling. This is called if there's a frame 2852 * on the receive return list. 2853 * 2854 * Note: we have to be able to handle two possibilities here: 2855 * 1) the frame is from the jumbo receive ring 2856 * 2) the frame is from the standard receive ring 2857 */ 2858 2859static void 2860bge_rxeof(struct bge_softc *sc) 2861{ 2862 struct ifnet *ifp; 2863 int stdcnt = 0, jumbocnt = 0; 2864 2865 BGE_LOCK_ASSERT(sc); 2866 2867 /* Nothing to do. 
*/ 2868 if (sc->bge_rx_saved_considx == 2869 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) 2870 return; 2871 2872 ifp = sc->bge_ifp; 2873 2874 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2875 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2876 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2877 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2878 if (BGE_IS_JUMBO_CAPABLE(sc)) 2879 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2880 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD); 2881 2882 while(sc->bge_rx_saved_considx != 2883 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2884 struct bge_rx_bd *cur_rx; 2885 uint32_t rxidx; 2886 struct mbuf *m = NULL; 2887 uint16_t vlan_tag = 0; 2888 int have_tag = 0; 2889 2890#ifdef DEVICE_POLLING 2891 if (ifp->if_capenable & IFCAP_POLLING) { 2892 if (sc->rxcycles <= 0) 2893 break; 2894 sc->rxcycles--; 2895 } 2896#endif 2897 2898 cur_rx = 2899 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2900 2901 rxidx = cur_rx->bge_idx; 2902 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2903 2904 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2905 have_tag = 1; 2906 vlan_tag = cur_rx->bge_vlan_tag; 2907 } 2908 2909 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2910 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2911 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2912 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2913 BUS_DMASYNC_POSTREAD); 2914 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2915 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2916 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2917 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2918 jumbocnt++; 2919 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2920 ifp->if_ierrors++; 2921 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2922 continue; 2923 } 2924 if (bge_newbuf_jumbo(sc, 2925 sc->bge_jumbo, NULL) == ENOBUFS) { 2926 ifp->if_ierrors++; 2927 bge_newbuf_jumbo(sc, sc->bge_jumbo, 
m); 2928 continue; 2929 } 2930 } else { 2931 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2932 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2933 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2934 BUS_DMASYNC_POSTREAD); 2935 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2936 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2937 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2938 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2939 stdcnt++; 2940 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2941 ifp->if_ierrors++; 2942 bge_newbuf_std(sc, sc->bge_std, m); 2943 continue; 2944 } 2945 if (bge_newbuf_std(sc, sc->bge_std, 2946 NULL) == ENOBUFS) { 2947 ifp->if_ierrors++; 2948 bge_newbuf_std(sc, sc->bge_std, m); 2949 continue; 2950 } 2951 } 2952 2953 ifp->if_ipackets++; 2954#ifndef __NO_STRICT_ALIGNMENT 2955 /* 2956 * For architectures with strict alignment we must make sure 2957 * the payload is aligned. 2958 */ 2959 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 2960 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2961 cur_rx->bge_len); 2962 m->m_data += ETHER_ALIGN; 2963 } 2964#endif 2965 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2966 m->m_pkthdr.rcvif = ifp; 2967 2968 if (ifp->if_capenable & IFCAP_RXCSUM) { 2969 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2970 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2971 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 2972 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2973 } 2974 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2975 m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 2976 m->m_pkthdr.csum_data = 2977 cur_rx->bge_tcp_udp_csum; 2978 m->m_pkthdr.csum_flags |= 2979 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2980 } 2981 } 2982 2983 /* 2984 * If we received a packet with a vlan tag, 2985 * attach that information to the packet. 
		 */
		if (have_tag) {
#if __FreeBSD_version > 700022
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
#else
			VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
			if (m == NULL)
				continue;
#endif
		}

		/* Drop the lock around the stack input call to avoid LORs. */
		BGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BGE_LOCK(sc);
	}

	/* Tell the chip about the ring entries we refilled above. */
	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
#ifdef notyet
	/*
	 * This register wraps very quickly under heavy packet drops.
	 * If you need correct statistics, you can enable this check.
	 */
	if (BGE_IS_5705_PLUS(sc))
		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
#endif
}

/*
 * TX completion handler: walk the TX ring from the saved consumer index
 * up to the consumer index reported in the status block, unloading DMA
 * maps and freeing the transmitted mbufs.  Called with the softc locked.
 */
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Nothing to do. */
	if (sc->bge_tx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
		return;

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t		idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		/* Only the last descriptor of a frame counts as a packet. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	/* Ring drained some: allow new transmissions to be queued. */
	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/* No frames outstanding: disarm the watchdog. */
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
}

#ifdef DEVICE_POLLING
/*
 * polling(4) entry point.  Mirrors bge_intr() but reads (and clears)
 * the status word from the host status block instead of taking an
 * interrupt; link events are deferred to POLL_AND_CHECK_STATUS passes.
 */
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint32_t statusword;

	BGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	/* Fetch and atomically clear the status block's status word. */
	statusword = atomic_readandclear_32(
	    &sc->bge_ldata.bge_status_block->bge_status);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;

	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);

	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler.  Acks the interrupt, checks for link changes and
 * then services the RX return and TX rings.
 */
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	/* In polling mode all servicing happens in bge_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts.  We used to have races
	 * instead of the necessary complications.  Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack.  We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling require
	 * the status check).  So toggling would probably be a pessimization
	 * even with MSI.  It would only be needed for using a task queue.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);

	/* 5700 (pre-B2) needs the workaround path in bge_link_upd(). */
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer. */
		bge_txeof(sc);
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);
}

/*
 * Keep the ASF/IPMI firmware happy: send a driver-alive heartbeat
 * roughly every 2 seconds (bge_tick runs at hz; count of 5 comes from
 * the firmware's expectations -- NOTE(review): the "every 2s" claim is
 * from the original comment; the counter here actually fires every 5
 * ticks).
 */
static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count --;
		else {
			sc->bge_asf_count = 5;
			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
			    BGE_FW_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
			/* Poke the firmware event bit. */
			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
		}
	}
}

/*
 * Once-a-second timer: update statistics, tick the PHY state machine
 * (or fake a link event in TBI mode), feed the ASF heartbeat and the
 * watchdog, then reschedule itself.
 */
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;

	BGE_LOCK_ASSERT(sc);

	/* Synchronize with possible callout reset/stop. */
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
		return;

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/* Don't mess with the PHY in IPMI/ASF mode */
		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
			mii_tick(mii);
	} else {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
		}
	}

	bge_asf_driver_up(sc);
	bge_watchdog(sc);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

/*
 * Statistics update for 5705-and-newer chips, which expose stats via
 * registers instead of a NIC-memory statistics block.
 */
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->bge_ifp;

	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));

	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
}

/*
 * Statistics update for older chips: read the cumulative counters out
 * of the NIC-memory statistics block and fold the deltas into the
 * ifnet counters.
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define	READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt -
	    sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef READ_STAT
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 *
 * Returns 0 on success or ENOBUFS if an extra mbuf was needed but could
 * not be allocated.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * On success returns 0 and advances *txidx past the descriptors used.
 * On failure the mbuf is either freed (*m_head set to NULL, for
 * unrecoverable errors) or left untouched for the caller to requeue
 * (ring-full / transient mapping errors).
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	struct bge_tx_bd *d;
	struct mbuf *m = *m_head;
	uint32_t idx = *txidx;
	uint16_t csum_flags;
	int nsegs, i, error;

	/* Translate stack checksum requests into descriptor flags. */
	csum_flags = 0;
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			/* Runts must be zero-padded; see bge_cksum_pad(). */
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
		if (m->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(m, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment. */
	for (i = 0; ; i++) {
		d = &sc->bge_ldata.bge_tx_ring[idx];
		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		if (i == nsegs - 1)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}

	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/* ... and put VLAN tag into first segment. */
	d = &sc->bge_ldata.bge_tx_ring[*txidx];
#if __FreeBSD_version > 700022
	if (m->m_flags & M_VLANTAG) {
		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
	} else
		d->bge_vlan_tag = 0;
#else
	{
		struct m_tag *mtag;

		if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
			d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
		} else
			d->bge_vlan_tag = 0;
	}
#endif

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start_locked(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int count = 0;

	sc = ifp->if_softc;

	/* No link or nothing queued: nothing to do. */
	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	prodidx = sc->bge_tx_prodidx;

	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * The code inside the if() block is never reached since we
		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
		 * requests to checksum TCP/UDP in a fragmented packet.
		 *
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		++count;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#ifdef ETHER_BPF_MTAP
		ETHER_BPF_MTAP(ifp, m_head);
#else
		BPF_MTAP(ifp, m_head);
#endif
	}

	if (count == 0)
		/* No packets were dequeued. */
		return;

	/* Transmit. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->bge_timer = 5;
}

/*
 * Lock-acquiring wrapper around bge_start_locked(); this is the
 * if_start entry point registered with the ifnet.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;
	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}

/*
 * Bring the interface up: reset and reinitialize the chip, program
 * the MAC address and filters, set up the RX/TX rings and enable the
 * transmitter, receiver and interrupts.  Called with the softc locked.
 */
static void
bge_init_locked(struct bge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t *m;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		device_printf(sc->bge_dev, "initialization failure\n");
		return;
	}

	ifp = sc->bge_ifp;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Program promiscuous mode. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			device_printf (sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter. */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver. */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
		    BGE_PCIMISCCTL_MASK_PCI_INTR);
		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
	} else
#endif

	/* Enable host interrupts. */
	{
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
	}

	bge_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

/*
 * Lock-acquiring wrapper around bge_init_locked(); registered as the
 * ifnet if_init entry point.
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;

	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	int res;

	BGE_LOCK(sc);
	res = bge_ifmedia_upd_locked(ifp);
	BGE_UNLOCK(sc);

	return (res);
}

/*
 * Set media options with the softc lock held.  TBI (fiber) cards are
 * programmed directly through MAC registers; copper cards go through
 * the MII layer.  Returns 0 or EINVAL for unsupported media.
 */
static int
bge_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	BGE_LOCK_ASSERT(sc);

	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (bge_fake_autoneg == 0 &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
				    BGE_SGDIGCFG_PAUSE_CAP |
				    BGE_SGDIGCFG_ASYM_PAUSE;
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
				    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	/* Copper: reset all PHYs and let the MII layer apply the change. */
	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	BGE_LOCK(sc);

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* TBI: derive status directly from the MAC registers. */
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	/* Copper: ask the MII layer. */
	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BGE_UNLOCK(sc);
}

/*
 * ioctl entry point: MTU changes, interface flags, multicast filter
 * updates, media selection and capability (polling/checksum) toggles.
 * Everything else is handed to ether_ioctl().
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/* Jumbo-capable chips accept up to BGE_JUMBO_MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.  Similarly for ALLMULTI.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				/* Mask hardware interrupts while polling. */
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
				ifp->if_capenable |= IFCAP_POLLING;
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				BGE_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
#ifdef VLAN_CAPABILITIES
			VLAN_CAPABILITIES(ifp);
#endif
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Watchdog: fires from bge_tick() when bge_timer (armed by the
 * transmit path) counts down to zero without TX completions -- the
 * chip is presumed wedged, so reinitialize it.
 */
static void
bge_watchdog(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	/* Timer disarmed, or armed but not yet expired. */
	if (sc->bge_timer == 0 || --sc->bge_timer)
		return;

	ifp = sc->bge_ifp;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	bge_init_locked(sc);

	ifp->if_oerrors++;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
		mii = device_get_softc(sc->bge_miibus);

	callout_stop(&sc->bge_stat_ch);

	/*
	 * Disable all of the receiver blocks.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		/*
		 * If we are called from bge_detach(), mii is already NULL.
		 */
		if (mii != NULL) {
			ifm = mii->mii_media.ifm_cur;
			mtmp = ifm->ifm_media;
			ifm->ifm_media = IFM_ETHER | IFM_NONE;
			mii_mediachg(mii);
			ifm->ifm_media = mtmp;
		}
		ifp->if_flags = itmp;
	}

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);
}

/*
 * Suspend method: quiesce the chip.  State is restored in bge_resume().
 */
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}

/*
 * Resume method: reinitialize the chip and restart transmission if the
 * interface was up before the suspend.
 */
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
	if (ifp->if_flags & IFF_UP) {
		bge_init_locked(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);

	return (0);
}

/*
 * Update the driver's notion of link state.  Three chip-specific paths:
 * BCM5700 (pre-B2) via MII interrupts, TBI (fiber) via MAC status, and
 * everything else via MI auto-polling.  Called with the softc locked.
 */
static void
bge_link_upd(struct bge_softc *sc)
{
	struct mii_data *mii;
	uint32_t link, status;

	BGE_LOCK_ASSERT(sc);

	/* Clear 'pending link event' flag. */
	sc->bge_link_evt = 0;

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/* Clear the interrupt. */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
		 * in status word always set. Workaround this bug by reading
		 * PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * Discard link events for MII/GMII controllers
		 * if MI auto-polling is disabled.
		 */
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

/* Helper to register one read-only statistics sysctl backed by the NIC
 * statistics block (offset into struct bge_stats). */
#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
	    sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
	    desc)

static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *schildren;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
	    "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
	    "Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
	    "Memory Read");

#endif

	tree = SYSCTL_ADD_NODE(ctx,
children, OID_AUTO, "stats", CTLFLAG_RD, 4262 NULL, "BGE Statistics"); 4263 schildren = children = SYSCTL_CHILDREN(tree); 4264 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", 4265 children, COSFramesDroppedDueToFilters, 4266 "FramesDroppedDueToFilters"); 4267 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", 4268 children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); 4269 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", 4270 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); 4271 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", 4272 children, nicNoMoreRxBDs, "NoMoreRxBDs"); 4273 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", 4274 children, ifInDiscards, "InputDiscards"); 4275 BGE_SYSCTL_STAT(sc, ctx, "Input Errors", 4276 children, ifInErrors, "InputErrors"); 4277 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", 4278 children, nicRecvThresholdHit, "RecvThresholdHit"); 4279 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", 4280 children, nicDmaReadQueueFull, "DmaReadQueueFull"); 4281 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", 4282 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); 4283 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", 4284 children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); 4285 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", 4286 children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); 4287 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", 4288 children, nicRingStatusUpdate, "RingStatusUpdate"); 4289 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", 4290 children, nicInterrupts, "Interrupts"); 4291 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", 4292 children, nicAvoidedInterrupts, "AvoidedInterrupts"); 4293 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", 4294 children, nicSendThresholdHit, "SendThresholdHit"); 4295 4296 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, 4297 NULL, "BGE RX 
Statistics"); 4298 children = SYSCTL_CHILDREN(tree); 4299 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", 4300 children, rxstats.ifHCInOctets, "Octets"); 4301 BGE_SYSCTL_STAT(sc, ctx, "Fragments", 4302 children, rxstats.etherStatsFragments, "Fragments"); 4303 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", 4304 children, rxstats.ifHCInUcastPkts, "UcastPkts"); 4305 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", 4306 children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); 4307 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", 4308 children, rxstats.dot3StatsFCSErrors, "FCSErrors"); 4309 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", 4310 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); 4311 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", 4312 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); 4313 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", 4314 children, rxstats.xoffPauseFramesReceived, 4315 "xoffPauseFramesReceived"); 4316 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", 4317 children, rxstats.macControlFramesReceived, 4318 "ControlFramesReceived"); 4319 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", 4320 children, rxstats.xoffStateEntered, "xoffStateEntered"); 4321 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", 4322 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); 4323 BGE_SYSCTL_STAT(sc, ctx, "Jabbers", 4324 children, rxstats.etherStatsJabbers, "Jabbers"); 4325 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", 4326 children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); 4327 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", 4328 children, rxstats.inRangeLengthError, "inRangeLengthError"); 4329 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", 4330 children, rxstats.outRangeLengthError, "outRangeLengthError"); 4331 4332 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, 4333 NULL, "BGE TX Statistics"); 4334 children = SYSCTL_CHILDREN(tree); 4335 BGE_SYSCTL_STAT(sc, ctx, 
"Outbound Octets", 4336 children, txstats.ifHCOutOctets, "Octets"); 4337 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", 4338 children, txstats.etherStatsCollisions, "Collisions"); 4339 BGE_SYSCTL_STAT(sc, ctx, "XON Sent", 4340 children, txstats.outXonSent, "XonSent"); 4341 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", 4342 children, txstats.outXoffSent, "XoffSent"); 4343 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", 4344 children, txstats.flowControlDone, "flowControlDone"); 4345 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", 4346 children, txstats.dot3StatsInternalMacTransmitErrors, 4347 "InternalMacTransmitErrors"); 4348 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", 4349 children, txstats.dot3StatsSingleCollisionFrames, 4350 "SingleCollisionFrames"); 4351 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", 4352 children, txstats.dot3StatsMultipleCollisionFrames, 4353 "MultipleCollisionFrames"); 4354 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", 4355 children, txstats.dot3StatsDeferredTransmissions, 4356 "DeferredTransmissions"); 4357 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", 4358 children, txstats.dot3StatsExcessiveCollisions, 4359 "ExcessiveCollisions"); 4360 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", 4361 children, txstats.dot3StatsLateCollisions, 4362 "LateCollisions"); 4363 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", 4364 children, txstats.ifHCOutUcastPkts, "UcastPkts"); 4365 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", 4366 children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); 4367 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", 4368 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); 4369 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", 4370 children, txstats.dot3StatsCarrierSenseErrors, 4371 "CarrierSenseErrors"); 4372 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", 4373 children, txstats.ifOutDiscards, "Discards"); 4374 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", 4375 children, txstats.ifOutErrors, "Errors"); 
4376} 4377 4378static int 4379bge_sysctl_stats(SYSCTL_HANDLER_ARGS) 4380{ 4381 struct bge_softc *sc; 4382 uint32_t result; 4383 int base, offset; 4384 4385 sc = (struct bge_softc *)arg1; 4386 offset = arg2; 4387 if (BGE_IS_5705_PLUS(sc)) 4388 base = BGE_MAC_STATS; 4389 else 4390 base = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4391 result = CSR_READ_4(sc, base + offset + offsetof(bge_hostaddr, 4392 bge_addr_lo)); 4393 return (sysctl_handle_int(oidp, &result, sizeof(result), req)); 4394} 4395 4396#ifdef BGE_REGISTER_DEBUG 4397static int 4398bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 4399{ 4400 struct bge_softc *sc; 4401 uint16_t *sbdata; 4402 int error; 4403 int result; 4404 int i, j; 4405 4406 result = -1; 4407 error = sysctl_handle_int(oidp, &result, 0, req); 4408 if (error || (req->newptr == NULL)) 4409 return (error); 4410 4411 if (result == 1) { 4412 sc = (struct bge_softc *)arg1; 4413 4414 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; 4415 printf("Status Block:\n"); 4416 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { 4417 printf("%06x:", i); 4418 for (j = 0; j < 8; j++) { 4419 printf(" %04x", sbdata[i]); 4420 i += 4; 4421 } 4422 printf("\n"); 4423 } 4424 4425 printf("Registers:\n"); 4426 for (i = 0x800; i < 0xA00; ) { 4427 printf("%06x:", i); 4428 for (j = 0; j < 8; j++) { 4429 printf(" %08x", CSR_READ_4(sc, i)); 4430 i += 4; 4431 } 4432 printf("\n"); 4433 } 4434 4435 printf("Hardware Flags:\n"); 4436 if (BGE_IS_575X_PLUS(sc)) 4437 printf(" - 575X Plus\n"); 4438 if (BGE_IS_5705_PLUS(sc)) 4439 printf(" - 5705 Plus\n"); 4440 if (BGE_IS_5714_FAMILY(sc)) 4441 printf(" - 5714 Family\n"); 4442 if (BGE_IS_5700_FAMILY(sc)) 4443 printf(" - 5700 Family\n"); 4444 if (sc->bge_flags & BGE_FLAG_JUMBO) 4445 printf(" - Supports Jumbo Frames\n"); 4446 if (sc->bge_flags & BGE_FLAG_PCIX) 4447 printf(" - PCI-X Bus\n"); 4448 if (sc->bge_flags & BGE_FLAG_PCIE) 4449 printf(" - PCI Express Bus\n"); 4450 if (sc->bge_flags & BGE_FLAG_NO_3LED) 4451 printf(" - No 3 LEDs\n"); 4452 if 
(sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) 4453 printf(" - RX Alignment Bug\n"); 4454 } 4455 4456 return (error); 4457} 4458 4459static int 4460bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 4461{ 4462 struct bge_softc *sc; 4463 int error; 4464 uint16_t result; 4465 uint32_t val; 4466 4467 result = -1; 4468 error = sysctl_handle_int(oidp, &result, 0, req); 4469 if (error || (req->newptr == NULL)) 4470 return (error); 4471 4472 if (result < 0x8000) { 4473 sc = (struct bge_softc *)arg1; 4474 val = CSR_READ_4(sc, result); 4475 printf("reg 0x%06X = 0x%08X\n", result, val); 4476 } 4477 4478 return (error); 4479} 4480 4481static int 4482bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) 4483{ 4484 struct bge_softc *sc; 4485 int error; 4486 uint16_t result; 4487 uint32_t val; 4488 4489 result = -1; 4490 error = sysctl_handle_int(oidp, &result, 0, req); 4491 if (error || (req->newptr == NULL)) 4492 return (error); 4493 4494 if (result < 0x8000) { 4495 sc = (struct bge_softc *)arg1; 4496 val = bge_readmem_ind(sc, result); 4497 printf("mem 0x%06X = 0x%08X\n", result, val); 4498 } 4499 4500 return (error); 4501} 4502#endif
|