1/* $OpenBSD: if_bge.c,v 1.405 2024/05/24 06:02:53 jsg Exp $ */ 2 3/* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
34 * 35 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $ 36 */ 37 38/* 39 * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD. 40 * 41 * Written by Bill Paul <wpaul@windriver.com> 42 * Senior Engineer, Wind River Systems 43 */ 44 45/* 46 * The Broadcom BCM5700 is based on technology originally developed by 47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 48 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 51 * frames, highly configurable RX filtering, and 16 RX and TX queues 52 * (which, along with RX filter rules, can be used for QOS applications). 53 * Other features, such as TCP segmentation, may be available as part 54 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 55 * firmware images can be stored in hardware and need not be compiled 56 * into the driver. 57 * 58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 59 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 60 * 61 * The BCM5701 is a single-chip solution incorporating both the BCM5700 62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 63 * does not support external SSRAM. 64 * 65 * Broadcom also produces a variation of the BCM5700 under the "Altima" 66 * brand name, which is functionally similar but lacks PCI-X support. 67 * 68 * Without external SSRAM, you can only have at most 4 TX rings, 69 * and the use of the mini RX ring is disabled. This seems to imply 70 * that these features are simply not available on the BCM5701. As a 71 * result, this driver does not implement any support for the mini RX 72 * ring. 
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/atomic.h>
#include <sys/kstat.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if defined(__sparc64__) || defined(__HAVE_FDT)
#include <dev/ofw/openfirm.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

/* Smallest frame payload the chip may transmit without explicit padding. */
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */

/* Autoconf glue: probe/attach/detach/suspend-resume entry points. */
const struct bge_revision * bge_lookup_rev(u_int32_t);
int bge_can_use_msi(struct bge_softc *);
int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
int bge_detach(struct device *, int);
int bge_activate(struct device *, int);

const struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach,
	bge_activate
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};

/* Receive/transmit completion processing. */
void bge_txeof(struct bge_softc *);
void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
void bge_rxeof(struct bge_softc *);

/* Periodic housekeeping and statistics. */
void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_cksum_pad(struct mbuf *);
int bge_encap(struct bge_softc *, struct mbuf *, int *);
int bge_compact_dma_runt(struct mbuf *);

/* ifnet entry points and interrupt handler. */
int bge_intr(void *);
void bge_start(struct ifqueue *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
void bge_init(void *);
void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
void bge_stop(struct bge_softc *, int);
void bge_watchdog(struct ifnet *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* NVRAM/EEPROM access helpers. */
u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_iff(struct bge_softc *);

/* Jumbo-frame receive ring management. */
int bge_newbuf_jumbo(struct bge_softc *, int);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_fill_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);

/* Standard receive ring management. */
int bge_newbuf(struct bge_softc *, int);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_rxtick(void *);
void bge_fill_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);

/* Transmit ring management. */
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

/* Chip and block initialization. */
void bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);
u_int32_t bge_dma_swap_options(struct bge_softc *);
int bge_phy_addr(struct bge_softc *);

/* Indirect register/memory-window accessors. */
u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
void bge_writereg_ind(struct bge_softc *, int, int);
void bge_writembx(struct bge_softc *, int, int);

/* MII bus glue for the PHY layer. */
int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

/* Reasons passed to the firmware-handshake helpers below. */
#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
void bge_sig_post_reset(struct bge_softc *, int);
void bge_sig_legacy(struct bge_softc *, int);
void bge_sig_pre_reset(struct bge_softc *, int);
void bge_stop_fw(struct bge_softc *, int);
void bge_reset(struct bge_softc *);
void bge_link_upd(struct bge_softc *);

/* APE (management firmware) co-processor support. */
void bge_ape_lock_init(struct bge_softc *);
void bge_ape_read_fw_ver(struct bge_softc *);
int bge_ape_lock(struct bge_softc *, int);
void bge_ape_unlock(struct bge_softc *, int);
void bge_ape_send_event(struct bge_softc *, uint32_t);
void bge_ape_driver_state_change(struct bge_softc *, int);

#if NKSTAT > 0
void bge_kstat_attach(struct bge_softc *);

/*
 * Indices into the kstat view of the hardware statistics block.
 * The first group is MAC TX/RX counters, the second is send-data
 * initiator counters, the third is receive-list placement counters.
 */
enum {
	bge_stat_out_octets = 0,
	bge_stat_collisions,
	bge_stat_xon_sent,
	bge_stat_xoff_sent,
	bge_stat_xmit_errors,
	bge_stat_coll_frames,
	bge_stat_multicoll_frames,
	bge_stat_deferred_xmit,
	bge_stat_excess_coll,
	bge_stat_late_coll,
	bge_stat_out_ucast_pkt,
	bge_stat_out_mcast_pkt,
	bge_stat_out_bcast_pkt,
	bge_stat_in_octets,
	bge_stat_fragments,
	bge_stat_in_ucast_pkt,
	bge_stat_in_mcast_pkt,
	bge_stat_in_bcast_pkt,
	bge_stat_fcs_errors,
	bge_stat_align_errors,
	bge_stat_xon_rcvd,
	bge_stat_xoff_rcvd,
	bge_stat_ctrl_frame_rcvd,
	bge_stat_xoff_entered,
	bge_stat_too_long_frames,
	bge_stat_jabbers,
	bge_stat_too_short_pkts,

	bge_stat_dma_rq_full,
	bge_stat_dma_hprq_full,
	bge_stat_sdc_queue_full,
	bge_stat_nic_sendprod_set,
	bge_stat_status_updated,
	bge_stat_irqs,
	bge_stat_avoided_irqs,
	bge_stat_tx_thresh_hit,

	bge_stat_filtdrop,
	bge_stat_dma_wrq_full,
	bge_stat_dma_hpwrq_full,
	bge_stat_out_of_bds,
	bge_stat_if_in_drops,
	bge_stat_if_in_errors,
	bge_stat_rx_thresh_hit,
};

#endif

#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names.
 * Note: the spec seems to indicate that the hardware still has Alteon's
 * vendor ID burned into it, though it will always be overridden by the
 * vendor ID in the EEPROM.  Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },

	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
};

/* Chip-family classification tests, keyed off sc->bge_flags. */
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)

/*
 * Known chip revisions (full chip-id granularity), mapped to the
 * human-readable names printed at attach time.  Terminated by a
 * { 0, NULL } sentinel for bge_lookup_rev().
 */
static const struct bge_revision {
	u_int32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	/* the 5702 and 5703 share the same ASIC ID */
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* the 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57766_A1, "BCM57766 A1" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

465/* 466 * Some defaults for major revisions, so that newer steppings 467 * that we don't know about have a shot at working. 468 */ 469static const struct bge_revision bge_majorrevs[] = { 470 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 471 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 472 /* 5702 and 5703 share the same ASIC ID */ 473 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 474 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 475 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 476 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 477 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 478 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 479 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 480 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 481 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 482 { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 483 { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 484 { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 485 /* 5754 and 5787 share the same ASIC ID */ 486 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 487 { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 488 { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 489 { BGE_ASICREV_BCM57766, "unknown BCM57766" }, 490 { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 491 { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 492 { BGE_ASICREV_BCM5719, "unknown BCM5719" }, 493 { BGE_ASICREV_BCM5720, "unknown BCM5720" }, 494 { BGE_ASICREV_BCM5762, "unknown BCM5762" }, 495 496 { 0, NULL } 497}; 498 499u_int32_t 500bge_readmem_ind(struct bge_softc *sc, int off) 501{ 502 struct pci_attach_args *pa = &(sc->bge_pa); 503 u_int32_t val; 504 505 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 506 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 507 return (0); 508 509 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 510 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 511 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); 512 return (val); 513} 514 515void 516bge_writemem_ind(struct bge_softc *sc, 
int off, int val) 517{ 518 struct pci_attach_args *pa = &(sc->bge_pa); 519 520 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 521 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 522 return; 523 524 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 525 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 526 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); 527} 528 529void 530bge_writereg_ind(struct bge_softc *sc, int off, int val) 531{ 532 struct pci_attach_args *pa = &(sc->bge_pa); 533 534 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 535 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 536} 537 538void 539bge_writembx(struct bge_softc *sc, int off, int val) 540{ 541 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 542 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 543 544 CSR_WRITE_4(sc, off, val); 545} 546 547/* 548 * Clear all stale locks and select the lock for this driver instance. 549 */ 550void 551bge_ape_lock_init(struct bge_softc *sc) 552{ 553 struct pci_attach_args *pa = &(sc->bge_pa); 554 uint32_t bit, regbase; 555 int i; 556 557 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 558 regbase = BGE_APE_LOCK_GRANT; 559 else 560 regbase = BGE_APE_PER_LOCK_GRANT; 561 562 /* Clear any stale locks. */ 563 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { 564 switch (i) { 565 case BGE_APE_LOCK_PHY0: 566 case BGE_APE_LOCK_PHY1: 567 case BGE_APE_LOCK_PHY2: 568 case BGE_APE_LOCK_PHY3: 569 bit = BGE_APE_LOCK_GRANT_DRIVER0; 570 break; 571 default: 572 if (pa->pa_function == 0) 573 bit = BGE_APE_LOCK_GRANT_DRIVER0; 574 else 575 bit = (1 << pa->pa_function); 576 } 577 APE_WRITE_4(sc, regbase + 4 * i, bit); 578 } 579 580 /* Select the PHY lock based on the device's function number. 
*/ 581 switch (pa->pa_function) { 582 case 0: 583 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; 584 break; 585 case 1: 586 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; 587 break; 588 case 2: 589 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; 590 break; 591 case 3: 592 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; 593 break; 594 default: 595 printf("%s: PHY lock not supported on function %d\n", 596 sc->bge_dev.dv_xname, pa->pa_function); 597 break; 598 } 599} 600 601/* 602 * Check for APE firmware, set flags, and print version info. 603 */ 604void 605bge_ape_read_fw_ver(struct bge_softc *sc) 606{ 607 const char *fwtype; 608 uint32_t apedata, features; 609 610 /* Check for a valid APE signature in shared memory. */ 611 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); 612 if (apedata != BGE_APE_SEG_SIG_MAGIC) { 613 sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; 614 return; 615 } 616 617 /* Check if APE firmware is running. */ 618 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); 619 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { 620 printf("%s: APE signature found but FW status not ready! " 621 "0x%08x\n", sc->bge_dev.dv_xname, apedata); 622 return; 623 } 624 625 sc->bge_mfw_flags |= BGE_MFW_ON_APE; 626 627 /* Fetch the APE firmware type and version. */ 628 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); 629 features = APE_READ_4(sc, BGE_APE_FW_FEATURES); 630 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { 631 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; 632 fwtype = "NCSI"; 633 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { 634 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; 635 fwtype = "DASH"; 636 } else 637 fwtype = "UNKN"; 638 639 /* Print the APE firmware version. 
*/ 640 printf(", APE firmware %s %d.%d.%d.%d", fwtype, 641 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 642 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 643 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 644 (apedata & BGE_APE_FW_VERSION_BLDMSK)); 645} 646 647int 648bge_ape_lock(struct bge_softc *sc, int locknum) 649{ 650 struct pci_attach_args *pa = &(sc->bge_pa); 651 uint32_t bit, gnt, req, status; 652 int i, off; 653 654 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 655 return (0); 656 657 /* Lock request/grant registers have different bases. */ 658 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { 659 req = BGE_APE_LOCK_REQ; 660 gnt = BGE_APE_LOCK_GRANT; 661 } else { 662 req = BGE_APE_PER_LOCK_REQ; 663 gnt = BGE_APE_PER_LOCK_GRANT; 664 } 665 666 off = 4 * locknum; 667 668 switch (locknum) { 669 case BGE_APE_LOCK_GPIO: 670 /* Lock required when using GPIO. */ 671 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 672 return (0); 673 if (pa->pa_function == 0) 674 bit = BGE_APE_LOCK_REQ_DRIVER0; 675 else 676 bit = (1 << pa->pa_function); 677 break; 678 case BGE_APE_LOCK_GRC: 679 /* Lock required to reset the device. */ 680 if (pa->pa_function == 0) 681 bit = BGE_APE_LOCK_REQ_DRIVER0; 682 else 683 bit = (1 << pa->pa_function); 684 break; 685 case BGE_APE_LOCK_MEM: 686 /* Lock required when accessing certain APE memory. */ 687 if (pa->pa_function == 0) 688 bit = BGE_APE_LOCK_REQ_DRIVER0; 689 else 690 bit = (1 << pa->pa_function); 691 break; 692 case BGE_APE_LOCK_PHY0: 693 case BGE_APE_LOCK_PHY1: 694 case BGE_APE_LOCK_PHY2: 695 case BGE_APE_LOCK_PHY3: 696 /* Lock required when accessing PHYs. */ 697 bit = BGE_APE_LOCK_REQ_DRIVER0; 698 break; 699 default: 700 return (EINVAL); 701 } 702 703 /* Request a lock. */ 704 APE_WRITE_4(sc, req + off, bit); 705 706 /* Wait up to 1 second to acquire lock. 
*/ 707 for (i = 0; i < 20000; i++) { 708 status = APE_READ_4(sc, gnt + off); 709 if (status == bit) 710 break; 711 DELAY(50); 712 } 713 714 /* Handle any errors. */ 715 if (status != bit) { 716 printf("%s: APE lock %d request failed! " 717 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 718 sc->bge_dev.dv_xname, 719 locknum, req + off, bit & 0xFFFF, gnt + off, 720 status & 0xFFFF); 721 /* Revoke the lock request. */ 722 APE_WRITE_4(sc, gnt + off, bit); 723 return (EBUSY); 724 } 725 726 return (0); 727} 728 729void 730bge_ape_unlock(struct bge_softc *sc, int locknum) 731{ 732 struct pci_attach_args *pa = &(sc->bge_pa); 733 uint32_t bit, gnt; 734 int off; 735 736 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 737 return; 738 739 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 740 gnt = BGE_APE_LOCK_GRANT; 741 else 742 gnt = BGE_APE_PER_LOCK_GRANT; 743 744 off = 4 * locknum; 745 746 switch (locknum) { 747 case BGE_APE_LOCK_GPIO: 748 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 749 return; 750 if (pa->pa_function == 0) 751 bit = BGE_APE_LOCK_GRANT_DRIVER0; 752 else 753 bit = (1 << pa->pa_function); 754 break; 755 case BGE_APE_LOCK_GRC: 756 if (pa->pa_function == 0) 757 bit = BGE_APE_LOCK_GRANT_DRIVER0; 758 else 759 bit = (1 << pa->pa_function); 760 break; 761 case BGE_APE_LOCK_MEM: 762 if (pa->pa_function == 0) 763 bit = BGE_APE_LOCK_GRANT_DRIVER0; 764 else 765 bit = (1 << pa->pa_function); 766 break; 767 case BGE_APE_LOCK_PHY0: 768 case BGE_APE_LOCK_PHY1: 769 case BGE_APE_LOCK_PHY2: 770 case BGE_APE_LOCK_PHY3: 771 bit = BGE_APE_LOCK_GRANT_DRIVER0; 772 break; 773 default: 774 return; 775 } 776 777 APE_WRITE_4(sc, gnt + off, bit); 778} 779 780/* 781 * Send an event to the APE firmware. 782 */ 783void 784bge_ape_send_event(struct bge_softc *sc, uint32_t event) 785{ 786 uint32_t apedata; 787 int i; 788 789 /* NCSI does not support APE events. 
*/ 790 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 791 return; 792 793 /* Wait up to 1ms for APE to service previous event. */ 794 for (i = 10; i > 0; i--) { 795 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 796 break; 797 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 798 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 799 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 800 BGE_APE_EVENT_STATUS_EVENT_PENDING); 801 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 802 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 803 break; 804 } 805 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 806 DELAY(100); 807 } 808 if (i == 0) { 809 printf("%s: APE event 0x%08x send timed out\n", 810 sc->bge_dev.dv_xname, event); 811 } 812} 813 814void 815bge_ape_driver_state_change(struct bge_softc *sc, int kind) 816{ 817 uint32_t apedata, event; 818 819 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 820 return; 821 822 switch (kind) { 823 case BGE_RESET_START: 824 /* If this is the first load, clear the load counter. */ 825 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 826 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 827 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 828 else { 829 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 830 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 831 } 832 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 833 BGE_APE_HOST_SEG_SIG_MAGIC); 834 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 835 BGE_APE_HOST_SEG_LEN_MAGIC); 836 837 /* Add some version info if bge(4) supports it. 
*/ 838 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 839 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 840 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 841 BGE_APE_HOST_BEHAV_NO_PHYLOCK); 842 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 843 BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 844 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 845 BGE_APE_HOST_DRVR_STATE_START); 846 event = BGE_APE_EVENT_STATUS_STATE_START; 847 break; 848 case BGE_RESET_SHUTDOWN: 849 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 850 BGE_APE_HOST_DRVR_STATE_UNLOAD); 851 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 852 break; 853 case BGE_RESET_SUSPEND: 854 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 855 break; 856 default: 857 return; 858 } 859 860 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 861 BGE_APE_EVENT_STATUS_STATE_CHNGE); 862} 863 864 865u_int8_t 866bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 867{ 868 u_int32_t access, byte = 0; 869 int i; 870 871 /* Lock. */ 872 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 873 for (i = 0; i < 8000; i++) { 874 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 875 break; 876 DELAY(20); 877 } 878 if (i == 8000) 879 return (1); 880 881 /* Enable access. */ 882 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 883 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 884 885 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 886 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 887 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 888 DELAY(10); 889 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 890 DELAY(10); 891 break; 892 } 893 } 894 895 if (i == BGE_TIMEOUT * 10) { 896 printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname); 897 return (1); 898 } 899 900 /* Get result. */ 901 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 902 903 *dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF; 904 905 /* Disable access. */ 906 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 907 908 /* Unlock. 
*/ 909 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 910 CSR_READ_4(sc, BGE_NVRAM_SWARB); 911 912 return (0); 913} 914 915/* 916 * Read a sequence of bytes from NVRAM. 917 */ 918 919int 920bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 921{ 922 int err = 0, i; 923 u_int8_t byte = 0; 924 925 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 926 return (1); 927 928 for (i = 0; i < cnt; i++) { 929 err = bge_nvram_getbyte(sc, off + i, &byte); 930 if (err) 931 break; 932 *(dest + i) = byte; 933 } 934 935 return (err ? 1 : 0); 936} 937 938/* 939 * Read a byte of data stored in the EEPROM at address 'addr.' The 940 * BCM570x supports both the traditional bitbang interface and an 941 * auto access interface for reading the EEPROM. We use the auto 942 * access method. 943 */ 944u_int8_t 945bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 946{ 947 int i; 948 u_int32_t byte = 0; 949 950 /* 951 * Enable use of auto EEPROM access so we can avoid 952 * having to use the bitbang method. 953 */ 954 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 955 956 /* Reset the EEPROM, load the clock period. */ 957 CSR_WRITE_4(sc, BGE_EE_ADDR, 958 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 959 DELAY(20); 960 961 /* Issue the read EEPROM command. */ 962 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 963 964 /* Wait for completion */ 965 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 966 DELAY(10); 967 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 968 break; 969 } 970 971 if (i == BGE_TIMEOUT * 10) { 972 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 973 return (1); 974 } 975 976 /* Get result. */ 977 byte = CSR_READ_4(sc, BGE_EE_DATA); 978 979 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 980 981 return (0); 982} 983 984/* 985 * Read a sequence of bytes from the EEPROM. 
986 */ 987int 988bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 989{ 990 int i, error = 0; 991 u_int8_t byte = 0; 992 993 for (i = 0; i < cnt; i++) { 994 error = bge_eeprom_getbyte(sc, off + i, &byte); 995 if (error) 996 break; 997 *(dest + i) = byte; 998 } 999 1000 return (error ? 1 : 0); 1001} 1002 1003int 1004bge_miibus_readreg(struct device *dev, int phy, int reg) 1005{ 1006 struct bge_softc *sc = (struct bge_softc *)dev; 1007 u_int32_t val, autopoll; 1008 int i; 1009 1010 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1011 return (0); 1012 1013 /* Reading with autopolling on may trigger PCI errors */ 1014 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1015 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1016 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1017 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1018 DELAY(80); 1019 } 1020 1021 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 1022 BGE_MIPHY(phy)|BGE_MIREG(reg)); 1023 CSR_READ_4(sc, BGE_MI_COMM); /* force write */ 1024 1025 for (i = 0; i < 200; i++) { 1026 delay(1); 1027 val = CSR_READ_4(sc, BGE_MI_COMM); 1028 if (!(val & BGE_MICOMM_BUSY)) 1029 break; 1030 delay(10); 1031 } 1032 1033 if (i == 200) { 1034 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 1035 val = 0; 1036 goto done; 1037 } 1038 1039 val = CSR_READ_4(sc, BGE_MI_COMM); 1040 1041done: 1042 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1043 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1044 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1045 DELAY(80); 1046 } 1047 1048 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1049 1050 if (val & BGE_MICOMM_READFAIL) 1051 return (0); 1052 1053 return (val & 0xFFFF); 1054} 1055 1056void 1057bge_miibus_writereg(struct device *dev, int phy, int reg, int val) 1058{ 1059 struct bge_softc *sc = (struct bge_softc *)dev; 1060 u_int32_t autopoll; 1061 int i; 1062 1063 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 1064 (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL)) 1065 return; 1066 1067 if 
(bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1068 return; 1069 1070 /* Reading with autopolling on may trigger PCI errors */ 1071 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1072 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1073 DELAY(40); 1074 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1075 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1076 DELAY(40); /* 40 usec is supposed to be adequate */ 1077 } 1078 1079 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 1080 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 1081 CSR_READ_4(sc, BGE_MI_COMM); /* force write */ 1082 1083 for (i = 0; i < 200; i++) { 1084 delay(1); 1085 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 1086 break; 1087 delay(10); 1088 } 1089 1090 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1091 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1092 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1093 DELAY(40); 1094 } 1095 1096 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1097 1098 if (i == 200) { 1099 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 1100 } 1101} 1102 1103void 1104bge_miibus_statchg(struct device *dev) 1105{ 1106 struct bge_softc *sc = (struct bge_softc *)dev; 1107 struct mii_data *mii = &sc->bge_mii; 1108 u_int32_t mac_mode, rx_mode, tx_mode; 1109 1110 /* 1111 * Get flow control negotiation result. 1112 */ 1113 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1114 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) 1115 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1116 1117 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 1118 mii->mii_media_status & IFM_ACTIVE && 1119 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1120 BGE_STS_SETBIT(sc, BGE_STS_LINK); 1121 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 1122 (!(mii->mii_media_status & IFM_ACTIVE) || 1123 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 1124 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 1125 1126 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 1127 return; 1128 1129 /* Set the port mode (MII/GMII) to match the link speed. 
 */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	/* Gigabit media uses GMII; everything else uses MII. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if (mii->mii_media_active & IFM_FDX) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Initialize a standard receive ring descriptor: allocate an mbuf
 * cluster for slot 'i', DMA-map it, and publish the descriptor to the
 * hardware ring.  Returns 0 on success, ENOBUFS on allocation or
 * mapping failure.
 */
int
bge_newbuf(struct bge_softc *sc, int i)
{
	bus_dmamap_t dmap = sc->bge_cdata.bge_rx_std_map[i];
	struct bge_rx_bd *r = &sc->bge_rdata->bge_rx_std_ring[i];
	struct mbuf *m;
	int error;

	m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len;
	/* Align the payload unless this chip has the RX alignment bug. */
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	/* Sync-bracket the descriptor update (POSTWRITE ... PREWRITE). */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize a Jumbo receive ring descriptor.
1209 */ 1210int 1211bge_newbuf_jumbo(struct bge_softc *sc, int i) 1212{ 1213 bus_dmamap_t dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; 1214 struct bge_ext_rx_bd *r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 1215 struct mbuf *m; 1216 int error; 1217 1218 m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN); 1219 if (!m) 1220 return (ENOBUFS); 1221 m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 1222 if (!(sc->bge_flags & BGE_RX_ALIGNBUG)) 1223 m_adj(m, ETHER_ALIGN); 1224 1225 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m, 1226 BUS_DMA_READ|BUS_DMA_NOWAIT); 1227 if (error) { 1228 m_freem(m); 1229 return (ENOBUFS); 1230 } 1231 1232 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize, 1233 BUS_DMASYNC_PREREAD); 1234 sc->bge_cdata.bge_rx_jumbo_chain[i] = m; 1235 1236 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1237 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1238 i * sizeof (struct bge_ext_rx_bd), 1239 sizeof (struct bge_ext_rx_bd), 1240 BUS_DMASYNC_POSTWRITE); 1241 1242 /* 1243 * Fill in the extended RX buffer descriptor. 
1244 */ 1245 r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 1246 r->bge_bd.bge_idx = i; 1247 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 1248 switch (dmap->dm_nsegs) { 1249 case 4: 1250 BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr); 1251 r->bge_len3 = dmap->dm_segs[3].ds_len; 1252 /* FALLTHROUGH */ 1253 case 3: 1254 BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr); 1255 r->bge_len2 = dmap->dm_segs[2].ds_len; 1256 /* FALLTHROUGH */ 1257 case 2: 1258 BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr); 1259 r->bge_len1 = dmap->dm_segs[1].ds_len; 1260 /* FALLTHROUGH */ 1261 case 1: 1262 BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr); 1263 r->bge_bd.bge_len = dmap->dm_segs[0].ds_len; 1264 break; 1265 default: 1266 panic("%s: %d segments", __func__, dmap->dm_nsegs); 1267 } 1268 1269 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1270 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1271 i * sizeof (struct bge_ext_rx_bd), 1272 sizeof (struct bge_ext_rx_bd), 1273 BUS_DMASYNC_PREWRITE); 1274 1275 return (0); 1276} 1277 1278int 1279bge_init_rx_ring_std(struct bge_softc *sc) 1280{ 1281 int i; 1282 1283 if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) 1284 return (0); 1285 1286 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1287 if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1, 1288 sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1289 &sc->bge_cdata.bge_rx_std_map[i]) != 0) { 1290 printf("%s: unable to create dmamap for slot %d\n", 1291 sc->bge_dev.dv_xname, i); 1292 goto uncreate; 1293 } 1294 bzero(&sc->bge_rdata->bge_rx_std_ring[i], 1295 sizeof(struct bge_rx_bd)); 1296 } 1297 1298 sc->bge_std = BGE_STD_RX_RING_CNT - 1; 1299 1300 /* lwm must be greater than the replenish threshold */ 1301 if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT); 1302 bge_fill_rx_ring_std(sc); 1303 1304 SET(sc->bge_flags, BGE_RXRING_VALID); 1305 1306 return (0); 1307 1308uncreate: 1309 while (--i) { 1310 
bus_dmamap_destroy(sc->bge_dmatag, 1311 sc->bge_cdata.bge_rx_std_map[i]); 1312 } 1313 return (1); 1314} 1315 1316/* 1317 * When the refill timeout for a ring is active, that ring is so empty 1318 * that no more packets can be received on it, so the interrupt handler 1319 * will not attempt to refill it, meaning we don't need to protect against 1320 * interrupts here. 1321 */ 1322 1323void 1324bge_rxtick(void *arg) 1325{ 1326 struct bge_softc *sc = arg; 1327 1328 if (ISSET(sc->bge_flags, BGE_RXRING_VALID) && 1329 if_rxr_inuse(&sc->bge_std_ring) <= 8) 1330 bge_fill_rx_ring_std(sc); 1331} 1332 1333void 1334bge_rxtick_jumbo(void *arg) 1335{ 1336 struct bge_softc *sc = arg; 1337 1338 if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) && 1339 if_rxr_inuse(&sc->bge_jumbo_ring) <= 8) 1340 bge_fill_rx_ring_jumbo(sc); 1341} 1342 1343void 1344bge_fill_rx_ring_std(struct bge_softc *sc) 1345{ 1346 int i; 1347 int post = 0; 1348 u_int slots; 1349 1350 i = sc->bge_std; 1351 for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT); 1352 slots > 0; slots--) { 1353 BGE_INC(i, BGE_STD_RX_RING_CNT); 1354 1355 if (bge_newbuf(sc, i) != 0) 1356 break; 1357 1358 sc->bge_std = i; 1359 post = 1; 1360 } 1361 if_rxr_put(&sc->bge_std_ring, slots); 1362 1363 if (post) 1364 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1365 1366 /* 1367 * bge always needs more than 8 packets on the ring. if we cant do 1368 * that now, then try again later. 
1369 */ 1370 if (if_rxr_inuse(&sc->bge_std_ring) <= 8) 1371 timeout_add(&sc->bge_rxtimeout, 1); 1372} 1373 1374void 1375bge_free_rx_ring_std(struct bge_softc *sc) 1376{ 1377 bus_dmamap_t dmap; 1378 struct mbuf *m; 1379 int i; 1380 1381 if (!ISSET(sc->bge_flags, BGE_RXRING_VALID)) 1382 return; 1383 1384 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1385 dmap = sc->bge_cdata.bge_rx_std_map[i]; 1386 m = sc->bge_cdata.bge_rx_std_chain[i]; 1387 if (m != NULL) { 1388 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, 1389 dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1390 bus_dmamap_unload(sc->bge_dmatag, dmap); 1391 m_freem(m); 1392 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1393 } 1394 bus_dmamap_destroy(sc->bge_dmatag, dmap); 1395 sc->bge_cdata.bge_rx_std_map[i] = NULL; 1396 bzero(&sc->bge_rdata->bge_rx_std_ring[i], 1397 sizeof(struct bge_rx_bd)); 1398 } 1399 1400 CLR(sc->bge_flags, BGE_RXRING_VALID); 1401} 1402 1403int 1404bge_init_rx_ring_jumbo(struct bge_softc *sc) 1405{ 1406 volatile struct bge_rcb *rcb; 1407 int i; 1408 1409 if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) 1410 return (0); 1411 1412 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1413 if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0, 1414 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1415 &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) { 1416 printf("%s: unable to create dmamap for slot %d\n", 1417 sc->bge_dev.dv_xname, i); 1418 goto uncreate; 1419 } 1420 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i], 1421 sizeof(struct bge_ext_rx_bd)); 1422 } 1423 1424 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 1425 1426 /* lwm must be greater than the replenish threshold */ 1427 if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT); 1428 bge_fill_rx_ring_jumbo(sc); 1429 1430 SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID); 1431 1432 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1433 rcb->bge_maxlen_flags = 1434 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); 1435 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 
rcb->bge_maxlen_flags); 1436 1437 return (0); 1438 1439uncreate: 1440 while (--i) { 1441 bus_dmamap_destroy(sc->bge_dmatag, 1442 sc->bge_cdata.bge_rx_jumbo_map[i]); 1443 } 1444 return (1); 1445} 1446 1447void 1448bge_fill_rx_ring_jumbo(struct bge_softc *sc) 1449{ 1450 int i; 1451 int post = 0; 1452 u_int slots; 1453 1454 i = sc->bge_jumbo; 1455 for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT); 1456 slots > 0; slots--) { 1457 BGE_INC(i, BGE_JUMBO_RX_RING_CNT); 1458 1459 if (bge_newbuf_jumbo(sc, i) != 0) 1460 break; 1461 1462 sc->bge_jumbo = i; 1463 post = 1; 1464 } 1465 if_rxr_put(&sc->bge_jumbo_ring, slots); 1466 1467 if (post) 1468 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1469 1470 /* 1471 * bge always needs more than 8 packets on the ring. if we cant do 1472 * that now, then try again later. 1473 */ 1474 if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8) 1475 timeout_add(&sc->bge_rxtimeout_jumbo, 1); 1476} 1477 1478void 1479bge_free_rx_ring_jumbo(struct bge_softc *sc) 1480{ 1481 bus_dmamap_t dmap; 1482 struct mbuf *m; 1483 int i; 1484 1485 if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) 1486 return; 1487 1488 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1489 dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; 1490 m = sc->bge_cdata.bge_rx_jumbo_chain[i]; 1491 if (m != NULL) { 1492 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, 1493 dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1494 bus_dmamap_unload(sc->bge_dmatag, dmap); 1495 m_freem(m); 1496 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1497 } 1498 bus_dmamap_destroy(sc->bge_dmatag, dmap); 1499 sc->bge_cdata.bge_rx_jumbo_map[i] = NULL; 1500 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i], 1501 sizeof(struct bge_ext_rx_bd)); 1502 } 1503 1504 CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID); 1505} 1506 1507void 1508bge_free_tx_ring(struct bge_softc *sc) 1509{ 1510 int i; 1511 1512 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1513 return; 1514 1515 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1516 if 
		    (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			sc->bge_cdata.bge_tx_map[i] = NULL;
		}
		bzero(&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));

		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

/*
 * Reset TX bookkeeping, program the send-ring producer mailboxes and
 * create the per-slot TX DMA maps.  Returns 0 on success (or if the
 * ring is already valid), ENOBUFS if a DMA map cannot be created.
 */
int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_size_t txsegsz, txmaxsegsz;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return (0);

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata: mailbox writes are issued twice on 5700_BX. */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	/* Jumbo-capable chips get larger per-map limits. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		txsegsz = 4096;
		txmaxsegsz = BGE_JLEN;
	} else {
		txsegsz = MCLBYTES;
		txmaxsegsz = MCLBYTES;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
		    BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
			return (ENOBUFS);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return (0);
}

/*
 * Program the RX filter: promiscuous mode, all-multicast, or the
 * 128-bit multicast hash filter built from the multicast list.
 */
void
bge_iff(struct bge_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t hashes[16];
	u_int32_t h, rxmode;

	/* First, zot all the existing filters. */
	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(hashes, 0x00, sizeof(hashes));

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxmode |= BGE_RXMODE_RX_PROMISC;
	} else if (ac->ac_multirangecnt > 0) {
		/* Ranges can't be expressed in the hash; accept all. */
		ifp->if_flags |= IFF_ALLMULTI;
		memset(hashes, 0xff, sizeof(hashes));
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash on the low 7 bits of the CRC. */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			setbit(hashes, h & 0x7F);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
	    hashes, sizeof(hashes));
	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
}

/*
 * Pre-reset notification hook; forwards start/suspend to the APE.
 */
void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */

	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
		bge_ape_driver_state_change(sc, type);
}

/*
 * Post-reset notification hook; forwards shutdown to the APE.
 */
void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */

	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc, type);
}

void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */
}

void
bge_stop_fw(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode.
 */
}

/*
 * Compute the MODE_CTL DMA byte/word-swap option bits for this chip.
 * The BCM5720 additionally needs the B2HRX/HTX2B swap and enable bits.
 */
u_int32_t
bge_dma_swap_options(struct bge_softc *sc)
{
	u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
		    BGE_MODECTL_HTX2B_ENABLE;
	}

	return (dma_options);
}

/*
 * Determine the PHY address.  Defaults to 1; on BCM5717/5719/5720 it
 * is derived from the PCI function number plus the SerDes status
 * (strap register on 5717 A0).
 */
int
bge_phy_addr(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	int phy_addr = 1;

	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		phy_addr = pa->pa_function;
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
		} else {
			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
		}
	}

	return (phy_addr);
}

/*
 * Do endian, PCI and DMA initialization.
 */
void
bge_chipinit(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	misc_ctl = BGE_INIT;
	if (sc->bge_flags & BGE_TAGGED_STATUS)
		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    misc_ctl);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Likewise clear the status block. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
		/*
		 * For the 57766 and non Ax versions of 57765, bootcode
		 * needs to setup the PCIE Fast Training Sequence (FTS)
		 * value to prevent transmit hangs.
		 */
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
			    CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
			    BGE_CPMU_PADRNG_CTL_RDIV2);
		}
	}

	/*
	 * Set up the PCI DMA control register.  Watermarks vary by bus
	 * type (PCIe / PCI-X / conventional PCI) and chip revision.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);

	if (sc->bge_flags & BGE_PCIE) {
		/* Write watermark depends on negotiated max payload size. */
		if (sc->bge_mps >= 256)
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		else
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	if (BGE_IS_5717_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;

		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (!BGE_IS_57765_PLUS(sc) &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bge_dma_swap_options(sc);
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * BCM5701 B5 have a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}
}

int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	vaddr_t rcb_addr;
	bge_hostaddr taddr;
	u_int32_t dmactl, rdmareg, mimode, val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
1874 */ 1875 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1876 1877 /* Configure mbuf memory pool */ 1878 if (!BGE_IS_5705_PLUS(sc)) { 1879 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1880 BGE_BUFFPOOL_1); 1881 1882 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1883 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1884 else 1885 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1886 1887 /* Configure DMA resource pool */ 1888 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1889 BGE_DMA_DESCRIPTORS); 1890 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1891 } 1892 1893 /* Configure mbuf pool watermarks */ 1894 /* new Broadcom docs strongly recommend these: */ 1895 if (BGE_IS_5717_PLUS(sc)) { 1896 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1897 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1898 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1899 } else if (BGE_IS_5705_PLUS(sc)) { 1900 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1901 1902 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1903 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1904 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1905 } else { 1906 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1907 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1908 } 1909 } else { 1910 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1911 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1912 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1913 } 1914 1915 /* Configure DMA resource watermarks */ 1916 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1917 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1918 1919 /* Enable buffer manager */ 1920 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; 1921 /* 1922 * Change the arbitration algorithm of TXMBUF read request to 1923 * round-robin instead of priority based for BCM5719. When 1924 * TXFIFO is almost empty, RDMA will hold its request until 1925 * TXFIFO is not almost empty. 
1926 */ 1927 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1928 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1929 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 1930 1931 /* Poll for buffer manager start indication */ 1932 for (i = 0; i < 2000; i++) { 1933 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1934 break; 1935 DELAY(10); 1936 } 1937 1938 if (i == 2000) { 1939 printf("%s: buffer manager failed to start\n", 1940 sc->bge_dev.dv_xname); 1941 return (ENXIO); 1942 } 1943 1944 /* Enable flow-through queues */ 1945 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1946 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1947 1948 /* Wait until queue initialization is complete */ 1949 for (i = 0; i < 2000; i++) { 1950 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1951 break; 1952 DELAY(10); 1953 } 1954 1955 if (i == 2000) { 1956 printf("%s: flow-through queue init failed\n", 1957 sc->bge_dev.dv_xname); 1958 return (ENXIO); 1959 } 1960 1961 /* 1962 * Summary of rings supported by the controller: 1963 * 1964 * Standard Receive Producer Ring 1965 * - This ring is used to feed receive buffers for "standard" 1966 * sized frames (typically 1536 bytes) to the controller. 1967 * 1968 * Jumbo Receive Producer Ring 1969 * - This ring is used to feed receive buffers for jumbo sized 1970 * frames (i.e. anything bigger than the "standard" frames) 1971 * to the controller. 1972 * 1973 * Mini Receive Producer Ring 1974 * - This ring is used to feed receive buffers for "mini" 1975 * sized frames to the controller. 1976 * - This feature required external memory for the controller 1977 * but was never used in a production system. Should always 1978 * be disabled. 1979 * 1980 * Receive Return Ring 1981 * - After the controller has placed an incoming frame into a 1982 * receive buffer that buffer is moved into a receive return 1983 * ring. The driver is then responsible to passing the 1984 * buffer up to the stack. Many versions of the controller 1985 * support multiple RR rings. 
1986 * 1987 * Send Ring 1988 * - This ring is used for outgoing frames. Many versions of 1989 * the controller support multiple send rings. 1990 */ 1991 1992 /* Initialize the standard RX ring control block */ 1993 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1994 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1995 if (BGE_IS_5717_PLUS(sc)) { 1996 /* 1997 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 1998 * Bits 15-2 : Maximum RX frame size 1999 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 2000 * Bit 0 : Reserved 2001 */ 2002 rcb->bge_maxlen_flags = 2003 BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2); 2004 } else if (BGE_IS_5705_PLUS(sc)) { 2005 /* 2006 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2007 * Bits 15-2 : Reserved (should be 0) 2008 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2009 * Bit 0 : Reserved 2010 */ 2011 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2012 } else { 2013 /* 2014 * Ring size is always XXX entries 2015 * Bits 31-16: Maximum RX frame size 2016 * Bits 15-2 : Reserved (should be 0) 2017 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2018 * Bit 0 : Reserved 2019 */ 2020 rcb->bge_maxlen_flags = 2021 BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0); 2022 } 2023 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2024 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2025 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2026 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2027 else 2028 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2029 /* Write the standard receive producer ring control block. */ 2030 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2031 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2032 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2033 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2034 2035 /* Reset the standard receive producer ring producer index. 
*/ 2036 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2037 2038 /* 2039 * Initialize the Jumbo RX ring control block 2040 * We set the 'ring disabled' bit in the flags 2041 * field until we're actually ready to start 2042 * using this ring (i.e. once we set the MTU 2043 * high enough to require it). 2044 */ 2045 if (sc->bge_flags & BGE_JUMBO_RING) { 2046 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2047 BGE_HOSTADDR(rcb->bge_hostaddr, 2048 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2049 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2050 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2051 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2052 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2053 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2054 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2055 else 2056 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2057 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2058 rcb->bge_hostaddr.bge_addr_hi); 2059 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2060 rcb->bge_hostaddr.bge_addr_lo); 2061 /* Program the jumbo receive producer ring RCB parameters. */ 2062 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2063 rcb->bge_maxlen_flags); 2064 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2065 /* Reset the jumbo receive producer ring producer index. */ 2066 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2067 } 2068 2069 /* Disable the mini receive producer ring RCB. */ 2070 if (BGE_IS_5700_FAMILY(sc)) { 2071 /* Set up dummy disabled mini ring RCB */ 2072 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2073 rcb->bge_maxlen_flags = 2074 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2075 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2076 rcb->bge_maxlen_flags); 2077 /* Reset the mini receive producer ring producer index. */ 2078 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2079 2080 /* XXX why? 
*/ 2081 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2082 offsetof(struct bge_ring_data, bge_info), 2083 sizeof (struct bge_gib), 2084 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2085 } 2086 2087 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2088 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2089 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2090 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2091 sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2092 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2093 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2094 } 2095 /* 2096 * The BD ring replenish thresholds control how often the 2097 * hardware fetches new BD's from the producer rings in host 2098 * memory. Setting the value too low on a busy system can 2099 * starve the hardware and reduce the throughput. 2100 * 2101 * Set the BD ring replenish thresholds. The recommended 2102 * values are 1/8th the number of descriptors allocated to 2103 * each ring, but since we try to avoid filling the entire 2104 * ring we set these to the minimal value of 8. This needs to 2105 * be done on several of the supported chip revisions anyway, 2106 * to work around HW bugs. 2107 */ 2108 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); 2109 if (sc->bge_flags & BGE_JUMBO_RING) 2110 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); 2111 2112 if (BGE_IS_5717_PLUS(sc)) { 2113 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2114 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2115 } 2116 2117 /* 2118 * Disable all send rings by setting the 'ring disabled' bit 2119 * in the flags field of all the TX send ring control blocks, 2120 * located in NIC memory. 2121 */ 2122 if (BGE_IS_5700_FAMILY(sc)) { 2123 /* 5700 to 5704 had 16 send rings. 
*/ 2124 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2125 } else if (BGE_IS_57765_PLUS(sc) || 2126 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2127 limit = 2; 2128 else if (BGE_IS_5717_PLUS(sc)) 2129 limit = 4; 2130 else 2131 limit = 1; 2132 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2133 for (i = 0; i < limit; i++) { 2134 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2135 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2136 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2137 rcb_addr += sizeof(struct bge_rcb); 2138 } 2139 2140 /* Configure send ring RCB 0 (we use only the first ring) */ 2141 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2142 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2143 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2144 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2145 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2146 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2147 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2148 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2149 else 2150 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2151 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2152 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2153 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2154 2155 /* 2156 * Disable all receive return rings by setting the 2157 * 'ring disabled' bit in the flags field of all the receive 2158 * return ring control blocks, located in NIC memory. 2159 */ 2160 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2161 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2162 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2163 /* Should be 17, use 16 until we get an SRAM map. 
*/ 2164 limit = 16; 2165 } else if (BGE_IS_5700_FAMILY(sc)) 2166 limit = BGE_RX_RINGS_MAX; 2167 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2168 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2169 BGE_IS_57765_PLUS(sc)) 2170 limit = 4; 2171 else 2172 limit = 1; 2173 /* Disable all receive return rings */ 2174 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2175 for (i = 0; i < limit; i++) { 2176 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2177 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2178 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2179 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2180 BGE_RCB_FLAG_RING_DISABLED)); 2181 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2182 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2183 (i * (sizeof(u_int64_t))), 0); 2184 rcb_addr += sizeof(struct bge_rcb); 2185 } 2186 2187 /* 2188 * Set up receive return ring 0. Note that the NIC address 2189 * for RX return rings is 0x0. The return rings live entirely 2190 * within the host, so the nicaddr field in the RCB isn't used. 
2191 */ 2192 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2193 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2194 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2195 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2196 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2197 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2198 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2199 2200 /* Set random backoff seed for TX */ 2201 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2202 (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 2203 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 2204 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) & 2205 BGE_TX_BACKOFF_SEED_MASK); 2206 2207 /* Set inter-packet gap */ 2208 val = 0x2620; 2209 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2210 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2211 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2212 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2213 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2214 2215 /* 2216 * Specify which ring to use for packets that don't match 2217 * any RX rules. 2218 */ 2219 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2220 2221 /* 2222 * Configure number of RX lists. One interrupt distribution 2223 * list, sixteen active lists, one bad frames class. 2224 */ 2225 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2226 2227 /* Initialize RX list placement stats mask. */ 2228 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF); 2229 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2230 2231 /* Disable host coalescing until we get it set up */ 2232 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2233 2234 /* Poll to make sure it's shut down. 
*/ 2235 for (i = 0; i < 2000; i++) { 2236 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2237 break; 2238 DELAY(10); 2239 } 2240 2241 if (i == 2000) { 2242 printf("%s: host coalescing engine failed to idle\n", 2243 sc->bge_dev.dv_xname); 2244 return (ENXIO); 2245 } 2246 2247 /* Set up host coalescing defaults */ 2248 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2249 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2250 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2251 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2252 if (!(BGE_IS_5705_PLUS(sc))) { 2253 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2254 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2255 } 2256 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2257 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2258 2259 /* Set up address of statistics block */ 2260 if (!(BGE_IS_5705_PLUS(sc))) { 2261 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2262 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2263 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2264 2265 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2266 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2267 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2268 } 2269 2270 /* Set up address of status block */ 2271 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2272 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2273 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2274 2275 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2276 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2277 2278 /* Set up status block size. 
*/ 2279 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2280 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2281 val = BGE_STATBLKSZ_FULL; 2282 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2283 } else { 2284 val = BGE_STATBLKSZ_32BYTE; 2285 bzero(&sc->bge_rdata->bge_status_block, 32); 2286 } 2287 2288 /* Turn on host coalescing state machine */ 2289 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2290 2291 /* Turn on RX BD completion state machine and enable attentions */ 2292 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2293 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 2294 2295 /* Turn on RX list placement state machine */ 2296 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2297 2298 /* Turn on RX list selector state machine. */ 2299 if (!(BGE_IS_5705_PLUS(sc))) 2300 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2301 2302 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2303 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2304 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2305 BGE_MACMODE_FRMHDR_DMA_ENB; 2306 2307 if (sc->bge_flags & BGE_FIBER_TBI) 2308 val |= BGE_PORTMODE_TBI; 2309 else if (sc->bge_flags & BGE_FIBER_MII) 2310 val |= BGE_PORTMODE_GMII; 2311 else 2312 val |= BGE_PORTMODE_MII; 2313 2314 /* Allow APE to send/receive frames. */ 2315 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2316 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2317 2318 /* Turn on DMA, clear stats */ 2319 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2320 DELAY(40); 2321 2322 /* Set misc. 
local control, enable interrupts on attentions */ 2323 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2324 2325#ifdef notdef 2326 /* Assert GPIO pins for PHY reset */ 2327 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 2328 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 2329 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 2330 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 2331#endif 2332 2333 /* Turn on DMA completion state machine */ 2334 if (!(BGE_IS_5705_PLUS(sc))) 2335 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2336 2337 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 2338 2339 /* Enable host coalescing bug fix. */ 2340 if (BGE_IS_5755_PLUS(sc)) 2341 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2342 2343 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2344 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2345 2346 /* Turn on write DMA state machine */ 2347 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 2348 DELAY(40); 2349 2350 val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS; 2351 2352 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2353 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2354 2355 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2356 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2357 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2358 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2359 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2360 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2361 2362 if (sc->bge_flags & BGE_PCIE) 2363 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2364 2365 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2366 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2367 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2368 BGE_RDMAMODE_H2BNC_VLAN_DET; 2369 /* 2370 * Allow multiple outstanding read requests from 2371 * non-LSO read DMA engine. 
2372 */ 2373 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2374 } 2375 2376 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2377 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2378 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2379 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2380 BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { 2381 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2382 rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2383 else 2384 rdmareg = BGE_RDMA_RSRVCTRL; 2385 dmactl = CSR_READ_4(sc, rdmareg); 2386 /* 2387 * Adjust tx margin to prevent TX data corruption and 2388 * fix internal FIFO overflow. 2389 */ 2390 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2391 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2392 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2393 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2394 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2395 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2396 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2397 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2398 } 2399 /* 2400 * Enable fix for read DMA FIFO overruns. 2401 * The fix is to limit the number of RX BDs 2402 * the hardware would fetch at a time. 2403 */ 2404 CSR_WRITE_4(sc, rdmareg, dmactl | 2405 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2406 } 2407 2408 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2409 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2410 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2411 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2412 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2413 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2414 /* 2415 * Allow 4KB burst length reads for non-LSO frames. 2416 * Enable 512B burst length reads for buffer descriptors. 
2417 */ 2418 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2419 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2420 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2421 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2422 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2423 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2424 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2425 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2426 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2427 } 2428 2429 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 2430 DELAY(40); 2431 2432 if (sc->bge_flags & BGE_RDMA_BUG) { 2433 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2434 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2435 if ((val & 0xFFFF) > ETHER_MAX_LEN) 2436 break; 2437 if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN) 2438 break; 2439 } 2440 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2441 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2442 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2443 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2444 else 2445 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2446 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2447 } 2448 } 2449 2450 /* Turn on RX data completion state machine */ 2451 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2452 2453 /* Turn on RX BD initiator state machine */ 2454 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2455 2456 /* Turn on RX data and RX BD initiator state machine */ 2457 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2458 2459 /* Turn on Mbuf cluster free state machine */ 2460 if (!BGE_IS_5705_PLUS(sc)) 2461 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2462 2463 /* Turn on send BD completion state machine */ 2464 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2465 2466 /* Turn on send data completion state machine */ 2467 val = BGE_SDCMODE_ENABLE; 2468 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2469 val |= BGE_SDCMODE_CDELAY; 2470 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2471 2472 /* Turn on send data initiator state 
machine */ 2473 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2474 2475 /* Turn on send BD initiator state machine */ 2476 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2477 2478 /* Turn on send BD selector state machine */ 2479 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2480 2481 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF); 2482 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2483 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 2484 2485 /* ack/clear link change events */ 2486 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2487 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2488 BGE_MACSTAT_LINK_CHANGED); 2489 2490 /* Enable PHY auto polling (for MII/GMII only) */ 2491 if (sc->bge_flags & BGE_FIBER_TBI) { 2492 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2493 } else { 2494 if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0) 2495 mimode = BGE_MIMODE_500KHZ_CONST; 2496 else 2497 mimode = BGE_MIMODE_BASE; 2498 if (BGE_IS_5700_FAMILY(sc) || 2499 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { 2500 mimode |= BGE_MIMODE_AUTOPOLL; 2501 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2502 } 2503 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 2504 CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 2505 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2506 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2507 BGE_EVTENB_MI_INTERRUPT); 2508 } 2509 2510 /* 2511 * Clear any pending link state attention. 2512 * Otherwise some link state change events may be lost until attention 2513 * is cleared by bge_intr() -> bge_link_upd() sequence. 2514 * It's not necessary on newer BCM chips - perhaps enabling link 2515 * state change attentions implies clearing pending attention. 2516 */ 2517 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2518 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2519 BGE_MACSTAT_LINK_CHANGED); 2520 2521 /* Enable link state change attentions. 
 */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

/*
 * Map a chip id to its human-readable revision name.  First try an
 * exact match against the per-stepping table (bge_revisions); if the
 * specific stepping is unknown, fall back to a match on the ASIC
 * major revision (bge_majorrevs).  Returns NULL when neither table
 * knows the chip.
 */
const struct bge_revision *
bge_lookup_rev(u_int32_t chipid)
{
	const struct bge_revision *br;

	/* Exact chip-id (stepping) match. */
	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	/* Fall back to matching only the ASIC major revision. */
	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

/*
 * Decide whether MSI can be used on this chip.  Returns 1 if MSI is
 * believed to work, 0 otherwise.  Chips not explicitly listed below
 * are allowed MSI only when they are 575X-or-newer parts.
 */
int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		break;
	case BGE_ASICREV_BCM5750:
		/* 5750 AX/BX steppings are excluded; later steppings OK. */
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	default:
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}

	return (can_use_msi);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
2578 */ 2579int 2580bge_probe(struct device *parent, void *match, void *aux) 2581{ 2582 return (pci_matchbyid(aux, bge_devices, nitems(bge_devices))); 2583} 2584 2585void 2586bge_attach(struct device *parent, struct device *self, void *aux) 2587{ 2588 struct bge_softc *sc = (struct bge_softc *)self; 2589 struct pci_attach_args *pa = aux; 2590 pci_chipset_tag_t pc = pa->pa_pc; 2591 const struct bge_revision *br; 2592 pcireg_t pm_ctl, memtype, subid, reg; 2593 pci_intr_handle_t ih; 2594 const char *intrstr = NULL; 2595 int gotenaddr = 0; 2596 u_int32_t hwcfg = 0; 2597 u_int32_t mac_addr = 0; 2598 u_int32_t misccfg; 2599 struct ifnet *ifp; 2600 caddr_t kva; 2601#ifdef __sparc64__ 2602 char name[32]; 2603#endif 2604 2605 sc->bge_pa = *pa; 2606 2607 subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 2608 2609 /* 2610 * Map control/status registers. 2611 */ 2612 DPRINTFN(5, ("Map control/status regs\n")); 2613 2614 DPRINTFN(5, ("pci_mapreg_map\n")); 2615 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2616 if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag, 2617 &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) { 2618 printf(": can't find mem space\n"); 2619 return; 2620 } 2621 2622 /* 2623 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2624 * can clobber the chip's PCI config-space power control registers, 2625 * leaving the card in D3 powersave state. 2626 * We do not have memory-mapped registers in this state, 2627 * so force device into D0 state before starting initialization. 2628 */ 2629 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2630 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2631 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2632 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2633 DELAY(1000); /* 27 usec is allegedly sufficient */ 2634 2635 /* 2636 * Save ASIC rev. 
2637 */ 2638 sc->bge_chipid = 2639 (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) 2640 >> BGE_PCIMISCCTL_ASICREV_SHIFT); 2641 2642 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) { 2643 switch (PCI_PRODUCT(pa->pa_id)) { 2644 case PCI_PRODUCT_BROADCOM_BCM5717: 2645 case PCI_PRODUCT_BROADCOM_BCM5718: 2646 case PCI_PRODUCT_BROADCOM_BCM5719: 2647 case PCI_PRODUCT_BROADCOM_BCM5720: 2648 case PCI_PRODUCT_BROADCOM_BCM5725: 2649 case PCI_PRODUCT_BROADCOM_BCM5727: 2650 case PCI_PRODUCT_BROADCOM_BCM5762: 2651 case PCI_PRODUCT_BROADCOM_BCM57764: 2652 case PCI_PRODUCT_BROADCOM_BCM57767: 2653 case PCI_PRODUCT_BROADCOM_BCM57787: 2654 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2655 BGE_PCI_GEN2_PRODID_ASICREV); 2656 break; 2657 case PCI_PRODUCT_BROADCOM_BCM57761: 2658 case PCI_PRODUCT_BROADCOM_BCM57762: 2659 case PCI_PRODUCT_BROADCOM_BCM57765: 2660 case PCI_PRODUCT_BROADCOM_BCM57766: 2661 case PCI_PRODUCT_BROADCOM_BCM57781: 2662 case PCI_PRODUCT_BROADCOM_BCM57782: 2663 case PCI_PRODUCT_BROADCOM_BCM57785: 2664 case PCI_PRODUCT_BROADCOM_BCM57786: 2665 case PCI_PRODUCT_BROADCOM_BCM57791: 2666 case PCI_PRODUCT_BROADCOM_BCM57795: 2667 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2668 BGE_PCI_GEN15_PRODID_ASICREV); 2669 break; 2670 default: 2671 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2672 BGE_PCI_PRODID_ASICREV); 2673 break; 2674 } 2675 } 2676 2677 sc->bge_phy_addr = bge_phy_addr(sc); 2678 2679 printf(", "); 2680 br = bge_lookup_rev(sc->bge_chipid); 2681 if (br == NULL) 2682 printf("unknown ASIC (0x%x)", sc->bge_chipid); 2683 else 2684 printf("%s (0x%x)", br->br_name, sc->bge_chipid); 2685 2686 /* 2687 * PCI Express or PCI-X controller check. 2688 */ 2689 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2690 &sc->bge_expcap, NULL) != 0) { 2691 /* Extract supported maximum payload size. 
*/ 2692 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap + 2693 PCI_PCIE_DCAP); 2694 sc->bge_mps = 128 << (reg & 0x7); 2695 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2696 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2697 sc->bge_expmrq = (fls(2048) - 8) << 12; 2698 else 2699 sc->bge_expmrq = (fls(4096) - 8) << 12; 2700 /* Disable PCIe Active State Power Management (ASPM). */ 2701 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 2702 sc->bge_expcap + PCI_PCIE_LCSR); 2703 reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1); 2704 pci_conf_write(pa->pa_pc, pa->pa_tag, 2705 sc->bge_expcap + PCI_PCIE_LCSR, reg); 2706 sc->bge_flags |= BGE_PCIE; 2707 } else { 2708 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2709 BGE_PCISTATE_PCI_BUSMODE) == 0) 2710 sc->bge_flags |= BGE_PCIX; 2711 } 2712 2713 /* 2714 * SEEPROM check. 2715 */ 2716#ifdef __sparc64__ 2717 /* 2718 * Onboard interfaces on UltraSPARC systems generally don't 2719 * have a SEEPROM fitted. These interfaces, and cards that 2720 * have FCode, are named "network" by the PROM, whereas cards 2721 * without FCode show up as "ethernet". Since we don't really 2722 * need the information from the SEEPROM on cards that have 2723 * FCode it's fine to pretend they don't have one. 2724 */ 2725 if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name, 2726 sizeof(name)) > 0 && strcmp(name, "network") == 0) 2727 sc->bge_flags |= BGE_NO_EEPROM; 2728#endif 2729 2730 /* Save chipset family. 
*/ 2731 switch (BGE_ASICREV(sc->bge_chipid)) { 2732 case BGE_ASICREV_BCM5762: 2733 case BGE_ASICREV_BCM57765: 2734 case BGE_ASICREV_BCM57766: 2735 sc->bge_flags |= BGE_57765_PLUS; 2736 /* FALLTHROUGH */ 2737 case BGE_ASICREV_BCM5717: 2738 case BGE_ASICREV_BCM5719: 2739 case BGE_ASICREV_BCM5720: 2740 sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS | 2741 BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING | 2742 BGE_JUMBO_FRAME; 2743 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2744 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2745 /* 2746 * Enable work around for DMA engine miscalculation 2747 * of TXMBUF available space. 2748 */ 2749 sc->bge_flags |= BGE_RDMA_BUG; 2750 2751 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 && 2752 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { 2753 /* Jumbo frame on BCM5719 A0 does not work. */ 2754 sc->bge_flags &= ~(BGE_JUMBO_CAPABLE | 2755 BGE_JUMBO_RING | BGE_JUMBO_FRAME); 2756 } 2757 } 2758 break; 2759 case BGE_ASICREV_BCM5755: 2760 case BGE_ASICREV_BCM5761: 2761 case BGE_ASICREV_BCM5784: 2762 case BGE_ASICREV_BCM5785: 2763 case BGE_ASICREV_BCM5787: 2764 case BGE_ASICREV_BCM57780: 2765 sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS; 2766 break; 2767 case BGE_ASICREV_BCM5700: 2768 case BGE_ASICREV_BCM5701: 2769 case BGE_ASICREV_BCM5703: 2770 case BGE_ASICREV_BCM5704: 2771 sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING; 2772 break; 2773 case BGE_ASICREV_BCM5714_A0: 2774 case BGE_ASICREV_BCM5780: 2775 case BGE_ASICREV_BCM5714: 2776 sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD; 2777 /* FALLTHROUGH */ 2778 case BGE_ASICREV_BCM5750: 2779 case BGE_ASICREV_BCM5752: 2780 case BGE_ASICREV_BCM5906: 2781 sc->bge_flags |= BGE_575X_PLUS; 2782 /* FALLTHROUGH */ 2783 case BGE_ASICREV_BCM5705: 2784 sc->bge_flags |= BGE_5705_PLUS; 2785 break; 2786 } 2787 2788 if (sc->bge_flags & BGE_JUMBO_STD) 2789 sc->bge_rx_std_len = BGE_JLEN; 2790 else 2791 
sc->bge_rx_std_len = MCLBYTES; 2792 2793 /* 2794 * When using the BCM5701 in PCI-X mode, data corruption has 2795 * been observed in the first few bytes of some received packets. 2796 * Aligning the packet buffer in memory eliminates the corruption. 2797 * Unfortunately, this misaligns the packet payloads. On platforms 2798 * which do not support unaligned accesses, we will realign the 2799 * payloads by copying the received packets. 2800 */ 2801 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2802 sc->bge_flags & BGE_PCIX) 2803 sc->bge_flags |= BGE_RX_ALIGNBUG; 2804 2805 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2806 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 2807 PCI_VENDOR(subid) == DELL_VENDORID) 2808 sc->bge_phy_flags |= BGE_PHY_NO_3LED; 2809 2810 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 2811 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 2812 2813 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2814 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 2815 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 2816 sc->bge_flags |= BGE_IS_5788; 2817 2818 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2819 (misccfg == 0x4000 || misccfg == 0x8000)) || 2820 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2821 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2822 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2823 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2824 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2825 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2826 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2827 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2828 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2829 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2830 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 2831 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 2832 BGE_ASICREV(sc->bge_chipid) == 
BGE_ASICREV_BCM5906) 2833 sc->bge_phy_flags |= BGE_PHY_10_100_ONLY; 2834 2835 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2836 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2837 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2838 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2839 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2840 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; 2841 2842 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2843 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2844 sc->bge_phy_flags |= BGE_PHY_CRC_BUG; 2845 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2846 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2847 sc->bge_phy_flags |= BGE_PHY_ADC_BUG; 2848 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2849 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; 2850 2851 if ((BGE_IS_5705_PLUS(sc)) && 2852 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2853 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2854 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 2855 !BGE_IS_5717_PLUS(sc)) { 2856 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2857 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2858 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2859 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2860 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2861 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2862 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; 2863 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2864 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; 2865 } else 2866 sc->bge_phy_flags |= BGE_PHY_BER_BUG; 2867 } 2868 2869 /* Identify chips with APE processor. 
*/ 2870 switch (BGE_ASICREV(sc->bge_chipid)) { 2871 case BGE_ASICREV_BCM5717: 2872 case BGE_ASICREV_BCM5719: 2873 case BGE_ASICREV_BCM5720: 2874 case BGE_ASICREV_BCM5761: 2875 case BGE_ASICREV_BCM5762: 2876 sc->bge_flags |= BGE_APE; 2877 break; 2878 } 2879 2880 /* Chips with APE need BAR2 access for APE registers/memory. */ 2881 if ((sc->bge_flags & BGE_APE) != 0) { 2882 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); 2883 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, 2884 &sc->bge_apetag, &sc->bge_apehandle, NULL, 2885 &sc->bge_apesize, 0)) { 2886 printf(": couldn't map BAR2 memory\n"); 2887 goto fail_1; 2888 } 2889 2890 /* Enable APE register/memory access by host driver. */ 2891 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2892 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2893 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2894 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2895 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); 2896 2897 bge_ape_lock_init(sc); 2898 bge_ape_read_fw_ver(sc); 2899 } 2900 2901 /* Identify the chips that use an CPMU. */ 2902 if (BGE_IS_5717_PLUS(sc) || 2903 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2904 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2905 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2906 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2907 sc->bge_flags |= BGE_CPMU_PRESENT; 2908 2909 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, 2910 &sc->bge_msicap, NULL)) { 2911 if (bge_can_use_msi(sc) == 0) 2912 pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED; 2913 } 2914 2915 DPRINTFN(5, ("pci_intr_map\n")); 2916 if (pci_intr_map_msi(pa, &ih) == 0) 2917 sc->bge_flags |= BGE_MSI; 2918 else if (pci_intr_map(pa, &ih)) { 2919 printf(": couldn't map interrupt\n"); 2920 goto fail_1; 2921 } 2922 2923 /* 2924 * All controllers except BCM5700 supports tagged status but 2925 * we use tagged status only for MSI case on BCM5717. Otherwise 2926 * MSI on BCM5717 does not work. 
2927 */ 2928 if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI) 2929 sc->bge_flags |= BGE_TAGGED_STATUS; 2930 2931 DPRINTFN(5, ("pci_intr_string\n")); 2932 intrstr = pci_intr_string(pc, ih); 2933 2934 /* Try to reset the chip. */ 2935 DPRINTFN(5, ("bge_reset\n")); 2936 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 2937 bge_reset(sc); 2938 2939 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 2940 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 2941 2942 bge_chipinit(sc); 2943 2944#if defined(__sparc64__) || defined(__HAVE_FDT) 2945 if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) { 2946 if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address", 2947 sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN) 2948 gotenaddr = 1; 2949 } 2950#endif 2951 2952 /* 2953 * Get station address from the EEPROM. 2954 */ 2955 if (!gotenaddr) { 2956 mac_addr = bge_readmem_ind(sc, 0x0c14); 2957 if ((mac_addr >> 16) == 0x484b) { 2958 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8); 2959 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr; 2960 mac_addr = bge_readmem_ind(sc, 0x0c18); 2961 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24); 2962 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16); 2963 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8); 2964 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr; 2965 gotenaddr = 1; 2966 } 2967 } 2968 if (!gotenaddr) { 2969 int mac_offset = BGE_EE_MAC_OFFSET; 2970 2971 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2972 mac_offset = BGE_EE_MAC_OFFSET_5906; 2973 2974 if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2975 mac_offset + 2, ETHER_ADDR_LEN) == 0) 2976 gotenaddr = 1; 2977 } 2978 if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) { 2979 if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2980 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0) 2981 gotenaddr = 1; 2982 } 2983 2984#ifdef __sparc64__ 2985 if (!gotenaddr) { 2986 extern void myetheraddr(u_char *); 2987 2988 myetheraddr(sc->arpcom.ac_enaddr); 2989 gotenaddr = 1; 2990 } 2991#endif 2992 2993 
if (!gotenaddr) { 2994 printf(": failed to read station address\n"); 2995 goto fail_2; 2996 } 2997 2998 /* Allocate the general information block and ring buffers. */ 2999 sc->bge_dmatag = pa->pa_dmat; 3000 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3001 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3002 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg, 3003 BUS_DMA_NOWAIT)) { 3004 printf(": can't alloc rx buffers\n"); 3005 goto fail_2; 3006 } 3007 DPRINTFN(5, ("bus_dmamem_map\n")); 3008 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3009 sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva, 3010 BUS_DMA_NOWAIT)) { 3011 printf(": can't map dma buffers (%lu bytes)\n", 3012 sizeof(struct bge_ring_data)); 3013 goto fail_3; 3014 } 3015 DPRINTFN(5, ("bus_dmamap_create\n")); 3016 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3017 sizeof(struct bge_ring_data), 0, 3018 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3019 printf(": can't create dma map\n"); 3020 goto fail_4; 3021 } 3022 DPRINTFN(5, ("bus_dmamap_load\n")); 3023 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3024 sizeof(struct bge_ring_data), NULL, 3025 BUS_DMA_NOWAIT)) { 3026 goto fail_5; 3027 } 3028 3029 DPRINTFN(5, ("bzero\n")); 3030 sc->bge_rdata = (struct bge_ring_data *)kva; 3031 3032 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 3033 3034 /* Set default tuneable values. */ 3035 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3036 sc->bge_rx_coal_ticks = 150; 3037 sc->bge_rx_max_coal_bds = 64; 3038 sc->bge_tx_coal_ticks = 300; 3039 sc->bge_tx_max_coal_bds = 400; 3040 3041 /* 5705 limits RX return ring to 512 entries. 
*/ 3042 if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc)) 3043 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3044 else 3045 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3046 3047 mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK); 3048#if NKSTAT > 0 3049 if (BGE_IS_5705_PLUS(sc)) 3050 bge_kstat_attach(sc); 3051#endif 3052 3053 /* Set up ifnet structure */ 3054 ifp = &sc->arpcom.ac_if; 3055 ifp->if_softc = sc; 3056 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3057 ifp->if_xflags = IFXF_MPSAFE; 3058 ifp->if_ioctl = bge_ioctl; 3059 ifp->if_qstart = bge_start; 3060 ifp->if_watchdog = bge_watchdog; 3061 ifq_init_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 3062 3063 DPRINTFN(5, ("bcopy\n")); 3064 bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 3065 3066 ifp->if_capabilities = IFCAP_VLAN_MTU; 3067 3068#if NVLAN > 0 3069 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 3070#endif 3071 3072 /* 3073 * 5700 B0 chips do not support checksumming correctly due 3074 * to hardware bugs. 3075 * 3076 * It seems all controllers have a bug that can generate UDP 3077 * datagrams with a checksum value 0 when TX UDP checksum 3078 * offloading is enabled. Generating UDP checksum value 0 is 3079 * a violation of RFC 768. 3080 */ 3081 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3082 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4; 3083 3084 if (BGE_IS_JUMBO_CAPABLE(sc)) 3085 ifp->if_hardmtu = BGE_JUMBO_MTU; 3086 3087 /* 3088 * Do MII setup. 3089 */ 3090 DPRINTFN(5, ("mii setup\n")); 3091 sc->bge_mii.mii_ifp = ifp; 3092 sc->bge_mii.mii_readreg = bge_miibus_readreg; 3093 sc->bge_mii.mii_writereg = bge_miibus_writereg; 3094 sc->bge_mii.mii_statchg = bge_miibus_statchg; 3095 3096 /* 3097 * Figure out what sort of media we have by checking the hardware 3098 * config word in the first 32K of internal NIC memory, or fall back to 3099 * examining the EEPROM if necessary. Note: on some BCM5700 cards, 3100 * this value seems to be unset. 
If that's the case, we have to rely on 3101 * identifying the NIC by its PCI subsystem ID, as we do below for the 3102 * SysKonnect SK-9D41. 3103 */ 3104 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 3105 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 3106 else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 3107 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 3108 sizeof(hwcfg))) { 3109 printf(": failed to read media type\n"); 3110 goto fail_6; 3111 } 3112 hwcfg = ntohl(hwcfg); 3113 } 3114 3115 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 3116 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 || 3117 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3118 if (BGE_IS_5700_FAMILY(sc)) 3119 sc->bge_flags |= BGE_FIBER_TBI; 3120 else 3121 sc->bge_flags |= BGE_FIBER_MII; 3122 } 3123 3124 /* Take advantage of single-shot MSI. */ 3125 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI) 3126 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & 3127 ~BGE_MSIMODE_ONE_SHOT_DISABLE); 3128 3129 /* Hookup IRQ last. */ 3130 DPRINTFN(5, ("pci_intr_establish\n")); 3131 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, 3132 bge_intr, sc, sc->bge_dev.dv_xname); 3133 if (sc->bge_intrhand == NULL) { 3134 printf(": couldn't establish interrupt"); 3135 if (intrstr != NULL) 3136 printf(" at %s", intrstr); 3137 printf("\n"); 3138 goto fail_6; 3139 } 3140 3141 /* 3142 * A Broadcom chip was detected. Inform the world. 
3143 */ 3144 printf(": %s, address %s\n", intrstr, 3145 ether_sprintf(sc->arpcom.ac_enaddr)); 3146 3147 if (sc->bge_flags & BGE_FIBER_TBI) { 3148 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 3149 bge_ifmedia_sts); 3150 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 3151 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 3152 0, NULL); 3153 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 3154 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 3155 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3156 } else { 3157 int mii_flags; 3158 3159 /* 3160 * Do transceiver setup. 3161 */ 3162 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 3163 bge_ifmedia_sts); 3164 mii_flags = MIIF_DOPAUSE; 3165 if (sc->bge_flags & BGE_FIBER_MII) 3166 mii_flags |= MIIF_HAVEFIBER; 3167 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 3168 sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags); 3169 3170 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 3171 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 3172 ifmedia_add(&sc->bge_mii.mii_media, 3173 IFM_ETHER|IFM_MANUAL, 0, NULL); 3174 ifmedia_set(&sc->bge_mii.mii_media, 3175 IFM_ETHER|IFM_MANUAL); 3176 } else 3177 ifmedia_set(&sc->bge_mii.mii_media, 3178 IFM_ETHER|IFM_AUTO); 3179 } 3180 3181 /* 3182 * Call MI attach routine. 
 */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bge_timeout, bge_tick, sc);
	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
	timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc);
	return;

	/*
	 * Attach failure unwind.  Each label releases the resource whose
	 * setup immediately preceded the failed step, then falls through
	 * to release everything acquired before it.
	 */
fail_6:
	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);

fail_5:
	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);

fail_4:
	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
	    sizeof(struct bge_ring_data));

fail_3:
	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);

fail_2:
	if ((sc->bge_flags & BGE_APE) != 0)
		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
		    sc->bge_apesize);

fail_1:
	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
}

/*
 * Detach routine: undo what bge_attach() did, in reverse order.
 * Stops the chip, tears down the interrupt, PHYs, media, the network
 * interface, and the DMA resources backing the ring block, then unmaps
 * the register (and, when present, APE) BARs.
 */
int
bge_detach(struct device *self, int flags)
{
	struct bge_softc *sc = (struct bge_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	bge_stop(sc, 1);

	if (sc->bge_intrhand)
		pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL)
		mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
	    sizeof(struct bge_ring_data));
	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);

	if ((sc->bge_flags & BGE_APE) != 0)
		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
		    sc->bge_apesize);

	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
	return (0);
}

/*
 * Power-state handler.  On suspend, deactivate children first and stop
 * the chip if it is running; on resume, re-initialize the interface if
 * it was configured up.
 */
int
bge_activate(struct device *self, int act)
{
	struct bge_softc *sc = (struct bge_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_RUNNING)
			bge_stop(sc, 0);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			bge_init(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Issue a global reset of the controller and bring it back to a state
 * where the driver can talk to it again.  Saves and restores the PCI
 * state the reset clobbers and applies per-chip errata workarounds.
 * The ordering of the register accesses below is significant and must
 * not be rearranged.
 */
void
bge_reset(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &sc->bge_pa;
	pcireg_t cachesize, command, devctl;
	u_int32_t reset, mac_mode, mac_mode_mask, val;
	void (*write_op)(struct bge_softc *, int, int);
	int i;

	/* Preserve the APE-related MAC mode bits across the reset. */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	/*
	 * Select the register write method that remains usable across
	 * the reset on this chip family.
	 */
	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) {
		if (sc->bge_flags & BGE_PCIE)
			write_op = bge_writembx;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	if
(BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 &&
	    !(sc->bge_flags & BGE_NO_EEPROM)) {
		/* Grab the NVRAM arbitration semaphore before resetting. */
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000)
			printf("%s: nvram lock timed out\n",
			    sc->bge_dev.dv_xname);
	}
	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);

	/* Disable fastboot on controllers that support it. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc))
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;

	if (sc->bge_flags & BGE_PCIE) {
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
		    !BGE_IS_5717_PLUS(sc)) {
			if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
				/* PCI Express 1.0 system */
				CSR_WRITE_4(sc, 0x7e2c, 0x20);
			}
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/*
			 * Prevent PCI Express link training
			 * during global reset.
			 */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		/* Kick the on-chip VCPU out of halt by hand. */
		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);

		sc->bge_flags |= BGE_NO_EEPROM;
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGE_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	/* PCIe parts need much longer to come back than PCI/PCI-X. */
	if (sc->bge_flags & BGE_PCIE)
		DELAY(100 * 1000);
	else
		DELAY(1000);

	if (sc->bge_flags & BGE_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			pcireg_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
		}

		devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
		    PCI_PCIE_DCSR);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS);
		/* Set PCI Express max payload size. */
		devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq;
		/* Clear error status (these bits are write-1-to-clear). */
		devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE |
		    PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
		    PCI_PCIE_DCSR, devctl);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);

	/* Re-enable MSI, if necessary, and enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		/* This chip disables MSI on reset. */
		if (sc->bge_flags & BGE_MSI) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    sc->bge_msicap + PCI_MSI_MC);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    sc->bge_msicap + PCI_MSI_MC,
			    val | PCI_MSI_MC_MSIE);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));

	/* Restore the saved MAC mode bits on top of the reset value. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		/* 5906: wait for the on-chip VCPU to finish its own init. */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}

		if (i >= BGE_TIMEOUT)
			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
	} else {
		/*
		 * Poll until we see 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete. We expect this to fail if no SEEPROM
		 * is fitted.
		 */
		for (i = 0; i < BGE_TIMEOUT * 10; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if ((i >= BGE_TIMEOUT * 10) &&
		    (!(sc->bge_flags & BGE_NO_EEPROM)))
			printf("%s: firmware handshake timed out\n",
			    sc->bge_dev.dv_xname);
		/* BCM57765 A0 needs additional time before accessing. */
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			DELAY(10 * 1000);	/* XXX */
	}

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_flags & BGE_FIBER_TBI &&
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
		val = CSR_READ_4(sc, BGE_SERDES_CFG);
		val = (val & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
	}

	if (sc->bge_flags & BGE_PCIE &&
	    !BGE_IS_5717_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
		/* Enable Data FIFO protection. */
		val = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

void
bge_rxeof(struct bge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	uint16_t rx_prod, rx_cons;
	int stdcnt = 0, jumbocnt = 0;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;
	int livelocked;

	rx_cons = sc->bge_rx_saved_considx;
	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;

	/* Nothing to do */
	if (rx_cons == rx_prod)
		return;

	ifp = &sc->arpcom.ac_if;

	/* Pull in the current status block before trusting the indexes. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = rx_prod - rx_cons;

	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));

	/* The producer may have wrapped; sync the tail piece separately. */
	if (tosync < 0) {
		tlen = (sc->bge_return_ring_cnt - rx_cons) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo receive ring. */
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;

			jumbocnt++;

			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				m_freem(m);
				continue;
			}
		} else {
			/* Frame came from the standard receive ring. */
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;

			stdcnt++;

			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				m_freem(m);
				continue;
			}
		}

#ifdef __STRICT_ALIGNMENT
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;

		bge_rxcsum(sc, cur_rx, m);

#if NVLAN > 0
		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);

	/* Replenish whichever rings we consumed buffers from. */
	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	if (stdcnt) {
		if_rxr_put(&sc->bge_std_ring, stdcnt);
		if (livelocked)
			if_rxr_livelocked(&sc->bge_std_ring);
		bge_fill_rx_ring_std(sc);
	}
	if (jumbocnt) {
		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
		if (livelocked)
			if_rxr_livelocked(&sc->bge_jumbo_ring);
		bge_fill_rx_ring_jumbo(sc);
	}
}

/*
 * Translate the RX descriptor's checksum results into mbuf
 * checksum-offload flags, honouring per-chip quirks.
 */
void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		/*
		 * 5700 B0 chips do not support checksumming correctly due
		 * to hardware bugs.
		 */
		return;
	} else if (BGE_IS_5717_PLUS(sc)) {
		/* 5717+ parts report errors in a separate error flag word. */
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
			    (cur_rx->bge_error_flag &
			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
			}
		}
	} else {
		/* Older parts deliver the raw checksum value instead. */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
		    cur_rx->bge_ip_csum == 0xFFFF)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
		}
	}
}

/*
 * Transmit completion handling: free the mbufs and DMA maps for
 * descriptors the chip has finished with and credit the freed slots
 * back to the transmit path.
 */
void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync, freed, txcnt;
	u_int32_t cons, newcons;
	struct mbuf *m;

	/* Nothing to do */
	cons = sc->bge_tx_saved_considx;
	newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;
	if (cons == newcons)
		return;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_tx_ring);
	tosync = newcons - cons;

	toff = offset + (cons * sizeof (struct bge_tx_bd));

	/* The consumer may have wrapped; sync the tail piece separately. */
	if (tosync < 0) {
		tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof
(struct bge_tx_bd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	freed = 0;
	while (cons != newcons) {
		cur_tx = &sc->bge_rdata->bge_tx_ring[cons];
		m = sc->bge_cdata.bge_tx_chain[cons];
		if (m != NULL) {
			dmamap = sc->bge_cdata.bge_tx_map[cons];

			sc->bge_cdata.bge_tx_chain[cons] = NULL;
			sc->bge_cdata.bge_tx_map[cons] = NULL;
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			m_freem(m);
		}
		freed++;
		BGE_INC(cons, BGE_TX_RING_CNT);
	}

	/* Credit the freed descriptors back to the transmit path. */
	txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed);

	sc->bge_tx_saved_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (txcnt == 0)
		ifp->if_timer = 0;

}

/*
 * Interrupt handler.  Decides whether the interrupt is really ours
 * (tagged-status aware, so it behaves on shared lines), acknowledges
 * it, and services link, RX and TX events.  Returns 1 when handled,
 * 0 when the interrupt belongs to another device.
 */
int
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t statusword, statustag;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* read status word from status block */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	statusword = sc->bge_rdata->bge_status_block.bge_status;
	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;

	if (sc->bge_flags & BGE_TAGGED_STATUS) {
		/* Unchanged tag and no interrupt pending: not ours. */
		if (sc->bge_lasttag == statustag &&
		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
		    BGE_PCISTATE_INTR_NOT_ACTIVE))
			return (0);
		sc->bge_lasttag = statustag;
	} else {
		if (!(statusword & BGE_STATFLAG_UPDATED) &&
		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
		    BGE_PCISTATE_INTR_NOT_ACTIVE))
			return (0);
		/* Ack interrupt and stop others from occurring. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
		statustag = 0;
	}

	/* clear status word */
	sc->bge_rdata->bge_status_block.bge_status = 0;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) {
		KERNEL_LOCK();
		bge_link_upd(sc);
		KERNEL_UNLOCK();
	}

	/* Re-enable interrupts. */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	return (1);
}

/*
 * One-second periodic timer: refresh statistics and poll link state.
 */
void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	int s;

	s = splnet();

	/* 5705+ parts are read via registers, older ones via NIC memory. */
	if (BGE_IS_5705_PLUS(sc)) {
		mtx_enter(&sc->bge_kstat_mtx);
		bge_stats_update_regs(sc);
		mtx_leave(&sc->bge_kstat_mtx);
	} else
		bge_stats_update(sc);

	if (sc->bge_flags & BGE_FIBER_TBI) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors.
		 * (extra input errors was reported for bcm5701 & bcm5704).
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	timeout_add_sec(&sc->bge_timeout, 1);

	splx(s);
}

/*
 * Gather statistics from the chip's statistics registers and fold them
 * into the interface counters and, when kstat is built in, the exported
 * kstat copies.  Used for BGE_IS_5705_PLUS controllers; called from
 * bge_tick() with bge_kstat_mtx held.
 */
void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t collisions, discards, inerrors;
	uint32_t ucast, mcast, bcast;
	u_int32_t val;
#if NKSTAT > 0
	struct kstat_kv *kvs = sc->bge_kstat->ks_data;
#endif

	collisions = CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));

	/*
	 * XXX
	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
	 * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0
	 * controllers includes the number of unwanted multicast frames.
	 * This comes from a silicon bug and known workaround to get rough
	 * (not exact) counter is to enable interrupt on MBUF low watermark
	 * attention. This can be accomplished by setting BGE_HCCMODE_ATTN
	 * bit of BGE_HDD_MODE, BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE
	 * and BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL. However
	 * that change would generate more interrupts and there are still
	 * possibilities of losing multiple frames during
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Given that
	 * the workaround still would not get correct counter I don't think
	 * it's worth to implement it. So ignore reading the counter on
	 * controllers that have the silicon bug.
	 */
	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
		discards = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	else
		discards = 0;

	inerrors = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);

	ifp->if_collisions += collisions;
	ifp->if_ierrors += discards + inerrors;

	ucast = CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
	mcast = CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
	bcast = CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
	if (sc->bge_flags & BGE_RDMA_BUG) {
		/*
		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
		 * frames, it's safe to disable workaround for DMA engine's
		 * miscalculation of TXMBUF space.
		 */
		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			sc->bge_flags &= ~BGE_RDMA_BUG;
		}
	}

#if NKSTAT > 0
	/* Mirror the per-tick deltas into the exported kstat counters. */
	kstat_kv_u32(&kvs[bge_stat_out_ucast_pkt]) += ucast;
	kstat_kv_u32(&kvs[bge_stat_out_mcast_pkt]) += mcast;
	kstat_kv_u32(&kvs[bge_stat_out_bcast_pkt]) += bcast;
	kstat_kv_u32(&kvs[bge_stat_collisions]) += collisions;
	kstat_kv_u32(&kvs[bge_stat_if_in_drops]) += discards;
	kstat_kv_u32(&kvs[bge_stat_if_in_errors]) += inerrors;
#endif
}

/*
 * Gather statistics from the statistics block in NIC memory (used for
 * controllers that are not BGE_IS_5705_PLUS; see bge_tick()).  The
 * hardware counters are free-running, so only the delta since the last
 * read is added to the interface counters.
 */
void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
	u_int32_t cnt;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	sc->bge_rx_overruns = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors);
	sc->bge_rx_inerrors = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef READ_STAT
}

/*
 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
 */
/*
 * Walk the mbuf chain of an outbound packet and eliminate any data
 * segment shorter than 8 bytes, which the BCM5700 B-revision DMA engine
 * cannot handle (see caller bge_encap()).  Short segments are merged
 * into the previous mbuf, pulled up from the next mbuf, or moved into a
 * freshly allocated mbuf that is stitched into the chain.
 *
 * Returns 0 on success or ENOBUFS if a replacement mbuf could not be
 * allocated; on success the chain carries the same payload bytes in the
 * same order, possibly in fewer/different mbufs.
 */
int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf	*m, *prev, *n = NULL;
	int		totlen, newprevlen;

	prev = NULL;
	totlen = 0;

	for (m = pkt; m != NULL; prev = m,m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen ;

		totlen += mlen;
		if (mlen == 0)
			continue;
		if (mlen >= 8)
			continue;

		/* If we get here, mbuf data is too small for DMA engine.
		 * Try to fix by shuffling data to prev or next in chain.
		 * If that fails, do a compacting deep-copy of the whole chain.
		 */

		/* Internal frag. If fits in prev, copy it there. */
		if (prev && m_trailingspace(prev) >= m->m_len) {
			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL &&
		    m_trailingspace(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and have enough data in next, pull up. */

			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else if (m->m_next == NULL || 1) {
			/* Got a runt at the very end of the packet.
			 * borrow data from the tail of the preceding mbuf and
			 * update its length in-place. (The original data is still
			 * valid, so we can do this even if prev is not writable.)
			 */

			/* if we'd make prev a runt, just move all of its data. */
#ifdef DEBUG
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
#endif
			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

			newprevlen = prev->m_len - shortfall;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			KASSERT(m->m_len + shortfall < MLEN
				/*,
				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);

			/* first copy the data we're stealing from prev */
			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);

			/* update prev->m_len accordingly */
			prev->m_len -= shortfall;

			/* copy data from runt m */
			bcopy(m->m_data, n->m_data + shortfall, m->m_len);

			/* n holds what we stole from prev, plus m */
			n->m_len = shortfall + m->m_len;

			/* stitch n into chain and free m */
			n->m_next = m->m_next;
			prev->m_next = n;
			/* KASSERT(m->m_next == NULL); */
			m->m_next = NULL;
			m_free(m);
			m = n;	/* for continuing loop */
		}
	}
	return (0);
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 *
 * Returns 0 on success or ENOBUFS if a pad mbuf could not be allocated.
 * On success both the last mbuf's m_len and the packet header length
 * have grown by the pad amount.
 */
int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			;
		if (m_trailingspace(last) < padlen) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * On success, *txinc is advanced by the number of descriptors consumed
 * (the caller commits them to the chip in one batch) and the mbuf is
 * owned by the ring until bge_txeof reclaims it.  Returns ENOBUFS and
 * leaves the ring untouched on any failure; the caller frees the mbuf.
 */
int
bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc)
{
	struct bge_tx_bd	*f = NULL;
	u_int32_t		frag, cur;
	u_int16_t		csum_flags = 0;
	bus_dmamap_t		dmamap;
	int			i = 0;

	/* First free slot, accounting for descriptors queued this batch. */
	cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT;

	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			/*
			 * Zero-pad runts by hand so the checksum assist
			 * does not fold the chip's own padding into the
			 * TCP/UDP checksum (see bge_cksum_pad()).
			 */
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    bge_cksum_pad(m) != 0)
				return (ENOBUFS);
		}
	}

	if (sc->bge_flags & BGE_JUMBO_FRAME &&
	    m->m_pkthdr.len > ETHER_MAX_LEN)
		csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;

	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes. If we encounter a teeny mbuf
	 * at the end of a chain, we can pad. Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m) != 0)
		return (ENOBUFS);

doit:
	dmamap = sc->bge_txdma[cur];

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
	    BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		/* Too many segments: compact the chain and retry once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (ENOBUFS);
	}

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		/* Slot still owned by an in-flight packet: ring is full. */
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;
		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;
		f->bge_flags = csum_flags;
		f->bge_vlan_tag = 0;
#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = m->m_pkthdr.ether_vtag;
		}
#endif
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
	}

	if (i < dmamap->dm_nsegs)
		goto fail_unload;

	/* Producer may not catch up with the consumer index. */
	if (frag == sc->bge_tx_saved_considx)
		goto fail_unload;

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Mark the final descriptor and record ownership on it. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m;
	sc->bge_cdata.bge_tx_map[cur] = dmamap;

	*txinc += dmamap->dm_nsegs;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->bge_dmatag, dmamap);

	return (ENOBUFS);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
/*
 * ifq start routine: drain the send queue into the tx ring, then kick
 * the chip once with the new producer index.  Runs without the kernel
 * lock (ifq start context); bge_txcnt is updated atomically because
 * bge_txeof decrements it from interrupt context.
 */
void
bge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct bge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int txinc;

	/* No link: nothing can be sent, drop everything queued. */
	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
		ifq_purge(ifq);
		return;
	}

	txinc = 0;
	while (1) {
		/* Check if we have enough free send BDs. */
		if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >=
		    BGE_TX_RING_CNT) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		if (bge_encap(sc, m, &txinc) != 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txinc != 0) {
		/* Transmit */
		sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) %
		    BGE_TX_RING_CNT;
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
		/* 5700 B-step erratum: the mailbox write must be doubled. */
		if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO,
			    sc->bge_tx_prodidx);

		atomic_add_int(&sc->bge_txcnt, txinc);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

/*
 * (Re)initialize the hardware and mark the interface running.
 * Called at splnet or from process context; performs a full stop,
 * reset and block re-initialization, so any in-flight rx/tx state
 * is discarded.  xsc is the softc (void * for timeout/ioctl callers).
 */
void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp;
	u_int16_t *m;
	u_int32_t mode;
	int s;

	s = splnet();

	ifp = &sc->arpcom.ac_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc, 0);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
		splx(s);
		return;
	}

	/* Specify MRU. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		CSR_WRITE_4(sc, BGE_RX_MTU,
		    BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
	else
		CSR_WRITE_4(sc, BGE_RX_MTU,
		    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
		/* Disable hardware decapsulation of VLAN frames. */
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	}

	/* Program promiscuous mode and multicast filters. */
	bge_iff(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		u_int32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			printf("%s: 5705 A0 chip failed to load RX ring\n",
			    sc->bge_dev.dv_xname);
	}

	/* Init Jumbo RX ring. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_tx_collisions = 0;
	sc->bge_rx_discards = 0;
	sc->bge_rx_inerrors = 0;
	sc->bge_rx_overruns = 0;
	sc->bge_tx_discards = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		/* Preserve the chip's current jumbo/count-down settings. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}

	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;

	/* Turn on receiver */
	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_add_sec(&sc->bge_timeout, 1);
}

/*
 * Set media options.
4415 */ 4416int 4417bge_ifmedia_upd(struct ifnet *ifp) 4418{ 4419 struct bge_softc *sc = ifp->if_softc; 4420 struct mii_data *mii = &sc->bge_mii; 4421 struct ifmedia *ifm = &sc->bge_ifmedia; 4422 4423 /* If this is a 1000baseX NIC, enable the TBI port. */ 4424 if (sc->bge_flags & BGE_FIBER_TBI) { 4425 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 4426 return (EINVAL); 4427 switch(IFM_SUBTYPE(ifm->ifm_media)) { 4428 case IFM_AUTO: 4429 /* 4430 * The BCM5704 ASIC appears to have a special 4431 * mechanism for programming the autoneg 4432 * advertisement registers in TBI mode. 4433 */ 4434 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4435 u_int32_t sgdig; 4436 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 4437 if (sgdig & BGE_SGDIGSTS_DONE) { 4438 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4439 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4440 sgdig |= BGE_SGDIGCFG_AUTO | 4441 BGE_SGDIGCFG_PAUSE_CAP | 4442 BGE_SGDIGCFG_ASYM_PAUSE; 4443 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4444 sgdig | BGE_SGDIGCFG_SEND); 4445 DELAY(5); 4446 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4447 } 4448 } 4449 break; 4450 case IFM_1000_SX: 4451 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 4452 BGE_CLRBIT(sc, BGE_MAC_MODE, 4453 BGE_MACMODE_HALF_DUPLEX); 4454 } else { 4455 BGE_SETBIT(sc, BGE_MAC_MODE, 4456 BGE_MACMODE_HALF_DUPLEX); 4457 } 4458 DELAY(40); 4459 break; 4460 default: 4461 return (EINVAL); 4462 } 4463 /* XXX 802.3x flow control for 1000BASE-SX */ 4464 return (0); 4465 } 4466 4467 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 4468 if (mii->mii_instance) { 4469 struct mii_softc *miisc; 4470 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4471 mii_phy_reset(miisc); 4472 } 4473 mii_mediachg(mii); 4474 4475 /* 4476 * Force an interrupt so that we will call bge_link_upd 4477 * if needed and clear any pending link state attention. 4478 * Without this we are not getting any further interrupts 4479 * for link state changes and thus will not UP the link and 4480 * not be able to send in bge_start. 
The only way to get 4481 * things working was to receive a packet and get a RX intr. 4482 */ 4483 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4484 sc->bge_flags & BGE_IS_5788) 4485 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4486 else 4487 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4488 4489 return (0); 4490} 4491 4492/* 4493 * Report current media status. 4494 */ 4495void 4496bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4497{ 4498 struct bge_softc *sc = ifp->if_softc; 4499 struct mii_data *mii = &sc->bge_mii; 4500 4501 if (sc->bge_flags & BGE_FIBER_TBI) { 4502 ifmr->ifm_status = IFM_AVALID; 4503 ifmr->ifm_active = IFM_ETHER; 4504 if (CSR_READ_4(sc, BGE_MAC_STS) & 4505 BGE_MACSTAT_TBI_PCS_SYNCHED) { 4506 ifmr->ifm_status |= IFM_ACTIVE; 4507 } else { 4508 ifmr->ifm_active |= IFM_NONE; 4509 return; 4510 } 4511 ifmr->ifm_active |= IFM_1000_SX; 4512 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4513 ifmr->ifm_active |= IFM_HDX; 4514 else 4515 ifmr->ifm_active |= IFM_FDX; 4516 return; 4517 } 4518 4519 mii_pollstat(mii); 4520 ifmr->ifm_status = mii->mii_media_status; 4521 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4522 sc->bge_flowflags; 4523} 4524 4525int 4526bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 4527{ 4528 struct bge_softc *sc = ifp->if_softc; 4529 struct ifreq *ifr = (struct ifreq *) data; 4530 int s, error = 0; 4531 struct mii_data *mii; 4532 4533 s = splnet(); 4534 4535 switch(command) { 4536 case SIOCSIFADDR: 4537 ifp->if_flags |= IFF_UP; 4538 if (!(ifp->if_flags & IFF_RUNNING)) 4539 bge_init(sc); 4540 break; 4541 4542 case SIOCSIFFLAGS: 4543 if (ifp->if_flags & IFF_UP) { 4544 if (ifp->if_flags & IFF_RUNNING) 4545 error = ENETRESET; 4546 else 4547 bge_init(sc); 4548 } else { 4549 if (ifp->if_flags & IFF_RUNNING) 4550 bge_stop(sc, 0); 4551 } 4552 break; 4553 4554 case SIOCSIFMEDIA: 4555 /* XXX Flow control is not supported for 1000BASE-SX */ 4556 if (sc->bge_flags & 
BGE_FIBER_TBI) { 4557 ifr->ifr_media &= ~IFM_ETH_FMASK; 4558 sc->bge_flowflags = 0; 4559 } 4560 4561 /* Flow control requires full-duplex mode. */ 4562 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 4563 (ifr->ifr_media & IFM_FDX) == 0) { 4564 ifr->ifr_media &= ~IFM_ETH_FMASK; 4565 } 4566 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 4567 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 4568 /* We can do both TXPAUSE and RXPAUSE. */ 4569 ifr->ifr_media |= 4570 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 4571 } 4572 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 4573 } 4574 /* FALLTHROUGH */ 4575 case SIOCGIFMEDIA: 4576 if (sc->bge_flags & BGE_FIBER_TBI) { 4577 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 4578 command); 4579 } else { 4580 mii = &sc->bge_mii; 4581 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 4582 command); 4583 } 4584 break; 4585 4586 case SIOCGIFRXR: 4587 error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data); 4588 break; 4589 4590 default: 4591 error = ether_ioctl(ifp, &sc->arpcom, command, data); 4592 } 4593 4594 if (error == ENETRESET) { 4595 if (ifp->if_flags & IFF_RUNNING) 4596 bge_iff(sc); 4597 error = 0; 4598 } 4599 4600 splx(s); 4601 return (error); 4602} 4603 4604int 4605bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri) 4606{ 4607 struct if_rxring_info ifr[2]; 4608 u_int n = 0; 4609 4610 memset(ifr, 0, sizeof(ifr)); 4611 4612 if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) { 4613 ifr[n].ifr_size = sc->bge_rx_std_len; 4614 strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name)); 4615 ifr[n].ifr_info = sc->bge_std_ring; 4616 4617 n++; 4618 } 4619 4620 if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) { 4621 ifr[n].ifr_size = BGE_JLEN; 4622 strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name)); 4623 ifr[n].ifr_info = sc->bge_jumbo_ring; 4624 4625 n++; 4626 } 4627 4628 return (if_rxr_info_ioctl(ifri, n, ifr)); 4629} 4630 4631void 4632bge_watchdog(struct ifnet *ifp) 4633{ 4634 struct bge_softc *sc; 4635 4636 sc = 
ifp->if_softc; 4637 4638 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 4639 4640 bge_init(sc); 4641 4642 ifp->if_oerrors++; 4643} 4644 4645void 4646bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit) 4647{ 4648 int i; 4649 4650 BGE_CLRBIT(sc, reg, bit); 4651 4652 for (i = 0; i < BGE_TIMEOUT; i++) { 4653 if ((CSR_READ_4(sc, reg) & bit) == 0) 4654 return; 4655 delay(100); 4656 } 4657 4658 DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 4659 sc->bge_dev.dv_xname, (u_long) reg, bit)); 4660} 4661 4662/* 4663 * Stop the adapter and free any mbufs allocated to the 4664 * RX and TX lists. 4665 */ 4666void 4667bge_stop(struct bge_softc *sc, int softonly) 4668{ 4669 struct ifnet *ifp = &sc->arpcom.ac_if; 4670 struct ifmedia_entry *ifm; 4671 struct mii_data *mii; 4672 int mtmp, itmp; 4673 4674 timeout_del(&sc->bge_timeout); 4675 timeout_del(&sc->bge_rxtimeout); 4676 timeout_del(&sc->bge_rxtimeout_jumbo); 4677 4678 ifp->if_flags &= ~IFF_RUNNING; 4679 ifp->if_timer = 0; 4680 4681 if (!softonly) { 4682 /* 4683 * Tell firmware we're shutting down. 
4684 */ 4685 /* bge_stop_fw(sc); */ 4686 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 4687 4688 /* 4689 * Disable all of the receiver blocks 4690 */ 4691 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4692 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4693 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4694 if (BGE_IS_5700_FAMILY(sc)) 4695 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4696 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4697 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4698 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4699 4700 /* 4701 * Disable all of the transmit blocks 4702 */ 4703 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4704 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4705 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4706 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4707 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4708 if (BGE_IS_5700_FAMILY(sc)) 4709 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4710 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4711 4712 /* 4713 * Shut down all of the memory managers and related 4714 * state machines. 4715 */ 4716 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4717 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4718 if (BGE_IS_5700_FAMILY(sc)) 4719 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4720 4721 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4722 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4723 4724 if (!BGE_IS_5705_PLUS(sc)) { 4725 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4726 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4727 } 4728 4729 bge_reset(sc); 4730 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 4731 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 4732 4733 /* 4734 * Tell firmware we're shutting down. 
4735 */ 4736 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4737 } 4738 4739 intr_barrier(sc->bge_intrhand); 4740 ifq_barrier(&ifp->if_snd); 4741 4742 ifq_clr_oactive(&ifp->if_snd); 4743 4744 /* Free the RX lists. */ 4745 bge_free_rx_ring_std(sc); 4746 4747 /* Free jumbo RX list. */ 4748 if (sc->bge_flags & BGE_JUMBO_RING) 4749 bge_free_rx_ring_jumbo(sc); 4750 4751 /* Free TX buffers. */ 4752 bge_free_tx_ring(sc); 4753 4754 /* 4755 * Isolate/power down the PHY, but leave the media selection 4756 * unchanged so that things will be put back to normal when 4757 * we bring the interface back up. 4758 */ 4759 if (!(sc->bge_flags & BGE_FIBER_TBI)) { 4760 mii = &sc->bge_mii; 4761 itmp = ifp->if_flags; 4762 ifp->if_flags |= IFF_UP; 4763 ifm = mii->mii_media.ifm_cur; 4764 mtmp = ifm->ifm_media; 4765 ifm->ifm_media = IFM_ETHER|IFM_NONE; 4766 mii_mediachg(mii); 4767 ifm->ifm_media = mtmp; 4768 ifp->if_flags = itmp; 4769 } 4770 4771 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4772 4773 if (!softonly) { 4774 /* Clear MAC's link state (PHY may still have link UP). */ 4775 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4776 } 4777} 4778 4779void 4780bge_link_upd(struct bge_softc *sc) 4781{ 4782 struct ifnet *ifp = &sc->arpcom.ac_if; 4783 struct mii_data *mii = &sc->bge_mii; 4784 u_int32_t status; 4785 int link; 4786 4787 /* Clear 'pending link event' flag */ 4788 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 4789 4790 /* 4791 * Process link state changes. 4792 * Grrr. The link status word in the status block does 4793 * not work correctly on the BCM5700 rev AX and BX chips, 4794 * according to all available information. Hence, we have 4795 * to enable MII interrupts in order to properly obtain 4796 * async link changes. Unfortunately, this also means that 4797 * we have to read the MAC status register to detect link 4798 * changes, thereby adding an additional register access to 4799 * the interrupt handler. 
4800 * 4801 */ 4802 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 4803 status = CSR_READ_4(sc, BGE_MAC_STS); 4804 if (status & BGE_MACSTAT_MI_INTERRUPT) { 4805 mii_pollstat(mii); 4806 4807 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4808 mii->mii_media_status & IFM_ACTIVE && 4809 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4810 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4811 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4812 (!(mii->mii_media_status & IFM_ACTIVE) || 4813 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4814 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4815 4816 /* Clear the interrupt */ 4817 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4818 BGE_EVTENB_MI_INTERRUPT); 4819 bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr, 4820 BRGPHY_MII_ISR); 4821 bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr, 4822 BRGPHY_MII_IMR, BRGPHY_INTRS); 4823 } 4824 return; 4825 } 4826 4827 if (sc->bge_flags & BGE_FIBER_TBI) { 4828 status = CSR_READ_4(sc, BGE_MAC_STS); 4829 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4830 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 4831 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4832 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 4833 BGE_CLRBIT(sc, BGE_MAC_MODE, 4834 BGE_MACMODE_TBI_SEND_CFGS); 4835 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4836 status = CSR_READ_4(sc, BGE_MAC_MODE); 4837 link = (status & BGE_MACMODE_HALF_DUPLEX) ? 4838 LINK_STATE_HALF_DUPLEX : 4839 LINK_STATE_FULL_DUPLEX; 4840 ifp->if_baudrate = IF_Gbps(1); 4841 if (ifp->if_link_state != link) { 4842 ifp->if_link_state = link; 4843 if_link_state_change(ifp); 4844 } 4845 } 4846 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 4847 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4848 link = LINK_STATE_DOWN; 4849 ifp->if_baudrate = 0; 4850 if (ifp->if_link_state != link) { 4851 ifp->if_link_state = link; 4852 if_link_state_change(ifp); 4853 } 4854 } 4855 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 4856 /* 4857 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit 4858 * in status word always set. 
Workaround this bug by reading 4859 * PHY link status directly. 4860 */ 4861 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 4862 BGE_STS_LINK : 0; 4863 4864 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 4865 mii_pollstat(mii); 4866 4867 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4868 mii->mii_media_status & IFM_ACTIVE && 4869 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4870 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4871 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4872 (!(mii->mii_media_status & IFM_ACTIVE) || 4873 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4874 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4875 } 4876 } else { 4877 /* 4878 * For controllers that call mii_tick, we have to poll 4879 * link status. 4880 */ 4881 mii_pollstat(mii); 4882 } 4883 4884 /* Clear the attention */ 4885 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 4886 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 4887 BGE_MACSTAT_LINK_CHANGED); 4888} 4889 4890#if NKSTAT > 0 4891 4892struct bge_stat { 4893 char name[KSTAT_KV_NAMELEN]; 4894 enum kstat_kv_unit unit; 4895 bus_size_t reg; 4896}; 4897 4898#define MACREG(_f) \ 4899 BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f) 4900 4901static const struct bge_stat bge_kstat_tpl[] = { 4902 /* MAC stats */ 4903 [bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES, 4904 MACREG(ifHCOutOctets) }, 4905 [bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 }, 4906 [bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE, 4907 MACREG(outXonSent) }, 4908 [bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE, 4909 MACREG(outXonSent) }, 4910 [bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE, 4911 MACREG(dot3StatsInternalMacTransmitErrors) }, 4912 [bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS, 4913 MACREG(dot3StatsSingleCollisionFrames) }, 4914 [bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS, 4915 MACREG(dot3StatsMultipleCollisionFrames) }, 4916 [bge_stat_deferred_xmit] = { "deferred xmit", 
KSTAT_KV_U_NONE, 4917 MACREG(dot3StatsDeferredTransmissions) }, 4918 [bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE, 4919 MACREG(dot3StatsExcessiveCollisions) }, 4920 [bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE, 4921 MACREG(dot3StatsLateCollisions) }, 4922 [bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 }, 4923 [bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 }, 4924 [bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 }, 4925 [bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES, 4926 MACREG(ifHCInOctets) }, 4927 [bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE, 4928 MACREG(etherStatsFragments) }, 4929 [bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS, 4930 MACREG(ifHCInUcastPkts) }, 4931 [bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS, 4932 MACREG(ifHCInMulticastPkts) }, 4933 [bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS, 4934 MACREG(ifHCInBroadcastPkts) }, 4935 [bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE, 4936 MACREG(dot3StatsFCSErrors) }, 4937 [bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE, 4938 MACREG(dot3StatsAlignmentErrors) }, 4939 [bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE, 4940 MACREG(xonPauseFramesReceived) }, 4941 [bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE, 4942 MACREG(xoffPauseFramesReceived) }, 4943 [bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE, 4944 MACREG(macControlFramesReceived) }, 4945 [bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE, 4946 MACREG(xoffStateEntered) }, 4947 [bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE, 4948 MACREG(dot3StatsFramesTooLong) }, 4949 [bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE, 4950 MACREG(etherStatsJabbers) }, 4951 [bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE, 4952 MACREG(etherStatsUndersizePkts) }, 4953 4954 /* Send Data Initiator stats */ 4955 [bge_stat_dma_rq_full] = 
{ "DMA RQ full", KSTAT_KV_U_NONE, 4956 BGE_LOCSTATS_DMA_RQ_FULL }, 4957 [bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE, 4958 BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL }, 4959 [bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE, 4960 BGE_LOCSTATS_SDC_QUEUE_FULL }, 4961 [bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE, 4962 BGE_LOCSTATS_NIC_SENDPROD_SET }, 4963 [bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE, 4964 BGE_LOCSTATS_STATS_UPDATED }, 4965 [bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS }, 4966 [bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE, 4967 BGE_LOCSTATS_AVOIDED_IRQS }, 4968 [bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE, 4969 BGE_LOCSTATS_TX_THRESH_HIT }, 4970 4971 /* Receive List Placement stats */ 4972 [bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE, 4973 BGE_RXLP_LOCSTAT_FILTDROP }, 4974 [bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE, 4975 BGE_RXLP_LOCSTAT_DMA_WRQ_FULL }, 4976 [bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE, 4977 BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL }, 4978 [bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE, 4979 BGE_RXLP_LOCSTAT_OUT_OF_BDS }, 4980 [bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 }, 4981 [bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 }, 4982 [bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE, 4983 BGE_RXLP_LOCSTAT_RXTHRESH_HIT }, 4984}; 4985 4986int 4987bge_kstat_read(struct kstat *ks) 4988{ 4989 struct bge_softc *sc = ks->ks_softc; 4990 struct kstat_kv *kvs = ks->ks_data; 4991 int i; 4992 4993 bge_stats_update_regs(sc); 4994 4995 for (i = 0; i < nitems(bge_kstat_tpl); i++) { 4996 if (bge_kstat_tpl[i].reg != 0) 4997 kstat_kv_u32(kvs) += CSR_READ_4(sc, 4998 bge_kstat_tpl[i].reg); 4999 kvs++; 5000 } 5001 5002 getnanouptime(&ks->ks_updated); 5003 return 0; 5004} 5005 5006void 5007bge_kstat_attach(struct bge_softc *sc) 5008{ 5009 struct kstat *ks; 5010 struct 
kstat_kv *kvs; 5011 int i; 5012 5013 5014 ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0, 5015 KSTAT_T_KV, 0); 5016 if (ks == NULL) 5017 return; 5018 5019 kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF, 5020 M_ZERO | M_WAITOK); 5021 for (i = 0; i < nitems(bge_kstat_tpl); i++) { 5022 const struct bge_stat *tpl = &bge_kstat_tpl[i]; 5023 kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32, 5024 tpl->unit); 5025 } 5026 5027 kstat_set_mutex(ks, &sc->bge_kstat_mtx); 5028 ks->ks_softc = sc; 5029 ks->ks_data = kvs; 5030 ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs); 5031 ks->ks_read = bge_kstat_read; 5032 5033 sc->bge_kstat = ks; 5034 kstat_install(ks); 5035} 5036#endif /* NKSTAT > 0 */ 5037