1/*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
34 */ 35 36#include <sys/cdefs.h> 37__FBSDID("$FreeBSD$"); 38 39/* 40 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver 41 * 42 * The Broadcom BCM5700 is based on technology originally developed by 43 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet 44 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 45 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 46 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 47 * frames, highly configurable RX filtering, and 16 RX and TX queues 48 * (which, along with RX filter rules, can be used for QOS applications). 49 * Other features, such as TCP segmentation, may be available as part 50 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 51 * firmware images can be stored in hardware and need not be compiled 52 * into the driver. 53 * 54 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 55 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 56 * 57 * The BCM5701 is a single-chip solution incorporating both the BCM5700 58 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 59 * does not support external SSRAM. 60 * 61 * Broadcom also produces a variation of the BCM5700 under the "Altima" 62 * brand name, which is functionally similar but lacks PCI-X support. 63 * 64 * Without external SSRAM, you can only have at most 4 TX rings, 65 * and the use of the mini RX ring is disabled. This seems to imply 66 * that these features are simply not available on the BCM5701. As a 67 * result, this driver does not implement any support for the mini RX 68 * ring. 
69 */ 70 71#ifdef HAVE_KERNEL_OPTION_HEADERS 72#include "opt_device_polling.h" 73#endif 74 75#include <sys/param.h> 76#include <sys/endian.h> 77#include <sys/systm.h> 78#include <sys/sockio.h> 79#include <sys/mbuf.h> 80#include <sys/malloc.h> 81#include <sys/kernel.h> 82#include <sys/module.h> 83#include <sys/socket.h> 84#include <sys/sysctl.h> 85#include <sys/taskqueue.h> 86 87#include <net/debugnet.h> 88#include <net/if.h> 89#include <net/if_var.h> 90#include <net/if_arp.h> 91#include <net/ethernet.h> 92#include <net/if_dl.h> 93#include <net/if_media.h> 94 95#include <net/bpf.h> 96 97#include <net/if_types.h> 98#include <net/if_vlan_var.h> 99 100#include <netinet/in_systm.h> 101#include <netinet/in.h> 102#include <netinet/ip.h> 103#include <netinet/tcp.h> 104 105#include <machine/bus.h> 106#include <machine/resource.h> 107#include <sys/bus.h> 108#include <sys/rman.h> 109 110#include <dev/mii/mii.h> 111#include <dev/mii/miivar.h> 112#include "miidevs.h" 113#include <dev/mii/brgphyreg.h> 114 115#include <dev/pci/pcireg.h> 116#include <dev/pci/pcivar.h> 117 118#include <dev/bge/if_bgereg.h> 119 120#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) 121#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 122 123MODULE_DEPEND(bge, pci, 1, 1, 1); 124MODULE_DEPEND(bge, ether, 1, 1, 1); 125MODULE_DEPEND(bge, miibus, 1, 1, 1); 126 127/* "device miibus" required. See GENERIC if you get errors here. */ 128#include "miibus_if.h" 129 130/* 131 * Various supported device vendors/types and their names. Note: the 132 * spec seems to indicate that the hardware still has Alteon's vendor 133 * ID burned into it, though it will always be overriden by the vendor 134 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
135 */ 136static const struct bge_type { 137 uint16_t bge_vid; 138 uint16_t bge_did; 139} bge_devs[] = { 140 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, 141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, 142 143 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, 144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, 145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, 146 147 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, 148 149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, 150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, 152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, 154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, 156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, 158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, 160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, 162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, 164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, 166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 }, 170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717C }, 171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 }, 172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, 176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, 177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 }, 178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, 194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, 195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, 196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, 197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, 198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5762 }, 199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, 200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, 205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, 206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, 207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, 210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, 217 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, 218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, 219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 }, 220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 }, 221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 }, 222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 }, 223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 }, 224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 }, 225 { BCOM_VENDORID, 
BCOM_DEVICEID_BCM57780 }, 226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 }, 227 { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 }, 228 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 }, 229 { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 }, 230 { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 }, 231 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, 232 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, 233 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 }, 234 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 }, 235 236 { SK_VENDORID, SK_DEVICEID_ALTIMA }, 237 238 { TC_VENDORID, TC_DEVICEID_3C996 }, 239 240 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, 241 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, 242 { 0, 0 } 243}; 244 245static const struct bge_vendor { 246 uint16_t v_id; 247 const char *v_name; 248} bge_vendors[] = { 249 { ALTEON_VENDORID, "Alteon" }, 250 { ALTIMA_VENDORID, "Altima" }, 251 { APPLE_VENDORID, "Apple" }, 252 { BCOM_VENDORID, "Broadcom" }, 253 { SK_VENDORID, "SysKonnect" }, 254 { TC_VENDORID, "3Com" }, 255 { FJTSU_VENDORID, "Fujitsu" }, 256 { 0, NULL } 257}; 258 259static const struct bge_revision { 260 uint32_t br_chipid; 261 const char *br_name; 262} bge_revisions[] = { 263 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 264 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 265 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 266 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 267 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 268 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 269 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 270 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 271 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 272 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 273 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 274 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 275 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 276 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 277 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 278 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 279 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 280 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 281 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" 
}, 282 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 283 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 284 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 285 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 286 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 287 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 288 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 289 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 290 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 291 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 292 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 293 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 294 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 295 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 296 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 297 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 298 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 299 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 300 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 301 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 302 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 303 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 304 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 305 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 306 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, 307 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, 308 { BGE_CHIPID_BCM5717_C0, "BCM5717 C0" }, 309 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, 310 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, 311 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 312 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 313 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 314 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, 315 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 316 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 317 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, 318 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 319 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 320 /* 5754 and 5787 share the same ASIC ID */ 321 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 322 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 323 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 324 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 325 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 
326 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, 327 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, 328 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 329 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 330 { 0, NULL } 331}; 332 333/* 334 * Some defaults for major revisions, so that newer steppings 335 * that we don't know about have a shot at working. 336 */ 337static const struct bge_revision bge_majorrevs[] = { 338 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 339 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 340 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 341 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 342 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 343 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 344 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 345 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 346 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 347 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 348 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 349 { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 350 { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 351 { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 352 /* 5754 and 5787 share the same ASIC ID */ 353 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 354 { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 355 { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 356 { BGE_ASICREV_BCM57766, "unknown BCM57766" }, 357 { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 358 { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 359 { BGE_ASICREV_BCM5719, "unknown BCM5719" }, 360 { BGE_ASICREV_BCM5720, "unknown BCM5720" }, 361 { BGE_ASICREV_BCM5762, "unknown BCM5762" }, 362 { 0, NULL } 363}; 364 365#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 366#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 367#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 368#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 369#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 370#define BGE_IS_5755_PLUS(sc) 
((sc)->bge_flags & BGE_FLAG_5755_PLUS) 371#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS) 372#define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS) 373 374static uint32_t bge_chipid(device_t); 375static const struct bge_vendor * bge_lookup_vendor(uint16_t); 376static const struct bge_revision * bge_lookup_rev(uint32_t); 377 378typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 379 380static int bge_probe(device_t); 381static int bge_attach(device_t); 382static int bge_detach(device_t); 383static int bge_suspend(device_t); 384static int bge_resume(device_t); 385static void bge_release_resources(struct bge_softc *); 386static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 387static int bge_dma_alloc(struct bge_softc *); 388static void bge_dma_free(struct bge_softc *); 389static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, 390 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); 391 392static void bge_devinfo(struct bge_softc *); 393static int bge_mbox_reorder(struct bge_softc *); 394 395static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); 396static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 397static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 398static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 399static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 400 401static void bge_txeof(struct bge_softc *, uint16_t); 402static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); 403static int bge_rxeof(struct bge_softc *, uint16_t, int); 404 405static void bge_asf_driver_up (struct bge_softc *); 406static void bge_tick(void *); 407static void bge_stats_clear_regs(struct bge_softc *); 408static void bge_stats_update(struct bge_softc *); 409static void bge_stats_update_regs(struct bge_softc *); 410static struct mbuf *bge_check_short_dma(struct mbuf *); 411static struct mbuf *bge_setup_tso(struct 
bge_softc *, struct mbuf *, 412 uint16_t *, uint16_t *); 413static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 414 415static void bge_intr(void *); 416static int bge_msi_intr(void *); 417static void bge_intr_task(void *, int); 418static void bge_start(if_t); 419static void bge_start_locked(if_t); 420static void bge_start_tx(struct bge_softc *, uint32_t); 421static int bge_ioctl(if_t, u_long, caddr_t); 422static void bge_init_locked(struct bge_softc *); 423static void bge_init(void *); 424static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); 425static void bge_stop(struct bge_softc *); 426static void bge_watchdog(struct bge_softc *); 427static int bge_shutdown(device_t); 428static int bge_ifmedia_upd_locked(if_t); 429static int bge_ifmedia_upd(if_t); 430static void bge_ifmedia_sts(if_t, struct ifmediareq *); 431static uint64_t bge_get_counter(if_t, ift_counter); 432 433static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 434static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 435 436static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 437static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 438 439static void bge_setpromisc(struct bge_softc *); 440static void bge_setmulti(struct bge_softc *); 441static void bge_setvlan(struct bge_softc *); 442 443static __inline void bge_rxreuse_std(struct bge_softc *, int); 444static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); 445static int bge_newbuf_std(struct bge_softc *, int); 446static int bge_newbuf_jumbo(struct bge_softc *, int); 447static int bge_init_rx_ring_std(struct bge_softc *); 448static void bge_free_rx_ring_std(struct bge_softc *); 449static int bge_init_rx_ring_jumbo(struct bge_softc *); 450static void bge_free_rx_ring_jumbo(struct bge_softc *); 451static void bge_free_tx_ring(struct bge_softc *); 452static int bge_init_tx_ring(struct bge_softc *); 453 454static int bge_chipinit(struct bge_softc *); 
455static int bge_blockinit(struct bge_softc *); 456static uint32_t bge_dma_swap_options(struct bge_softc *); 457 458static int bge_has_eaddr(struct bge_softc *); 459static uint32_t bge_readmem_ind(struct bge_softc *, int); 460static void bge_writemem_ind(struct bge_softc *, int, int); 461#ifndef __HAIKU__ 462static void bge_writembx(struct bge_softc *, int, int); 463#endif 464#ifdef notdef 465static uint32_t bge_readreg_ind(struct bge_softc *, int); 466#endif 467static void bge_writemem_direct(struct bge_softc *, int, int); 468static void bge_writereg_ind(struct bge_softc *, int, int); 469 470static int bge_miibus_readreg(device_t, int, int); 471static int bge_miibus_writereg(device_t, int, int, int); 472static void bge_miibus_statchg(device_t); 473#ifdef DEVICE_POLLING 474static int bge_poll(if_t ifp, enum poll_cmd cmd, int count); 475#endif 476 477#define BGE_RESET_SHUTDOWN 0 478#define BGE_RESET_START 1 479#define BGE_RESET_SUSPEND 2 480static void bge_sig_post_reset(struct bge_softc *, int); 481static void bge_sig_legacy(struct bge_softc *, int); 482static void bge_sig_pre_reset(struct bge_softc *, int); 483static void bge_stop_fw(struct bge_softc *); 484static int bge_reset(struct bge_softc *); 485static void bge_link_upd(struct bge_softc *); 486 487static void bge_ape_lock_init(struct bge_softc *); 488static void bge_ape_read_fw_ver(struct bge_softc *); 489static int bge_ape_lock(struct bge_softc *, int); 490static void bge_ape_unlock(struct bge_softc *, int); 491static void bge_ape_send_event(struct bge_softc *, uint32_t); 492static void bge_ape_driver_state_change(struct bge_softc *, int); 493 494/* 495 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may 496 * leak information to untrusted users. It is also known to cause alignment 497 * traps on certain architectures. 
498 */ 499#ifdef BGE_REGISTER_DEBUG 500static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 501static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); 502static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS); 503static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); 504#endif 505static void bge_add_sysctls(struct bge_softc *); 506static void bge_add_sysctl_stats_regs(struct bge_softc *, 507 struct sysctl_ctx_list *, struct sysctl_oid_list *); 508static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *, 509 struct sysctl_oid_list *); 510static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); 511 512DEBUGNET_DEFINE(bge); 513 514static device_method_t bge_methods[] = { 515 /* Device interface */ 516 DEVMETHOD(device_probe, bge_probe), 517 DEVMETHOD(device_attach, bge_attach), 518 DEVMETHOD(device_detach, bge_detach), 519 DEVMETHOD(device_shutdown, bge_shutdown), 520 DEVMETHOD(device_suspend, bge_suspend), 521 DEVMETHOD(device_resume, bge_resume), 522 523 /* MII interface */ 524 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 525 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 526 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 527 528 DEVMETHOD_END 529}; 530 531static driver_t bge_driver = { 532 "bge", 533 bge_methods, 534 sizeof(struct bge_softc) 535}; 536 537static devclass_t bge_devclass; 538 539DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 540MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs, 541 nitems(bge_devs) - 1); 542DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 543 544static int bge_allow_asf = 1; 545 546static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 547 "BGE driver parameters"); 548SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, 549 "Allow ASF mode if available"); 550 551static int 552bge_has_eaddr(struct bge_softc *sc) 553{ 554 return (1); 555} 556 557static uint32_t 558bge_readmem_ind(struct bge_softc *sc, int off) 559{ 560 device_t dev; 561 uint32_t val; 
562 563 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 564 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 565 return (0); 566 567 dev = sc->bge_dev; 568 569 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 570 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 571 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 572 return (val); 573} 574 575static void 576bge_writemem_ind(struct bge_softc *sc, int off, int val) 577{ 578 device_t dev; 579 580 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 581 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 582 return; 583 584 dev = sc->bge_dev; 585 586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 587 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 588 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 589} 590 591#ifdef notdef 592static uint32_t 593bge_readreg_ind(struct bge_softc *sc, int off) 594{ 595 device_t dev; 596 597 dev = sc->bge_dev; 598 599 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 600 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 601} 602#endif 603 604static void 605bge_writereg_ind(struct bge_softc *sc, int off, int val) 606{ 607 device_t dev; 608 609 dev = sc->bge_dev; 610 611 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 612 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 613} 614 615static void 616bge_writemem_direct(struct bge_softc *sc, int off, int val) 617{ 618 CSR_WRITE_4(sc, off, val); 619} 620 621#ifndef __HAIKU__ 622static void 623#else 624void 625#endif 626bge_writembx(struct bge_softc *sc, int off, int val) 627{ 628 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 629 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 630 631 CSR_WRITE_4(sc, off, val); 632 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) 633 CSR_READ_4(sc, off); 634} 635 636/* 637 * Clear all stale locks and select the lock for this driver instance. 
638 */ 639static void 640bge_ape_lock_init(struct bge_softc *sc) 641{ 642 uint32_t bit, regbase; 643 int i; 644 645 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 646 regbase = BGE_APE_LOCK_GRANT; 647 else 648 regbase = BGE_APE_PER_LOCK_GRANT; 649 650 /* Clear any stale locks. */ 651 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { 652 switch (i) { 653 case BGE_APE_LOCK_PHY0: 654 case BGE_APE_LOCK_PHY1: 655 case BGE_APE_LOCK_PHY2: 656 case BGE_APE_LOCK_PHY3: 657 bit = BGE_APE_LOCK_GRANT_DRIVER0; 658 break; 659 default: 660 if (sc->bge_func_addr == 0) 661 bit = BGE_APE_LOCK_GRANT_DRIVER0; 662 else 663 bit = (1 << sc->bge_func_addr); 664 } 665 APE_WRITE_4(sc, regbase + 4 * i, bit); 666 } 667 668 /* Select the PHY lock based on the device's function number. */ 669 switch (sc->bge_func_addr) { 670 case 0: 671 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; 672 break; 673 case 1: 674 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; 675 break; 676 case 2: 677 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; 678 break; 679 case 3: 680 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; 681 break; 682 default: 683 device_printf(sc->bge_dev, 684 "PHY lock not supported on this function\n"); 685 } 686} 687 688/* 689 * Check for APE firmware, set flags, and print version info. 690 */ 691static void 692bge_ape_read_fw_ver(struct bge_softc *sc) 693{ 694 const char *fwtype; 695 uint32_t apedata, features; 696 697 /* Check for a valid APE signature in shared memory. */ 698 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); 699 if (apedata != BGE_APE_SEG_SIG_MAGIC) { 700 sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; 701 return; 702 } 703 704 /* Check if APE firmware is running. */ 705 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); 706 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { 707 device_printf(sc->bge_dev, "APE signature found " 708 "but FW status not ready! 0x%08x\n", apedata); 709 return; 710 } 711 712 sc->bge_mfw_flags |= BGE_MFW_ON_APE; 713 714 /* Fetch the APE firwmare type and version. 
*/ 715 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); 716 features = APE_READ_4(sc, BGE_APE_FW_FEATURES); 717 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { 718 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; 719 fwtype = "NCSI"; 720 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { 721 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; 722 fwtype = "DASH"; 723 } else 724 fwtype = "UNKN"; 725 726 /* Print the APE firmware version. */ 727 device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n", 728 fwtype, 729 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 730 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 731 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 732 (apedata & BGE_APE_FW_VERSION_BLDMSK)); 733} 734 735static int 736bge_ape_lock(struct bge_softc *sc, int locknum) 737{ 738 uint32_t bit, gnt, req, status; 739 int i, off; 740 741 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 742 return (0); 743 744 /* Lock request/grant registers have different bases. */ 745 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) { 746 req = BGE_APE_LOCK_REQ; 747 gnt = BGE_APE_LOCK_GRANT; 748 } else { 749 req = BGE_APE_PER_LOCK_REQ; 750 gnt = BGE_APE_PER_LOCK_GRANT; 751 } 752 753 off = 4 * locknum; 754 755 switch (locknum) { 756 case BGE_APE_LOCK_GPIO: 757 /* Lock required when using GPIO. */ 758 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 759 return (0); 760 if (sc->bge_func_addr == 0) 761 bit = BGE_APE_LOCK_REQ_DRIVER0; 762 else 763 bit = (1 << sc->bge_func_addr); 764 break; 765 case BGE_APE_LOCK_GRC: 766 /* Lock required to reset the device. */ 767 if (sc->bge_func_addr == 0) 768 bit = BGE_APE_LOCK_REQ_DRIVER0; 769 else 770 bit = (1 << sc->bge_func_addr); 771 break; 772 case BGE_APE_LOCK_MEM: 773 /* Lock required when accessing certain APE memory. 
*/ 774 if (sc->bge_func_addr == 0) 775 bit = BGE_APE_LOCK_REQ_DRIVER0; 776 else 777 bit = (1 << sc->bge_func_addr); 778 break; 779 case BGE_APE_LOCK_PHY0: 780 case BGE_APE_LOCK_PHY1: 781 case BGE_APE_LOCK_PHY2: 782 case BGE_APE_LOCK_PHY3: 783 /* Lock required when accessing PHYs. */ 784 bit = BGE_APE_LOCK_REQ_DRIVER0; 785 break; 786 default: 787 return (EINVAL); 788 } 789 790 /* Request a lock. */ 791 APE_WRITE_4(sc, req + off, bit); 792 793 /* Wait up to 1 second to acquire lock. */ 794 for (i = 0; i < 20000; i++) { 795 status = APE_READ_4(sc, gnt + off); 796 if (status == bit) 797 break; 798 DELAY(50); 799 } 800 801 /* Handle any errors. */ 802 if (status != bit) { 803 device_printf(sc->bge_dev, "APE lock %d request failed! " 804 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 805 locknum, req + off, bit & 0xFFFF, gnt + off, 806 status & 0xFFFF); 807 /* Revoke the lock request. */ 808 APE_WRITE_4(sc, gnt + off, bit); 809 return (EBUSY); 810 } 811 812 return (0); 813} 814 815static void 816bge_ape_unlock(struct bge_softc *sc, int locknum) 817{ 818 uint32_t bit, gnt; 819 int off; 820 821 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 822 return; 823 824 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 825 gnt = BGE_APE_LOCK_GRANT; 826 else 827 gnt = BGE_APE_PER_LOCK_GRANT; 828 829 off = 4 * locknum; 830 831 switch (locknum) { 832 case BGE_APE_LOCK_GPIO: 833 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 834 return; 835 if (sc->bge_func_addr == 0) 836 bit = BGE_APE_LOCK_GRANT_DRIVER0; 837 else 838 bit = (1 << sc->bge_func_addr); 839 break; 840 case BGE_APE_LOCK_GRC: 841 if (sc->bge_func_addr == 0) 842 bit = BGE_APE_LOCK_GRANT_DRIVER0; 843 else 844 bit = (1 << sc->bge_func_addr); 845 break; 846 case BGE_APE_LOCK_MEM: 847 if (sc->bge_func_addr == 0) 848 bit = BGE_APE_LOCK_GRANT_DRIVER0; 849 else 850 bit = (1 << sc->bge_func_addr); 851 break; 852 case BGE_APE_LOCK_PHY0: 853 case BGE_APE_LOCK_PHY1: 854 case BGE_APE_LOCK_PHY2: 855 case BGE_APE_LOCK_PHY3: 856 bit = 
BGE_APE_LOCK_GRANT_DRIVER0; 857 break; 858 default: 859 return; 860 } 861 862 APE_WRITE_4(sc, gnt + off, bit); 863} 864 865/* 866 * Send an event to the APE firmware. 867 */ 868static void 869bge_ape_send_event(struct bge_softc *sc, uint32_t event) 870{ 871 uint32_t apedata; 872 int i; 873 874 /* NCSI does not support APE events. */ 875 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 876 return; 877 878 /* Wait up to 1ms for APE to service previous event. */ 879 for (i = 10; i > 0; i--) { 880 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 881 break; 882 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 883 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 884 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 885 BGE_APE_EVENT_STATUS_EVENT_PENDING); 886 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 887 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 888 break; 889 } 890 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 891 DELAY(100); 892 } 893 if (i == 0) 894 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", 895 event); 896} 897 898static void 899bge_ape_driver_state_change(struct bge_softc *sc, int kind) 900{ 901 uint32_t apedata, event; 902 903 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 904 return; 905 906 switch (kind) { 907 case BGE_RESET_START: 908 /* If this is the first load, clear the load counter. */ 909 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 910 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 911 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 912 else { 913 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 914 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 915 } 916 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 917 BGE_APE_HOST_SEG_SIG_MAGIC); 918 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 919 BGE_APE_HOST_SEG_LEN_MAGIC); 920 921 /* Add some version info if bge(4) supports it. 
 */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

/*
 * Map a single buffer address.  bus_dmamap_load() callback that
 * stores the lone segment's bus address into the caller's
 * struct bge_dmamap_arg.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read one byte from NVRAM at 'addr' into 'dest'.
 * Returns 0 on success, 1 on arbitration or read timeout.
 */
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock.  Arbitrate for NVRAM access via the software arbiter. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	/* Reads are word-wide; mask off the sub-word address bits. */
	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result.  Extract the requested byte from the 32-bit word. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	/* Only the BCM5906 takes this NVRAM path. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period.
 */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result.  Extract the requested byte from the 32-bit word. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * MII bus read method.  Returns the 16-bit PHY register value, or 0
 * on failure (APE lock denied, read failed, or timeout).
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

/*
 * MII bus write method.  Always returns 0; write timeouts are only
 * reported on the console.
 */
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Writes to these registers are intentionally skipped on the 5906. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	/* Restore the autopoll bit if necessary.
 */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT)
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
		    phy, reg, val);

	return (0);
}

/*
 * MII bus status-change method.  Tracks link state and reprograms
 * the MAC, TX and RX mode registers to match the negotiated media
 * and flow control settings.
 */
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	sc = device_get_softc(dev);
	if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->bge_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			/* No gigabit link on the 5906. */
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else
		sc->bge_link = 0;
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	/*
	 * Use a 9k cluster when jumbo frames go through the standard
	 * ring; otherwise a regular 2k cluster is enough.
	 */
	if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
	    (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MCLBYTES;
	}
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	/* Unload any previous buffer occupying this slot. */
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	/* Swap this slot's map with the spare map. */
	map = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] =
	    sc->bge_cdata.bge_rx_std_sparemap;
	sc->bge_cdata.bge_rx_std_sparemap = map;
	sc->bge_cdata.bge_rx_std_chain[i] = m;
	sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = segs[0].ds_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	bus_dmamap_t map;
	struct bge_extrx_bd *r;
	struct mbuf *m;
	int error, nsegs;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

	/* Unload any previous buffer occupying this slot. */
	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	/* Swap this slot's map with the spare map. */
	map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
	sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
	    sc->bge_cdata.bge_rx_jumbo_sparemap;
	sc->bge_cdata.bge_rx_jumbo_sparemap = map;
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	/* Descending cases deliberately fall through to fill lower segments. */
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Populate the standard receive ring and hand the buffers to the
 * controller.  Returns 0 on success or an errno if buffer
 * allocation fails.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int error, i;

	bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
	sc->bge_std = 0;
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if ((error = bge_newbuf_std(sc, i)) != 0)
			return (error);
		BGE_INC(sc->bge_std,
		    BGE_STD_RX_RING_CNT);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_std = 0;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);

	return (0);
}

/*
 * Release all mbufs and DMA maps attached to the standard receive
 * ring and clear the ring descriptors.
 */
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

/*
 * Populate the jumbo receive ring and hand the buffers to the
 * controller.  Returns 0 on success or an errno if buffer
 * allocation fails.
 */
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int error, i;

	bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
	sc->bge_jumbo = 0;
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if ((error = bge_newbuf_jumbo(sc, i)) != 0)
			return (error);
		BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = 0;

	/* Enable the jumbo receive producer ring. */
	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);

	return (0);
}

/*
 * Release all mbufs and DMA maps attached to the jumbo receive
 * ring and clear the ring descriptors.
 */
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

/*
 * Release all mbufs and DMA maps attached to the transmit ring and
 * clear the ring descriptors.
 */
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

/*
 * Reset transmit ring bookkeeping and program the initial producer
 * indices.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	/* Initialize transmit producer index for host-memory send ring.
 */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

/*
 * Program promiscuous mode to match the interface flags.
 */
static void
bge_setpromisc(struct bge_softc *sc)
{
	if_t ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable promiscuous mode as needed. */
	if (if_getflags(ifp) & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

/*
 * if_foreach_llmaddr() callback: fold one link-level multicast
 * address into the 128-bit hash filter.
 */
static u_int
bge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int h;

	h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F;
	hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);

	return (1);
}

/*
 * Program the multicast hash filter from the interface's multicast
 * address list.
 */
static void
bge_setmulti(struct bge_softc *sc)
{
	if_t ifp;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Accept all multicast frames in allmulti/promiscuous mode. */
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	if_foreach_llmaddr(ifp, bge_hash_maddr, hashes);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Program VLAN tag stripping to match the interface capabilities.
 */
static void
bge_setvlan(struct bge_softc *sc)
{
	if_t ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable VLAN tag stripping as needed. */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}

/*
 * Notify ASF/APE firmware before a chip reset.
 */
static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START);
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD);
			break;
		case BGE_RESET_SUSPEND:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_SUSPEND);
			break;
		}
	}

	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
		bge_ape_driver_state_change(sc, type);
}

/*
 * Notify ASF/APE firmware after a chip reset has completed.
 */
static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START_DONE);
			/* START DONE */
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD_DONE);
			break;
		}
	}
	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc,
type); 1686} 1687 1688static void 1689bge_sig_legacy(struct bge_softc *sc, int type) 1690{ 1691 1692 if (sc->bge_asf_mode) { 1693 switch (type) { 1694 case BGE_RESET_START: 1695 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1696 BGE_FW_DRV_STATE_START); 1697 break; 1698 case BGE_RESET_SHUTDOWN: 1699 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1700 BGE_FW_DRV_STATE_UNLOAD); 1701 break; 1702 } 1703 } 1704} 1705 1706static void 1707bge_stop_fw(struct bge_softc *sc) 1708{ 1709 int i; 1710 1711 if (sc->bge_asf_mode) { 1712 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 1713 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, 1714 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 1715 1716 for (i = 0; i < 100; i++ ) { 1717 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 1718 BGE_RX_CPU_DRV_EVENT)) 1719 break; 1720 DELAY(10); 1721 } 1722 } 1723} 1724 1725static uint32_t 1726bge_dma_swap_options(struct bge_softc *sc) 1727{ 1728 uint32_t dma_options; 1729 1730 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | 1731 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; 1732#if BYTE_ORDER == BIG_ENDIAN 1733 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; 1734#endif 1735 return (dma_options); 1736} 1737 1738/* 1739 * Do endian, PCI and DMA initialization. 1740 */ 1741static int 1742bge_chipinit(struct bge_softc *sc) 1743{ 1744 uint32_t dma_rw_ctl, misc_ctl, mode_ctl; 1745 uint16_t val; 1746 int i; 1747 1748 /* Set endianness before we access any non-PCI registers. */ 1749 misc_ctl = BGE_INIT; 1750 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) 1751 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; 1752 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); 1753 1754 /* 1755 * Clear the MAC statistics block in the NIC's 1756 * internal memory. 
 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
		/*
		 * Fix data corruption caused by non-qword write with WB.
		 * Fix master abort in PCI mode.
		 * Fix PCI latency timer.
		 */
		val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
		val |= (1 << 10) | (1 << 12) | (1 << 13);
		pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM57765 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57766) {
		/*
		 * For the 57766 and non Ax versions of 57765, bootcode
		 * needs to setup the PCIE Fast Training Sequence (FTS)
		 * value to prevent transmit hangs.
		 */
		if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) {
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
			    CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
			    BGE_CPMU_PADRNG_CTL_RDIV2);
		}
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express: write watermark follows max payload size. */
		if (sc->bge_mps >= 256)
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		else
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
			dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			/*
			 * In the BCM5703, the DMA read watermark should
			 * be set to less than or equal to the maximum
			 * memory read byte count of the PCI-X command
			 * register.
			 */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    0x0F;
		}
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONE_DMA_AT_ONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	if (BGE_IS_5717_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (!BGE_IS_57765_PLUS(sc) &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5762)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bge_dma_swap_options(sc);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * BCM5701 B5 have a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	/*
	 * Tell the firmware the driver is running
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		mode_ctl |= BGE_MODECTL_STACKUP;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66 MHz). */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	/* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return (0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t dmactl, rdmareg, val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips.
*/ 1946 1947 if (!(BGE_IS_5705_PLUS(sc))) { 1948 /* Configure mbuf memory pool */ 1949 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1950 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1951 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1952 else 1953 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1954 1955 /* Configure DMA resource pool */ 1956 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1957 BGE_DMA_DESCRIPTORS); 1958 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1959 } 1960 1961 /* Configure mbuf pool watermarks */ 1962 if (BGE_IS_5717_PLUS(sc)) { 1963 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1964 if (if_getmtu(sc->bge_ifp) > ETHERMTU) { 1965 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 1966 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 1967 } else { 1968 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1970 } 1971 } else if (!BGE_IS_5705_PLUS(sc)) { 1972 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1973 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1974 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1975 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 1976 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1977 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1978 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1979 } else { 1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1981 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1982 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1983 } 1984 1985 /* Configure DMA resource watermarks */ 1986 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1987 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1988 1989 /* Enable buffer manager */ 1990 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; 1991 /* 1992 * Change the arbitration algorithm of TXMBUF read request to 1993 * round-robin instead of priority based for BCM5719. 
When 1994 * TXFIFO is almost empty, RDMA will hold its request until 1995 * TXFIFO is not almost empty. 1996 */ 1997 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) 1998 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1999 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 2000 2001 /* Poll for buffer manager start indication */ 2002 for (i = 0; i < BGE_TIMEOUT; i++) { 2003 DELAY(10); 2004 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2005 break; 2006 } 2007 2008 if (i == BGE_TIMEOUT) { 2009 device_printf(sc->bge_dev, "buffer manager failed to start\n"); 2010 return (ENXIO); 2011 } 2012 2013 /* Enable flow-through queues */ 2014 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2015 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2016 2017 /* Wait until queue initialization is complete */ 2018 for (i = 0; i < BGE_TIMEOUT; i++) { 2019 DELAY(10); 2020 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2021 break; 2022 } 2023 2024 if (i == BGE_TIMEOUT) { 2025 device_printf(sc->bge_dev, "flow-through queue init failed\n"); 2026 return (ENXIO); 2027 } 2028 2029 /* 2030 * Summary of rings supported by the controller: 2031 * 2032 * Standard Receive Producer Ring 2033 * - This ring is used to feed receive buffers for "standard" 2034 * sized frames (typically 1536 bytes) to the controller. 2035 * 2036 * Jumbo Receive Producer Ring 2037 * - This ring is used to feed receive buffers for jumbo sized 2038 * frames (i.e. anything bigger than the "standard" frames) 2039 * to the controller. 2040 * 2041 * Mini Receive Producer Ring 2042 * - This ring is used to feed receive buffers for "mini" 2043 * sized frames to the controller. 2044 * - This feature required external memory for the controller 2045 * but was never used in a production system. Should always 2046 * be disabled. 2047 * 2048 * Receive Return Ring 2049 * - After the controller has placed an incoming frame into a 2050 * receive buffer that buffer is moved into a receive return 2051 * ring. The driver is then responsible to passing the 2052 * buffer up to the stack. 
Many versions of the controller 2053 * support multiple RR rings. 2054 * 2055 * Send Ring 2056 * - This ring is used for outgoing frames. Many versions of 2057 * the controller support multiple send rings. 2058 */ 2059 2060 /* Initialize the standard receive producer ring control block. */ 2061 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 2062 rcb->bge_hostaddr.bge_addr_lo = 2063 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 2064 rcb->bge_hostaddr.bge_addr_hi = 2065 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 2066 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2067 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 2068 if (BGE_IS_5717_PLUS(sc)) { 2069 /* 2070 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2071 * Bits 15-2 : Maximum RX frame size 2072 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 2073 * Bit 0 : Reserved 2074 */ 2075 rcb->bge_maxlen_flags = 2076 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2077 } else if (BGE_IS_5705_PLUS(sc)) { 2078 /* 2079 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2080 * Bits 15-2 : Reserved (should be 0) 2081 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2082 * Bit 0 : Reserved 2083 */ 2084 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2085 } else { 2086 /* 2087 * Ring size is always XXX entries 2088 * Bits 31-16: Maximum RX frame size 2089 * Bits 15-2 : Reserved (should be 0) 2090 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2091 * Bit 0 : Reserved 2092 */ 2093 rcb->bge_maxlen_flags = 2094 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2095 } 2096 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || 2097 sc->bge_asicrev == BGE_ASICREV_BCM5719 || 2098 sc->bge_asicrev == BGE_ASICREV_BCM5720) 2099 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2100 else 2101 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2102 /* Write the standard receive producer ring control block. 
*/ 2103 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2104 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2105 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2106 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2107 2108 /* Reset the standard receive producer ring producer index. */ 2109 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2110 2111 /* 2112 * Initialize the jumbo RX producer ring control 2113 * block. We set the 'ring disabled' bit in the 2114 * flags field until we're actually ready to start 2115 * using this ring (i.e. once we set the MTU 2116 * high enough to require it). 2117 */ 2118 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2119 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 2120 /* Get the jumbo receive producer ring RCB parameters. */ 2121 rcb->bge_hostaddr.bge_addr_lo = 2122 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 2123 rcb->bge_hostaddr.bge_addr_hi = 2124 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 2125 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2126 sc->bge_cdata.bge_rx_jumbo_ring_map, 2127 BUS_DMASYNC_PREREAD); 2128 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2129 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2130 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || 2131 sc->bge_asicrev == BGE_ASICREV_BCM5719 || 2132 sc->bge_asicrev == BGE_ASICREV_BCM5720) 2133 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2134 else 2135 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2136 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2137 rcb->bge_hostaddr.bge_addr_hi); 2138 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2139 rcb->bge_hostaddr.bge_addr_lo); 2140 /* Program the jumbo receive producer ring RCB parameters. */ 2141 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2142 rcb->bge_maxlen_flags); 2143 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2144 /* Reset the jumbo receive producer ring producer index. 
*/ 2145 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2146 	} 2147 2148 	/* Disable the mini receive producer ring RCB. */ 2149 	if (BGE_IS_5700_FAMILY(sc)) { 2150 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; 2151 		rcb->bge_maxlen_flags = 2152 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2153 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2154 		    rcb->bge_maxlen_flags); 2155 		/* Reset the mini receive producer ring producer index. */ 2156 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2157 	} 2158 2159 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2160 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2161 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2162 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2163 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2164 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2165 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2166 	} 2167 	/* 2168 	 * The BD ring replenish thresholds control how often the 2169 	 * hardware fetches new BD's from the producer rings in host 2170 	 * memory. Setting the value too low on a busy system can 2171 	 * starve the hardware and reduce the throughput. 2172 	 * 2173 	 * Set the BD ring replenish thresholds. The recommended 2174 	 * values are 1/8th the number of descriptors allocated to 2175 	 * each ring. 2176 	 * XXX The 5754 requires a lower threshold, so it might be a 2177 	 * requirement of all 575x family chips.  The Linux driver sets 2178 	 * the lower threshold for all 5705 family chips as well, but there 2179 	 * are reports that it might not need to be so strict. 2180 	 * 2181 	 * XXX Linux does some extra fiddling here for the 5906 parts as 2182 	 * well.
2183 	 */ 2184 	if (BGE_IS_5705_PLUS(sc)) 2185 		val = 8; 2186 	else 2187 		val = BGE_STD_RX_RING_CNT / 8; 2188 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 2189 	if (BGE_IS_JUMBO_CAPABLE(sc)) 2190 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 2191 		    BGE_JUMBO_RX_RING_CNT/8); 2192 	if (BGE_IS_5717_PLUS(sc)) { 2193 		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); 2194 		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); 2195 	} 2196 2197 	/* 2198 	 * Disable all send rings by setting the 'ring disabled' bit 2199 	 * in the flags field of all the TX send ring control blocks, 2200 	 * located in NIC memory. 2201 	 */ 2202 	if (!BGE_IS_5705_PLUS(sc)) 2203 		/* 5700 to 5704 had 16 send rings. */ 2204 		limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2205 	else if (BGE_IS_57765_PLUS(sc) || 2206 	    sc->bge_asicrev == BGE_ASICREV_BCM5762) 2207 		limit = 2; 2208 	else if (BGE_IS_5717_PLUS(sc)) 2209 		limit = 4; 2210 	else 2211 		limit = 1; 2212 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2213 	for (i = 0; i < limit; i++) { 2214 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 2215 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2216 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 2217 		vrcb += sizeof(struct bge_rcb); 2218 	} 2219 2220 	/* Configure send ring RCB 0 (we use only the first ring) */ 2221 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2222 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 2223 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2224 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2225 	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || 2226 	    sc->bge_asicrev == BGE_ASICREV_BCM5719 || 2227 	    sc->bge_asicrev == BGE_ASICREV_BCM5720) 2228 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); 2229 	else 2230 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 2231 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2232 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 2233 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2234 2235 	/* 2236 	 * Disable all receive return rings by setting the 2237 	 * 'ring disabled' bit in the flags field of all
the receive 2238 * return ring control blocks, located in NIC memory. 2239 */ 2240 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || 2241 sc->bge_asicrev == BGE_ASICREV_BCM5719 || 2242 sc->bge_asicrev == BGE_ASICREV_BCM5720) { 2243 /* Should be 17, use 16 until we get an SRAM map. */ 2244 limit = 16; 2245 } else if (!BGE_IS_5705_PLUS(sc)) 2246 limit = BGE_RX_RINGS_MAX; 2247 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2248 sc->bge_asicrev == BGE_ASICREV_BCM5762 || 2249 BGE_IS_57765_PLUS(sc)) 2250 limit = 4; 2251 else 2252 limit = 1; 2253 /* Disable all receive return rings. */ 2254 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2255 for (i = 0; i < limit; i++) { 2256 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 2257 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 2258 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 2259 BGE_RCB_FLAG_RING_DISABLED); 2260 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 2261 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2262 (i * (sizeof(uint64_t))), 0); 2263 vrcb += sizeof(struct bge_rcb); 2264 } 2265 2266 /* 2267 * Set up receive return ring 0. Note that the NIC address 2268 * for RX return rings is 0x0. The return rings live entirely 2269 * within the host, so the nicaddr field in the RCB isn't used. 
2270 	 */ 2271 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2272 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 2273 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2274 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2275 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 2276 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 2277 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2278 2279 	/* Set random backoff seed for TX */ 2280 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2281 	    (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 2282 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 2283 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) & 2284 	    BGE_TX_BACKOFF_SEED_MASK); 2285 2286 	/* Set inter-packet gap */ 2287 	val = 0x2620; 2288 	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || 2289 	    sc->bge_asicrev == BGE_ASICREV_BCM5762) 2290 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2291 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2292 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2293 2294 	/* 2295 	 * Specify which ring to use for packets that don't match 2296 	 * any RX rules. 2297 	 */ 2298 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2299 2300 	/* 2301 	 * Configure number of RX lists. One interrupt distribution 2302 	 * list, sixteen active lists, one bad frames class. 2303 	 */ 2304 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2305 2306 	/* Initialize RX list placement stats mask. */ 2307 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2308 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2309 2310 	/* Disable host coalescing until we get it set up */ 2311 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2312 2313 	/* Poll to make sure it's shut down.
*/ 2314 for (i = 0; i < BGE_TIMEOUT; i++) { 2315 DELAY(10); 2316 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2317 break; 2318 } 2319 2320 if (i == BGE_TIMEOUT) { 2321 device_printf(sc->bge_dev, 2322 "host coalescing engine failed to idle\n"); 2323 return (ENXIO); 2324 } 2325 2326 /* Set up host coalescing defaults */ 2327 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2328 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2329 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2330 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2331 if (!(BGE_IS_5705_PLUS(sc))) { 2332 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2333 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2334 } 2335 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 2336 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 2337 2338 /* Set up address of statistics block */ 2339 if (!(BGE_IS_5705_PLUS(sc))) { 2340 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 2341 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 2342 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 2343 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 2344 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2345 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2346 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2347 } 2348 2349 /* Set up address of status block */ 2350 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 2351 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 2352 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 2353 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 2354 2355 /* Set up status block size. 
*/ 2356 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2357 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2358 val = BGE_STATBLKSZ_FULL; 2359 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 2360 } else { 2361 val = BGE_STATBLKSZ_32BYTE; 2362 bzero(sc->bge_ldata.bge_status_block, 32); 2363 } 2364 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2365 sc->bge_cdata.bge_status_map, 2366 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2367 2368 /* Turn on host coalescing state machine */ 2369 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2370 2371 /* Turn on RX BD completion state machine and enable attentions */ 2372 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2373 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2374 2375 /* Turn on RX list placement state machine */ 2376 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2377 2378 /* Turn on RX list selector state machine. */ 2379 if (!(BGE_IS_5705_PLUS(sc))) 2380 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2381 2382 /* Turn on DMA, clear stats. */ 2383 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2384 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2385 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2386 BGE_MACMODE_FRMHDR_DMA_ENB; 2387 2388 if (sc->bge_flags & BGE_FLAG_TBI) 2389 val |= BGE_PORTMODE_TBI; 2390 else if (sc->bge_flags & BGE_FLAG_MII_SERDES) 2391 val |= BGE_PORTMODE_GMII; 2392 else 2393 val |= BGE_PORTMODE_MII; 2394 2395 /* Allow APE to send/receive frames. */ 2396 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2397 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2398 2399 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2400 DELAY(40); 2401 2402 /* Set misc. 
local control, enable interrupts on attentions */ 2403 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2404 2405#ifdef notdef 2406 /* Assert GPIO pins for PHY reset */ 2407 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 2408 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 2409 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 2410 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 2411#endif 2412 2413 /* Turn on DMA completion state machine */ 2414 if (!(BGE_IS_5705_PLUS(sc))) 2415 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2416 2417 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 2418 2419 /* Enable host coalescing bug fix. */ 2420 if (BGE_IS_5755_PLUS(sc)) 2421 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2422 2423 /* Request larger DMA burst size to get better performance. */ 2424 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) 2425 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2426 2427 /* Turn on write DMA state machine */ 2428 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 2429 DELAY(40); 2430 2431 /* Turn on read DMA state machine */ 2432 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2433 2434 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) 2435 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2436 2437 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 2438 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 2439 sc->bge_asicrev == BGE_ASICREV_BCM57780) 2440 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2441 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2442 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2443 if (sc->bge_flags & BGE_FLAG_PCIE) 2444 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2445 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { 2446 val |= BGE_RDMAMODE_TSO4_ENABLE; 2447 if (sc->bge_flags & BGE_FLAG_TSO3 || 2448 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 2449 sc->bge_asicrev == BGE_ASICREV_BCM57780) 2450 val |= BGE_RDMAMODE_TSO6_ENABLE; 2451 } 2452 2453 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || 2454 sc->bge_asicrev == BGE_ASICREV_BCM5762) { 2455 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2456 
BGE_RDMAMODE_H2BNC_VLAN_DET; 2457 		/* 2458 		 * Allow multiple outstanding read requests from 2459 		 * non-LSO read DMA engine. 2460 		 */ 2461 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2462 	} 2463 2464 	if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2465 	    sc->bge_asicrev == BGE_ASICREV_BCM5784 || 2466 	    sc->bge_asicrev == BGE_ASICREV_BCM5785 || 2467 	    sc->bge_asicrev == BGE_ASICREV_BCM57780 || 2468 	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { 2469 		if (sc->bge_asicrev == BGE_ASICREV_BCM5762) 2470 			rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2471 		else 2472 			rdmareg = BGE_RDMA_RSRVCTRL; 2473 		dmactl = CSR_READ_4(sc, rdmareg); 2474 		/* 2475 		 * Adjust tx margin to prevent TX data corruption and 2476 		 * fix internal FIFO overflow. 2477 		 */ 2478 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2479 		    sc->bge_asicrev == BGE_ASICREV_BCM5762) { 2480 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2481 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2482 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2483 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2484 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2485 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2486 		} 2487 		/* 2488 		 * Enable fix for read DMA FIFO overruns. 2489 		 * The fix is to limit the number of RX BDs 2490 		 * the hardware would fetch at a time. 2491 		 */ 2492 		CSR_WRITE_4(sc, rdmareg, dmactl | 2493 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2494 	} 2495 2496 	if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { 2497 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2498 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2499 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2500 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2501 	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { 2502 		/* 2503 		 * Allow 4KB burst length reads for non-LSO frames. 2504 		 * Enable 512B burst length reads for buffer descriptors.
2505 */ 2506 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2507 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2508 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2509 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2510 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { 2511 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2512 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2513 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2514 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2515 } 2516 2517 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 2518 DELAY(40); 2519 2520 if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { 2521 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2522 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2523 if ((val & 0xFFFF) > BGE_FRAMELEN) 2524 break; 2525 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) 2526 break; 2527 } 2528 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2529 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2530 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) 2531 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2532 else 2533 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2534 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2535 } 2536 } 2537 2538 /* Turn on RX data completion state machine */ 2539 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2540 2541 /* Turn on RX BD initiator state machine */ 2542 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2543 2544 /* Turn on RX data and RX BD initiator state machine */ 2545 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2546 2547 /* Turn on Mbuf cluster free state machine */ 2548 if (!(BGE_IS_5705_PLUS(sc))) 2549 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2550 2551 /* Turn on send BD completion state machine */ 2552 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2553 2554 /* Turn on send data completion state machine */ 2555 val = BGE_SDCMODE_ENABLE; 2556 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 2557 val |= BGE_SDCMODE_CDELAY; 2558 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2559 2560 /* Turn on send data initiator state machine */ 2561 if (sc->bge_flags & 
(BGE_FLAG_TSO | BGE_FLAG_TSO3)) 2562 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 2563 BGE_SDIMODE_HW_LSO_PRE_DMA); 2564 else 2565 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2566 2567 /* Turn on send BD initiator state machine */ 2568 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2569 2570 /* Turn on send BD selector state machine */ 2571 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2572 2573 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 2574 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2575 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 2576 2577 /* ack/clear link change events */ 2578 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2579 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2580 BGE_MACSTAT_LINK_CHANGED); 2581 CSR_WRITE_4(sc, BGE_MI_STS, 0); 2582 2583 /* 2584 * Enable attention when the link has changed state for 2585 * devices that use auto polling. 2586 */ 2587 if (sc->bge_flags & BGE_FLAG_TBI) { 2588 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2589 } else { 2590 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 2591 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 2592 DELAY(80); 2593 } 2594 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2595 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 2596 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2597 BGE_EVTENB_MI_INTERRUPT); 2598 } 2599 2600 /* 2601 * Clear any pending link state attention. 2602 * Otherwise some link state change events may be lost until attention 2603 * is cleared by bge_intr() -> bge_link_upd() sequence. 2604 * It's not necessary on newer BCM chips - perhaps enabling link 2605 * state change attentions implies clearing pending attention. 2606 */ 2607 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2608 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2609 BGE_MACSTAT_LINK_CHANGED); 2610 2611 /* Enable link state change attentions. 
*/ 2612 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2613 2614 	return (0); 2615} 2616 2617/* Map a chip id to its name entry: exact chip id first, then ASIC major rev. */ static const struct bge_revision * 2618bge_lookup_rev(uint32_t chipid) 2619{ 2620 	const struct bge_revision *br; 2621 2622 	for (br = bge_revisions; br->br_name != NULL; br++) { 2623 		if (br->br_chipid == chipid) 2624 			return (br); 2625 	} 2626 2627 	for (br = bge_majorrevs; br->br_name != NULL; br++) { 2628 		if (br->br_chipid == BGE_ASICREV(chipid)) 2629 			return (br); 2630 	} 2631 2632 	return (NULL); 2633} 2634 2635/* Map a PCI vendor id to its name entry; NULL if unknown. */ static const struct bge_vendor * 2636bge_lookup_vendor(uint16_t vid) 2637{ 2638 	const struct bge_vendor *v; 2639 2640 	for (v = bge_vendors; v->v_name != NULL; v++) 2641 		if (v->v_id == vid) 2642 			return (v); 2643 2644 	return (NULL); 2645} 2646 2647static uint32_t 2648bge_chipid(device_t dev) 2649{ 2650 	uint32_t id; 2651 2652 	id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2653 	    BGE_PCIMISCCTL_ASICREV_SHIFT; 2654 	if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { 2655 		/* 2656 		 * Find the ASIC revision. Different chips use different 2657 		 * registers. 2658 		 */ 2659 		switch (pci_get_device(dev)) { 2660 		case BCOM_DEVICEID_BCM5717C: 2661 			/* 5717 C0 seems to belong to 5720 line.
*/ 2662 id = BGE_CHIPID_BCM5720_A0; 2663 break; 2664 case BCOM_DEVICEID_BCM5717: 2665 case BCOM_DEVICEID_BCM5718: 2666 case BCOM_DEVICEID_BCM5719: 2667 case BCOM_DEVICEID_BCM5720: 2668 case BCOM_DEVICEID_BCM5725: 2669 case BCOM_DEVICEID_BCM5727: 2670 case BCOM_DEVICEID_BCM5762: 2671 case BCOM_DEVICEID_BCM57764: 2672 case BCOM_DEVICEID_BCM57767: 2673 case BCOM_DEVICEID_BCM57787: 2674 id = pci_read_config(dev, 2675 BGE_PCI_GEN2_PRODID_ASICREV, 4); 2676 break; 2677 case BCOM_DEVICEID_BCM57761: 2678 case BCOM_DEVICEID_BCM57762: 2679 case BCOM_DEVICEID_BCM57765: 2680 case BCOM_DEVICEID_BCM57766: 2681 case BCOM_DEVICEID_BCM57781: 2682 case BCOM_DEVICEID_BCM57782: 2683 case BCOM_DEVICEID_BCM57785: 2684 case BCOM_DEVICEID_BCM57786: 2685 case BCOM_DEVICEID_BCM57791: 2686 case BCOM_DEVICEID_BCM57795: 2687 id = pci_read_config(dev, 2688 BGE_PCI_GEN15_PRODID_ASICREV, 4); 2689 break; 2690 default: 2691 id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); 2692 } 2693 } 2694 return (id); 2695} 2696 2697/* 2698 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2699 * against our list and return its name if we find a match. 2700 * 2701 * Note that since the Broadcom controller contains VPD support, we 2702 * try to get the device name string from the controller itself instead 2703 * of the compiled-in string. It guarantees we'll always announce the 2704 * right product name. We fall back to the compiled-in string when 2705 * VPD is unavailable or corrupt. 
2706 */ 2707static int 2708bge_probe(device_t dev) 2709{ 2710 char buf[96]; 2711 char model[64]; 2712 const struct bge_revision *br; 2713 const char *pname; 2714 struct bge_softc *sc; 2715 const struct bge_type *t = bge_devs; 2716 const struct bge_vendor *v; 2717 uint32_t id; 2718 uint16_t did, vid; 2719 2720 sc = device_get_softc(dev); 2721 sc->bge_dev = dev; 2722 vid = pci_get_vendor(dev); 2723 did = pci_get_device(dev); 2724 while(t->bge_vid != 0) { 2725 if ((vid == t->bge_vid) && (did == t->bge_did)) { 2726 id = bge_chipid(dev); 2727 br = bge_lookup_rev(id); 2728 if (bge_has_eaddr(sc) && 2729 pci_get_vpd_ident(dev, &pname) == 0) 2730 snprintf(model, sizeof(model), "%s", pname); 2731 else { 2732 v = bge_lookup_vendor(vid); 2733 snprintf(model, sizeof(model), "%s %s", 2734 v != NULL ? v->v_name : "Unknown", 2735 br != NULL ? br->br_name : 2736 "NetXtreme/NetLink Ethernet Controller"); 2737 } 2738 snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x", 2739 model, br != NULL ? "" : "unknown ", id); 2740 device_set_desc_copy(dev, buf); 2741 return (BUS_PROBE_DEFAULT); 2742 } 2743 t++; 2744 } 2745 2746 return (ENXIO); 2747} 2748 2749static void 2750bge_dma_free(struct bge_softc *sc) 2751{ 2752 int i; 2753 2754 /* Destroy DMA maps for RX buffers. */ 2755 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 2756 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 2757 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2758 sc->bge_cdata.bge_rx_std_dmamap[i]); 2759 } 2760 if (sc->bge_cdata.bge_rx_std_sparemap) 2761 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2762 sc->bge_cdata.bge_rx_std_sparemap); 2763 2764 /* Destroy DMA maps for jumbo RX buffers. 
*/ 2765 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2766 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 2767 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2768 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2769 } 2770 if (sc->bge_cdata.bge_rx_jumbo_sparemap) 2771 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2772 sc->bge_cdata.bge_rx_jumbo_sparemap); 2773 2774 /* Destroy DMA maps for TX buffers. */ 2775 for (i = 0; i < BGE_TX_RING_CNT; i++) { 2776 if (sc->bge_cdata.bge_tx_dmamap[i]) 2777 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 2778 sc->bge_cdata.bge_tx_dmamap[i]); 2779 } 2780 2781 if (sc->bge_cdata.bge_rx_mtag) 2782 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 2783 if (sc->bge_cdata.bge_mtag_jumbo) 2784 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); 2785 if (sc->bge_cdata.bge_tx_mtag) 2786 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 2787 2788 /* Destroy standard RX ring. */ 2789 if (sc->bge_ldata.bge_rx_std_ring_paddr) 2790 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 2791 sc->bge_cdata.bge_rx_std_ring_map); 2792 if (sc->bge_ldata.bge_rx_std_ring) 2793 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 2794 sc->bge_ldata.bge_rx_std_ring, 2795 sc->bge_cdata.bge_rx_std_ring_map); 2796 2797 if (sc->bge_cdata.bge_rx_std_ring_tag) 2798 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 2799 2800 /* Destroy jumbo RX ring. */ 2801 if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) 2802 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2803 sc->bge_cdata.bge_rx_jumbo_ring_map); 2804 2805 if (sc->bge_ldata.bge_rx_jumbo_ring) 2806 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2807 sc->bge_ldata.bge_rx_jumbo_ring, 2808 sc->bge_cdata.bge_rx_jumbo_ring_map); 2809 2810 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 2811 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 2812 2813 /* Destroy RX return ring. 
 */
	if (sc->bge_ldata.bge_rx_return_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring: unload map, free memory, then destroy tag. */
	if (sc->bge_ldata.bge_tx_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block. */
	if (sc->bge_ldata.bge_status_block_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block. */
	if (sc->bge_ldata.bge_stats_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	if (sc->bge_cdata.bge_buffer_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);

	/* Destroy the parent tag last; all child tags are gone by now. */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}

/*
 * Allocate, map and load DMA'able memory for a single ring or control
 * block: create a child tag of bge_parent_tag with the requested
 * alignment, allocate 'maxsize' bytes of zeroed coherent memory, and
 * load its bus address into *paddr via bge_dma_map_addr().
 *
 * On controllers with the 4GB boundary DMA bug, if the loaded region
 * straddles a 4GB boundary the allocation is torn down and retried
 * with the DMA address space limited to 32 bits.
 *
 * Returns 0 on success or ENOMEM on failure.  NOTE(review): on an
 * intermediate failure the tag/memory created so far is left in the
 * softc-visible pointers; presumably released later by bge_dma_free()
 * on the caller's error path — verify against bge_attach()'s fail
 * handling.
 */
static int
bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
    bus_addr_t *paddr, const char *msg)
{
	struct bge_dmamap_arg ctx;
	bus_addr_t lowaddr;
	bus_size_t ring_end;
	int error;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not create %s dma tag\n", msg);
		return (ENOMEM);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	/* Load the address of the ring. */
	ctx.bge_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	*paddr = ctx.bge_busaddr;
	ring_end = *paddr + maxsize;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
	    BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
		/*
		 * 4GB boundary crossed.  Limit maximum allowable DMA
		 * address space to 32bit and try again.
		 */
		bus_dmamap_unload(*tag, *map);
		bus_dmamem_free(*tag, *ring, *map);
		bus_dma_tag_destroy(*tag);
		if (bootverbose)
			device_printf(sc->bge_dev, "4GB boundary crossed, "
			    "limit DMA address space to 32bit for %s\n", msg);
		*ring = NULL;
		*tag = NULL;
		*map = NULL;
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
	return (0);
}

/*
 * Allocate all DMA resources for the controller: the parent tag, the
 * RX/RX-return/TX rings, status and statistics blocks, the buffer
 * parent tag, the TX/RX mbuf tags and their per-descriptor DMA maps,
 * and (on jumbo-capable chips) the jumbo ring, tag and maps.
 * Returns 0 on success or an errno; on failure the caller is expected
 * to release whatever was created (via bge_dma_free()).
 */
static int
bge_dma_alloc(struct bge_softc *sc)
{
	bus_addr_t lowaddr;
	bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	/* 5714/5715 embedded bridge mishandles >40bit addresses. */
	if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
		lowaddr = BGE_DMA_MAXADDR;
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for standard RX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
	    &sc->bge_cdata.bge_rx_std_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
	    &sc->bge_cdata.bge_rx_std_ring_map,
	    &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
	if (error)
		return (error);

	/* Create tag for RX return ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
	    &sc->bge_cdata.bge_rx_return_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
	    &sc->bge_cdata.bge_rx_return_ring_map,
	    &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
	if (error)
		return (error);

	/* Create tag for TX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
	    &sc->bge_cdata.bge_tx_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_tx_ring,
	    &sc->bge_cdata.bge_tx_ring_map,
	    &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
	if (error)
		return (error);

	/*
	 * Create tag for status block.
	 * Because we only use single Tx/Rx/Rx return ring, use
	 * minimum status block size except BCM5700 AX/BX which
	 * seems to want to see full status block size regardless
	 * of configured number of ring.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
		sbsz = BGE_STATUS_BLK_SZ;
	else
		sbsz = 32;
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
	    &sc->bge_cdata.bge_status_tag,
	    (uint8_t **)&sc->bge_ldata.bge_status_block,
	    &sc->bge_cdata.bge_status_map,
	    &sc->bge_ldata.bge_status_block_paddr, "status block");
	if (error)
		return (error);

	/* Create tag for statistics block. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
	    &sc->bge_cdata.bge_stats_tag,
	    (uint8_t **)&sc->bge_ldata.bge_stats,
	    &sc->bge_cdata.bge_stats_map,
	    &sc->bge_ldata.bge_stats_paddr, "statistics block");
	if (error)
		return (error);

	/* Create tag for jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
		    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map,
		    &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
		if (error)
			return (error);
	}

	/* Create parent tag for buffers. */
	boundary = 0;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
		boundary = BGE_DMA_BNDRY;
		/*
		 * XXX
		 * watchdog timeout issue was observed on BCM5704 which
		 * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge).
		 * Both limiting DMA address space to 32bits and flushing
		 * mailbox write seem to address the issue.
		 */
		if (sc->bge_pcixcap != 0)
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate buffer dma tag\n");
		return (ENOMEM);
	}
	/* Create tag for Tx mbufs: larger segments when TSO is in play. */
	if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
		txsegsz = BGE_TSOSEG_SZ;
		txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
	} else {
		txsegsz = MCLBYTES;
		txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
	}
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_mtag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for Rx mbufs. */
	if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
		rxmaxsegsz = MJUM9BYTES;
	else
		rxmaxsegsz = MCLBYTES;
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
	    rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);

	if (error) {
		device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers (one spare + one per slot). */
	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
	    &sc->bge_cdata.bge_rx_std_sparemap);
	if (error) {
		device_printf(sc->bge_dev,
		    "can't create spare DMA map for RX\n");
		return (ENOMEM);
	}
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tags for jumbo RX buffers. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}
		/* Create DMA maps for jumbo RX buffers. */
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
		    0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create spare DMA map for jumbo RX\n");
			return (ENOMEM);
		}
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}
	}

	return (0);
}

/*
 * Return true if this device has more than one port.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int b, d, f, fscan, s;

	/* Scan every other function number at our own domain/bus/slot. */
	d = pci_get_domain(dev);
	b = pci_get_bus(dev);
	s = pci_get_slot(dev);
	f = pci_get_function(dev);
	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
		if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
			return (1);
	return (0);
}

/*
 * Return true if MSI can be used with this device.
 * Applies per-ASIC errata: 5714/5714_A0 only in multi-port mode,
 * 5750 AX/BX never, 5784 AX never, otherwise any 575X-plus chip.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

#ifdef __HAIKU__
	// temporary workaround, the int disable happens in msi enable through
	// setup intr in our case which undoes the re-enabling done by the driver
	return 0;
#endif

	/* Honor the "bge_msi" tunable/knob first. */
	if (sc->bge_msi == 0)
		return (0);

	/* Disable MSI for polling(4). */
#ifdef DEVICE_POLLING
	return (0);
#endif
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5784:
		/*
		 * Prevent infinite "watchdog timeout" errors
		 * in some MacBook Pro and make it work out-of-the-box.
		 */
		if (sc->bge_chiprev == BGE_CHIPREV_5784_AX)
			break;
		/* FALLTHROUGH */
	default:
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}
	return (can_use_msi);
}

/*
 * Return true (and print a notice) if the device sits behind a PCI
 * bridge known to reorder mailbox register writes; the caller then
 * enables the BGE_FLAG_MBOX_REORDER workaround.  Walks up the Newbus
 * parent chain as long as it alternates pcib/pci devices.
 * Always returns 0 on Haiku, where this detection is compiled out.
 */
static int
bge_mbox_reorder(struct bge_softc *sc)
{
#ifndef __HAIKU__
	/* Lists of PCI bridges that are known to reorder mailbox writes. */
	static const struct mbox_reorder {
		const uint16_t vendor;
		const uint16_t device;
		const char *desc;
	} mbox_reorder_lists[] = {
		{ 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
	};
	devclass_t pci, pcib;
	device_t bus, dev;
	int i;

	pci = devclass_find("pci");
	pcib = devclass_find("pcib");
	dev = sc->bge_dev;
	bus = device_get_parent(dev);
	for (;;) {
		/* Step up one bridge level: dev = pcib, bus = its pci bus. */
		dev = device_get_parent(bus);
		bus = device_get_parent(dev);
		if (device_get_devclass(dev) != pcib)
			break;
		if (device_get_devclass(bus) != pci)
			break;
		for (i = 0; i < nitems(mbox_reorder_lists); i++) {
			if (pci_get_vendor(dev) ==
			    mbox_reorder_lists[i].vendor &&
			    pci_get_device(dev) ==
			    mbox_reorder_lists[i].device) {
				device_printf(sc->bge_dev,
				    "enabling MBOX workaround for %s\n",
				    mbox_reorder_lists[i].desc);
				return (1);
			}
		}
	}
#endif
	return (0);
}

/*
 * Print a one-line summary of the chip: chip/ASIC/chip revision IDs
 * plus the detected bus type (PCI-E, PCI-X or plain PCI) and, for the
 * latter two, the bus clock and width read from chip registers.
 */
static void
bge_devinfo(struct bge_softc *sc)
{
	uint32_t cfg, clk;

	device_printf(sc->bge_dev,
	    "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
	if (sc->bge_flags & BGE_FLAG_PCIE)
		printf("PCI-E\n");
	else if (sc->bge_flags & BGE_FLAG_PCIX) {
		printf("PCI-X ");
		cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
		if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
			clk = 133;
		else {
			/* Decode the PCI-X clock from the clock control reg. */
			clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			switch (clk) {
			case 0:
				clk = 33;
				break;
			case 2:
				clk = 50;
				break;
			case 4:
				clk = 66;
				break;
			case 6:
				clk = 100;
				break;
			case 7:
				clk = 133;
				break;
			}
		}
		printf("%u MHz\n", clk);
	} else {
		if (sc->bge_pcixcap != 0)
			printf("PCI on PCI-X ");
		else
			printf("PCI ");
		cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
		if (cfg &
		    BGE_PCISTATE_PCI_BUSSPEED)
			clk = 66;
		else
			clk = 33;
		if (cfg & BGE_PCISTATE_32BIT_BUS)
			printf("%u MHz; 32bit\n", clk);
		else
			printf("%u MHz; 64bit\n", clk);
	}
}

/*
 * Device attach routine: map the register BARs, identify the chip and
 * record its errata in bge_flags, reset and initialize the controller,
 * allocate DMA resources, configure the ifnet and PHY/TBI media, and
 * finally hook up the interrupt handler.  On any failure 'error' is
 * set and bge_detach() is invoked from the 'fail' label to release
 * whatever was acquired.
 */
static int
bge_attach(device_t dev)
{
	if_t ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0, misccfg, pcistate;
	u_char eaddr[ETHER_ADDR_LEN];
	int capmask, error, reg, rid, trys;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
	NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Allocate control/status registers.
	 */
	rid = PCIR_BAR(0);
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf (sc->bge_dev, "couldn't map BAR0 memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Save various chip information. */
	sc->bge_func_addr = pci_get_function(dev);
	sc->bge_chipid = bge_chipid(dev);
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Set default PHY address. */
	sc->bge_phy_addr = 1;
	/*
	 * PHY address mapping for various devices.
	 *
	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   1   |   X   |   X   |   X   |
	 * BCM5704  |   1   |   X   |   1   |   X   |
	 * BCM5717  |   1   |   8   |   2   |   9   |
	 * BCM5719  |   1   |   8   |   2   |   9   |
	 * BCM5720  |   1   |   8   |   2   |   9   |
	 *
	 *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   X   |   X   |   X   |   X   |
	 * BCM5704  |   X   |   X   |   X   |   X   |
	 * BCM5717  |   X   |   X   |   X   |   X   |
	 * BCM5719  |   3   |   10  |   4   |   11  |
	 * BCM5720  |   X   |   X   |   X   |   X   |
	 *
	 * Other addresses may respond but they are not
	 * IEEE compliant PHYs and should be ignored.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			if (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		} else {
			/* 5717 A0 reports SerDes via the CPMU strap instead. */
			if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES)
				sc->bge_phy_addr = sc->bge_func_addr + 8;
			else
				sc->bge_phy_addr = sc->bge_func_addr + 1;
		}
	}

	if (bge_has_eaddr(sc))
		sc->bge_flags |= BGE_FLAG_EADDR;

	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5762:
	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bge_flags |= BGE_FLAG_57765_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
		    BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
		    BGE_FLAG_JUMBO_FRAME;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720) {
			/*
			 * Enable work around for DMA engine miscalculation
			 * of TXMBUF available space.
			 */
			sc->bge_flags |= BGE_FLAG_RDMA_BUG;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
				/* Jumbo frame on BCM5719 A0 does not work. */
				sc->bge_flags &= ~BGE_FLAG_JUMBO;
			}
		}
		break;
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5784:
	case BGE_ASICREV_BCM5785:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM57780:
		sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
		    BGE_FLAG_5705_PLUS;
		break;
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	/* Identify chips with APE processor. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5762:
		sc->bge_flags |= BGE_FLAG_APE;
		break;
	}

	/* Chips with APE need BAR2 access for APE registers/memory. */
	if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
		rid = PCIR_BAR(2);
		sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
		if (sc->bge_res2 == NULL) {
			device_printf (sc->bge_dev,
			    "couldn't map BAR2 memory\n");
			error = ENXIO;
			goto fail;
		}

		/* Enable APE register/memory access by host driver. */
		pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
		pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);

		bge_ape_lock_init(sc);
		bge_ape_read_fw_ver(sc);
	}

	/* Add SYSCTLs, requires the chipset family to be set. */
	bge_add_sysctls(sc);

	/* Identify the chips that use an CPMU. */
	if (BGE_IS_5717_PLUS(sc) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
		sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
	if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
		sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
	else
		sc->bge_mi_mode = BGE_MIMODE_BASE;
	/* Enable auto polling for BCM570[0-5]. */
	if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
		sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;

	/*
	 * All Broadcom controllers have 4GB boundary DMA bug.
	 * Whenever an address crosses a multiple of the 4GB boundary
	 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
	 * state machine will lockup and cause the device to hang.
	 */
	sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;

	/* BCM5755 or higher and BCM5906 have short DMA bug. */
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;

	/*
	 * BCM5719 cannot handle DMA requests for DMA segments that
	 * have larger than 4KB in size.  However the maximum DMA
	 * segment size created in DMA tag is 4KB for TSO, so we
	 * wouldn't encounter the issue here.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
		sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;

	misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
		    misccfg == BGE_MISCCFG_BOARD_ID_5788M)
			sc->bge_flags |= BGE_FLAG_5788;
	}

	capmask = BMSR_DEFCAPMASK;
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
	    (misccfg == 0x4000 || misccfg == 0x8000)) ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    pci_get_vendor(dev) == BCOM_VENDORID &&
	    (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
	    (pci_get_vendor(dev) == BCOM_VENDORID &&
	    (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
	    pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		/* These chips are 10/100 only. */
		capmask &= ~BMSR_EXTSTAT;
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
	}

	/*
	 * Some controllers seem to require a special firmware to use
	 * TSO. But the firmware is not available to FreeBSD and Linux
	 * claims that the TSO performed by the firmware is slower than
	 * hardware based TSO. Moreover the firmware based TSO has one
	 * known bug which can't handle TSO if Ethernet header + IP/TCP
	 * header is greater than 80 bytes. A workaround for the TSO
	 * bug exist but it seems it's too expensive than not using
	 * TSO at all. Some hardwares also have the TSO bug so limit
	 * the TSO to the controllers that are not affected TSO issues
	 * (e.g. 5755 or higher).
	 */
	if (BGE_IS_5717_PLUS(sc)) {
		/* BCM5717 requires different TSO configuration. */
		sc->bge_flags |= BGE_FLAG_TSO3;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
		    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
			/* TSO on BCM5719 A0 does not work. */
			sc->bge_flags &= ~BGE_FLAG_TSO3;
		}
	} else if (BGE_IS_5755_PLUS(sc)) {
		/*
		 * BCM5754 and BCM5787 shares the same ASIC id so
		 * explicit device id check is required.
		 * Due to unknown reason TSO does not work on BCM5755M.
		 */
		if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
		    pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
		    pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
			sc->bge_flags |= BGE_FLAG_TSO;
	}

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		/*
		 * Found a PCI Express capabilities register, this
		 * must be a PCI Express device.
		 */
		sc->bge_flags |= BGE_FLAG_PCIE;
		sc->bge_expcap = reg;
		/* Extract supported maximum payload size. */
		sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
		    PCIER_DEVICE_CAP, 2);
		sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720)
			sc->bge_expmrq = 2048;
		else
			sc->bge_expmrq = 4096;
		pci_set_max_read_req(dev, sc->bge_expmrq);
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
			sc->bge_pcixcap = reg;
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}

	/*
	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
	 * not actually a MAC controller bug but an issue with the embedded
	 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
	 */
	if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
		sc->bge_flags |= BGE_FLAG_40BIT_BUG;
	/*
	 * Some PCI-X bridges are known to trigger write reordering to
	 * the mailbox registers. Typical phenomena is watchdog timeouts
	 * caused by out-of-order TX completions.  Enable workaround for
	 * PCI-X devices that live behind these bridges.
	 * Note, PCI-X controllers can run in PCI mode so we can't use
	 * BGE_FLAG_PCIX flag to detect PCI-X controllers.
	 */
	if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
		sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
	/*
	 * Allocate the interrupt, using MSI if possible.  These devices
	 * support 8 MSI messages, but only the first one is used in
	 * normal operation.
	 */
	rid = 0;
	if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
		sc->bge_msicap = reg;
		reg = 1;
		if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) {
			rid = 1;
			sc->bge_flags |= BGE_FLAG_MSI;
		}
	}

	/*
	 * All controllers except BCM5700 supports tagged status but
	 * we use tagged status only for MSI case on BCM5717. Otherwise
	 * MSI on BCM5717 does not work.
	 */
#ifndef DEVICE_POLLING
	if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
		sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
#endif

	/* rid 0 = legacy INTx (shareable), rid 1 = first MSI vector. */
	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));

	if (sc->bge_irq == NULL) {
		device_printf(sc->bge_dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	bge_devinfo(sc);

	sc->bge_asf_mode = 0;
	/* No ASF if APE present. */
	if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
		if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
		    BGE_SRAM_DATA_SIG_MAGIC)) {
			if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
			    BGE_HWCFG_ASF) {
				sc->bge_asf_mode |= ASF_ENABLE;
				sc->bge_asf_mode |= ASF_STACKUP;
				if (BGE_IS_575X_PLUS(sc))
					sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	/* Quiesce firmware and reset the chip before touching it further. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
	if (bge_reset(sc)) {
		device_printf(sc->bge_dev, "chip reset failed\n");
		error = ENXIO;
		goto fail;
	}

	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	if (bge_chipinit(sc)) {
		device_printf(sc->bge_dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	error = bge_get_eaddr(sc, eaddr);
	if (error) {
		device_printf(sc->bge_dev,
		    "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* 5705 limits RX return ring to 512 entries. */
	if (BGE_IS_5717_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	else if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(sc)) {
		device_printf(sc->bge_dev,
		    "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 10;
	sc->bge_tx_max_coal_bds = 10;

	/* Initialize checksum features to use. */
	sc->bge_csum_features = BGE_CSUM_FEATURES;
	if (sc->bge_forced_udpcsum != 0)
		sc->bge_csum_features |= CSUM_UDP;

	/* Set up ifnet structure */
	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->bge_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, bge_ioctl);
	if_setstartfn(ifp, bge_start);
	if_setinitfn(ifp, bge_init);
	if_setgetcounterfn(ifp, bge_get_counter);
	if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_sethwassist(ifp, sc->bge_csum_features);
	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU);
	if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
		if_sethwassistbits(ifp, CSUM_TSO, 0);
		if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
	}
#ifdef IFCAP_VLAN_HWCSUM
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
#endif
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
		if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
		if_sethwassist(ifp, 0);
	}

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
	else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(sc->bge_dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
	    SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
		if (BGE_IS_5705_PLUS(sc)) {
			sc->bge_flags |= BGE_FLAG_MII_SERDES;
			sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
		} else
			sc->bge_flags |= BGE_FLAG_TBI;
	}

	/* Set various PHY bug flags. */
	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
	if (pci_get_subvendor(dev) == DELL_VENDORID)
		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
	if ((BGE_IS_5705_PLUS(sc)) &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
	    !BGE_IS_5717_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
			    pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
			if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
		} else
			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
	}

	/*
	 * Don't enable Ethernet@WireSpeed for the 5700 or the
	 * 5705 A0 and A1 chips.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* Fiber/TBI: manage media locally instead of via a MII PHY. */
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access the
		 * probe if ASF is running.  Retry a couple of times
		 * if we get a conflict with the ASF firmware accessing
		 * the PHY.
		 */
		trys = 0;
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
again:
		bge_asf_driver_up(sc);

		error = mii_attach(dev, &sc->bge_miibus, ifp,
		    (ifm_change_cb_t)bge_ifmedia_upd,
		    (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (error != 0) {
			if (trys++ < 4) {
				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev,
				    sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
				goto again;
			}
			device_printf(sc->bge_dev, "attaching PHYs failed\n");
			goto fail;
		}

		/*
		 * Now tell the firmware we are going up after probing the PHY
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell upper layer we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Hookup IRQ last.
	 */
	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
		/* Take advantage of single-shot MSI. */
		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
		sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->bge_tq);
		if (sc->bge_tq == NULL) {
			device_printf(dev, "could not create taskqueue.\n");
			ether_ifdetach(ifp);
			error = ENOMEM;
			goto fail;
		}
		error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
		    "%s taskq", device_get_nameunit(sc->bge_dev));
		if (error != 0) {
			device_printf(dev, "could not start threads.\n");
			ether_ifdetach(ifp);
			goto fail;
		}
		/* Filter-only handler; real work runs in the taskqueue. */
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
		    &sc->bge_intrhand);
	} else
		error = bus_setup_intr(dev, sc->bge_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
		    &sc->bge_intrhand);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
		goto fail;
	}

	/* Attach driver debugnet methods.
 */
    DEBUGNET_SET(ifp, bge);

fail:
    /* On any attach failure, undo everything set up so far. */
    if (error)
        bge_detach(dev);
    return (error);
}

/*
 * bge_detach: device detach entry point.  Stops the controller under
 * the softc lock, drains the stats callout and the MSI interrupt task,
 * tears down the ifmedia/PHY layer, and releases all bus resources.
 * Always returns 0.
 */
static int
bge_detach(device_t dev)
{
    struct bge_softc *sc;
    if_t ifp;

    sc = device_get_softc(dev);
    ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
    /* Leave polling mode before the interface goes away. */
    if (if_getcapenable(ifp) & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif

    if (device_is_attached(dev)) {
        ether_ifdetach(ifp);
        BGE_LOCK(sc);
        bge_stop(sc);
        BGE_UNLOCK(sc);
        /* Drain outside the lock: the callout takes BGE_LOCK itself. */
        callout_drain(&sc->bge_stat_ch);
    }

    /* Make sure no interrupt task is still in flight. */
    if (sc->bge_tq)
        taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);

    /* TBI (fiber) uses driver-owned ifmedia; copper uses miibus. */
    if (sc->bge_flags & BGE_FLAG_TBI)
        ifmedia_removeall(&sc->bge_ifmedia);
    else if (sc->bge_miibus != NULL) {
        bus_generic_detach(dev);
        device_delete_child(dev, sc->bge_miibus);
    }

    bge_release_resources(sc);

    return (0);
}

/*
 * bge_release_resources: free every bus-level resource owned by the
 * softc — taskqueue, interrupt handler, IRQ (plus any MSI), memory
 * BARs, ifnet, DMA memory, and finally the softc mutex.  Each resource
 * is checked before release, so this is safe to call from a partially
 * failed attach.
 */
static void
bge_release_resources(struct bge_softc *sc)
{
    device_t dev;

    dev = sc->bge_dev;

    if (sc->bge_tq != NULL)
        taskqueue_free(sc->bge_tq);

    if (sc->bge_intrhand != NULL)
        bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

    if (sc->bge_irq != NULL) {
        bus_release_resource(dev, SYS_RES_IRQ,
            rman_get_rid(sc->bge_irq), sc->bge_irq);
        pci_release_msi(dev);
    }

    if (sc->bge_res != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->bge_res), sc->bge_res);

    if (sc->bge_res2 != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->bge_res2), sc->bge_res2);

    if (sc->bge_ifp != NULL)
        if_free(sc->bge_ifp);

    bge_dma_free(sc);

    if (mtx_initialized(&sc->bge_mtx))    /* XXX */
        BGE_LOCK_DESTROY(sc);
}

/*
 * bge_reset: issue a global reset of the controller and restore the
 * PCI configuration state the reset clobbers.  The sequence (save PCI
 * state, write firmware magic, reset core clocks, restore state, wait
 * for the firmware handshake) is order-critical; chip-revision quirks
 * are applied along the way.  Returns 0 on success, 1 if the BCM5906
 * on-chip CPU never signals init-done.
 */
static int
bge_reset(struct bge_softc *sc)
{
    device_t dev;
    uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
    void (*write_op)(struct bge_softc *, int, int);
    uint16_t devctl;
    int i;

    dev = sc->bge_dev;

    /* Capture the MAC mode bits that must survive the reset. */
    mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
    if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
        mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
    mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

    /* Pick a register write method that still works while in reset. */
    if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
        (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
        if (sc->bge_flags & BGE_FLAG_PCIE)
            write_op = bge_writemem_direct;
        else
            write_op = bge_writemem_ind;
    } else
        write_op = bge_writereg_ind;

    /* Acquire the NVRAM arbitration grant (absent on 5700/5701). */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5701) {
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
        for (i = 0; i < 8000; i++) {
            if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
                BGE_NVRAMSWARB_GNT1)
                break;
            DELAY(20);
        }
        if (i == 8000) {
            if (bootverbose)
                device_printf(dev, "NVRAM lock timedout!\n");
        }
    }
    /* Take APE lock when performing reset. */
    bge_ape_lock(sc, BGE_APE_LOCK_GRC);

    /* Save some important PCI state. */
    cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
    command = pci_read_config(dev, BGE_PCI_CMD, 4);

    pci_write_config(dev, BGE_PCI_MISC_CTL,
        BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
        BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);

    /* Disable fastboot on controllers that support it. */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
        BGE_IS_5755_PLUS(sc)) {
        if (bootverbose)
            device_printf(dev, "Disabling fastboot\n");
        CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
    }

    /*
     * Write the magic number to SRAM at offset 0xB50.
     * When firmware finishes its initialization it will
     * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
     */
    bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

    reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;

    /* XXX: Broadcom Linux driver. */
    if (sc->bge_flags & BGE_FLAG_PCIE) {
        if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
            (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
            if (CSR_READ_4(sc, 0x7E2C) == 0x60)    /* PCIE 1.0 */
                CSR_WRITE_4(sc, 0x7E2C, 0x20);
        }
        if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
            /* Prevent PCIE link training during global reset */
            CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
            reset |= 1 << 29;
        }
    }

    /* 5906: put the internal CPU in reset and release its halt. */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
        val = CSR_READ_4(sc, BGE_VCPU_STATUS);
        CSR_WRITE_4(sc, BGE_VCPU_STATUS,
            val | BGE_VCPU_STATUS_DRV_RESET);
        val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
        CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
            val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
    }

    /*
     * Set GPHY Power Down Override to leave GPHY
     * powered up in D0 uninitialized.
     */
    if (BGE_IS_5705_PLUS(sc) &&
        (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
        reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

    /* Issue global reset */
    write_op(sc, BGE_MISC_CFG, reset);

    if (sc->bge_flags & BGE_FLAG_PCIE)
        DELAY(100 * 1000);
    else
        DELAY(1000);

    /* XXX: Broadcom Linux driver. */
    if (sc->bge_flags & BGE_FLAG_PCIE) {
        if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
            DELAY(500000);    /* wait for link training to complete */
            val = pci_read_config(dev, 0xC4, 4);
            pci_write_config(dev, 0xC4, val | (1 << 15), 4);
        }
        devctl = pci_read_config(dev,
            sc->bge_expcap + PCIER_DEVICE_CTL, 2);
        /* Clear enable no snoop and disable relaxed ordering. */
        devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
            PCIEM_CTL_NOSNOOP_ENABLE);
        pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
            devctl, 2);
        pci_set_max_read_req(dev, sc->bge_expmrq);
        /* Clear error status. */
        pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
            PCIEM_STA_CORRECTABLE_ERROR |
            PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
            PCIEM_STA_UNSUPPORTED_REQ, 2);
    }

    /* Reset some of the PCI state that got zapped by reset. */
    pci_write_config(dev, BGE_PCI_MISC_CTL,
        BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
        BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
    val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
    if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
        (sc->bge_flags & BGE_FLAG_PCIX) != 0)
        val |= BGE_PCISTATE_RETRY_SAME_DMA;
    if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
        val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
            BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
            BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
    pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
    pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
    pci_write_config(dev, BGE_PCI_CMD, command, 4);
    /*
     * Disable PCI-X relaxed ordering to ensure status block update
     * comes first then packet buffer DMA. Otherwise driver may
     * read stale status block.
     */
    if (sc->bge_flags & BGE_FLAG_PCIX) {
        devctl = pci_read_config(dev,
            sc->bge_pcixcap + PCIXR_COMMAND, 2);
        devctl &= ~PCIXM_COMMAND_ERO;
        if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
            devctl &= ~PCIXM_COMMAND_MAX_READ;
            devctl |= PCIXM_COMMAND_MAX_READ_2048;
        } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
            devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
                PCIXM_COMMAND_MAX_READ);
            devctl |= PCIXM_COMMAND_MAX_READ_2048;
        }
        pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
            devctl, 2);
    }
    /* Re-enable MSI, if necessary, and enable the memory arbiter. */
    if (BGE_IS_5714_FAMILY(sc)) {
        /* This chip disables MSI on reset. */
        if (sc->bge_flags & BGE_FLAG_MSI) {
            val = pci_read_config(dev,
                sc->bge_msicap + PCIR_MSI_CTRL, 2);
            pci_write_config(dev,
                sc->bge_msicap + PCIR_MSI_CTRL,
                val | PCIM_MSICTRL_MSI_ENABLE, 2);
            val = CSR_READ_4(sc, BGE_MSI_MODE);
            CSR_WRITE_4(sc, BGE_MSI_MODE,
                val | BGE_MSIMODE_ENABLE);
        }
        val = CSR_READ_4(sc, BGE_MARB_MODE);
        CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
    } else
        CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

    /* Fix up byte swapping. */
    CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));

    /* Restore the MAC mode bits saved before the reset. */
    val = CSR_READ_4(sc, BGE_MAC_MODE);
    val = (val & ~mac_mode_mask) | mac_mode;
    CSR_WRITE_4(sc, BGE_MAC_MODE, val);
    DELAY(40);

    bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

    if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
        /* 5906: wait for the internal CPU to come back up. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
            val = CSR_READ_4(sc, BGE_VCPU_STATUS);
            if (val & BGE_VCPU_STATUS_INIT_DONE)
                break;
            DELAY(100);
        }
        if (i == BGE_TIMEOUT) {
            device_printf(dev, "reset timed out\n");
            return (1);
        }
    } else {
        /*
         * Poll until we see the 1's complement of the magic number.
         * This indicates that the firmware initialization is complete.
         * We expect this to fail if no chip containing the Ethernet
         * address is fitted though.
         */
        for (i = 0; i < BGE_TIMEOUT; i++) {
            DELAY(10);
            val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
            if (val == ~BGE_SRAM_FW_MB_MAGIC)
                break;
        }

        if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
            device_printf(dev,
                "firmware handshake timed out, found 0x%08x\n",
                val);
        /* BCM57765 A0 needs additional time before accessing. */
        if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
            DELAY(10 * 1000);    /* XXX */
    }

    /*
     * The 5704 in TBI mode apparently needs some special
     * adjustment to ensure the SERDES drive level is set
     * to 1.2V.
     */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
        sc->bge_flags & BGE_FLAG_TBI) {
        val = CSR_READ_4(sc, BGE_SERDES_CFG);
        val = (val & ~0xFFF) | 0x880;
        CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
    }

    /* XXX: Broadcom Linux driver. */
    if (sc->bge_flags & BGE_FLAG_PCIE &&
        !BGE_IS_5717_PLUS(sc) &&
        sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5785) {
        /* Enable Data FIFO protection.
 */
        val = CSR_READ_4(sc, 0x7C00);
        CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
    }

    /* 5720: clear the MAC clock-speed override left by reset. */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
        BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
            CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

    return (0);
}

/*
 * bge_rxreuse_std: hand a standard-ring RX buffer back to the chip
 * unchanged (used when the frame was bad or a replacement mbuf could
 * not be allocated).  Rebuilds the descriptor at the current standard
 * producer slot and advances the producer index.
 */
static __inline void
bge_rxreuse_std(struct bge_softc *sc, int i)
{
    struct bge_rx_bd *r;

    r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
    r->bge_flags = BGE_RXBDFLAG_END;
    r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
    r->bge_idx = i;
    BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
}

/*
 * bge_rxreuse_jumbo: jumbo-ring counterpart of bge_rxreuse_std.
 * Jumbo buffers are described by extended descriptors carrying up to
 * four segment lengths.
 */
static __inline void
bge_rxreuse_jumbo(struct bge_softc *sc, int i)
{
    struct bge_extrx_bd *r;

    r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
    r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
    r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
    r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
    r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
    r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
    r->bge_idx = i;
    BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 *
 * rx_prod is the chip's return-ring producer index from the status
 * block; holdlck != 0 means the caller holds BGE_LOCK and wants it
 * dropped around if_input().  Returns the number of packets passed
 * up the stack.
 */

static int
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
{
    if_t ifp;
    int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
    uint16_t rx_cons;

    rx_cons = sc->bge_rx_saved_considx;

    /* Nothing to do. */
    if (rx_cons == rx_prod)
        return (rx_npkts);

    ifp = sc->bge_ifp;

    /* Sync the return ring for reading, supply rings for rewriting. */
    bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
        sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
    bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
        sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
    /* Jumbo ring is only in play when the MTU needs it. */
    if (BGE_IS_JUMBO_CAPABLE(sc) &&
        if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
        ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
            sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);

    while (rx_cons != rx_prod) {
        struct bge_rx_bd *cur_rx;
        uint32_t rxidx;
        struct mbuf *m = NULL;
        uint16_t vlan_tag = 0;
        int have_tag = 0;

#ifdef DEVICE_POLLING
        /* In polling mode, honor the per-call packet budget. */
        if (if_getcapenable(ifp) & IFCAP_POLLING) {
            if (sc->rxcycles <= 0)
                break;
            sc->rxcycles--;
        }
#endif

        cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];

        rxidx = cur_rx->bge_idx;
        BGE_INC(rx_cons, sc->bge_return_ring_cnt);

        if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
            cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
            have_tag = 1;
            vlan_tag = cur_rx->bge_vlan_tag;
        }

        if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
            jumbocnt++;
            m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
            /* On error or allocation failure, recycle the buffer. */
            if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                bge_rxreuse_jumbo(sc, rxidx);
                continue;
            }
            if (bge_newbuf_jumbo(sc, rxidx) != 0) {
                bge_rxreuse_jumbo(sc, rxidx);
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                continue;
            }
            BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
        } else {
            stdcnt++;
            m = sc->bge_cdata.bge_rx_std_chain[rxidx];
            if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                bge_rxreuse_std(sc, rxidx);
                continue;
            }
            if (bge_newbuf_std(sc, rxidx) != 0) {
                bge_rxreuse_std(sc, rxidx);
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                continue;
            }
            BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#ifndef __NO_STRICT_ALIGNMENT
        /*
         * For architectures with strict alignment we must make sure
         * the payload is aligned.
         */
        if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
            bcopy(m->m_data, m->m_data + ETHER_ALIGN,
                cur_rx->bge_len);
            m->m_data += ETHER_ALIGN;
        }
#endif
        m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
        m->m_pkthdr.rcvif = ifp;

        if (if_getcapenable(ifp) & IFCAP_RXCSUM)
            bge_rxcsum(sc, cur_rx, m);

        /*
         * If we received a packet with a vlan tag,
         * attach that information to the packet.
         */
        if (have_tag) {
            m->m_pkthdr.ether_vtag = vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* Drop the softc lock around the stack upcall if we hold it. */
        if (holdlck != 0) {
            BGE_UNLOCK(sc);
            if_input(ifp, m);
            BGE_LOCK(sc);
        } else
            if_input(ifp, m);
        rx_npkts++;

        /* The upcall may have brought the interface down. */
        if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
            return (rx_npkts);
    }

    bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
        sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
    if (stdcnt > 0)
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);

    if (jumbocnt > 0)
        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
            sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);

    /* Tell the chip how far we got and where new buffers are. */
    sc->bge_rx_saved_considx = rx_cons;
    bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
    if (stdcnt)
        bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
            BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
    if (jumbocnt)
        bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
            BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
#ifdef notyet
    /*
     * This register wraps very quickly under heavy packet drops.
     * If you need correct statistics, you can enable this check.
     */
    if (BGE_IS_5705_PLUS(sc))
        if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
#endif
    return (rx_npkts);
}

/*
 * bge_rxcsum: translate the RX descriptor's hardware checksum results
 * into mbuf csum_flags.  5717-plus parts report IPv6 and per-error
 * flags; older parts report a raw IP header checksum that must equal
 * 0xFFFF when valid.
 */
static void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{

    if (BGE_IS_5717_PLUS(sc)) {
        /* Hardware checksums only cover IPv4 here. */
        if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
            if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                if ((cur_rx->bge_error_flag &
                    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
            }
            if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
                m->m_pkthdr.csum_data =
                    cur_rx->bge_tcp_udp_csum;
                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
                    CSUM_PSEUDO_HDR;
            }
        }
    } else {
        if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
            m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
            if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        }
        /* Skip runts: hardware csum on padded frames is unreliable. */
        if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
            m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
            m->m_pkthdr.csum_data =
                cur_rx->bge_tcp_udp_csum;
            m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
                CSUM_PSEUDO_HDR;
        }
    }
}

/*
 * bge_txeof: reclaim completed TX descriptors up to tx_cons (the
 * chip's consumer index from the status block), freeing the
 * transmitted mbufs.  Called with the softc lock held.
 */
static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
    struct bge_tx_bd *cur_tx;
    if_t ifp;

    BGE_LOCK_ASSERT(sc);

    /* Nothing to do. */
    if (sc->bge_tx_saved_considx == tx_cons)
        return;

    ifp = sc->bge_ifp;

    bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
        sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
    /*
     * Go through our tx ring and free mbufs for those
     * frames that have been sent.
 */
    while (sc->bge_tx_saved_considx != tx_cons) {
        uint32_t idx;

        idx = sc->bge_tx_saved_considx;
        cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
        /* Count one output packet per last descriptor of a frame. */
        if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
            if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
            bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
                sc->bge_cdata.bge_tx_dmamap[idx],
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
                sc->bge_cdata.bge_tx_dmamap[idx]);
            m_freem(sc->bge_cdata.bge_tx_chain[idx]);
            sc->bge_cdata.bge_tx_chain[idx] = NULL;
        }
        sc->bge_txcnt--;
        BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
    }

    /* Ring space was freed; clear OACTIVE and the watchdog if idle. */
    if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    if (sc->bge_txcnt == 0)
        sc->bge_timer = 0;
}

#ifdef DEVICE_POLLING
/*
 * bge_poll: polling-mode entry point.  Reads the status block, notes
 * link events, then services RX (bounded by 'count' packets) and TX.
 * Returns the number of RX packets processed.
 */
static int
bge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
    struct bge_softc *sc = if_getsoftc(ifp);
    uint16_t rx_prod, tx_cons;
    uint32_t statusword;
    int rx_npkts = 0;

    BGE_LOCK(sc);
    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
        BGE_UNLOCK(sc);
        return (rx_npkts);
    }

    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    /* Fetch updates from the status block. */
    rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
    tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;

    statusword = sc->bge_ldata.bge_status_block->bge_status;
    /* Clear the status so the next pass only sees the changes. */
    sc->bge_ldata.bge_status_block->bge_status = 0;

    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
    if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
        sc->bge_link_evt++;

    if (cmd == POLL_AND_CHECK_STATUS)
        if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
            sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
            sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
            bge_link_upd(sc);

    sc->rxcycles = count;
    rx_npkts = bge_rxeof(sc, rx_prod, 1);
    /* bge_rxeof() drops the lock; the interface may have stopped. */
    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
        BGE_UNLOCK(sc);
        return (rx_npkts);
    }
    bge_txeof(sc, tx_cons);
    if (!if_sendq_empty(ifp))
        bge_start_locked(ifp);

    BGE_UNLOCK(sc);
    return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/*
 * bge_msi_intr: MSI filter handler.  Just hands off to the interrupt
 * taskqueue; all real work happens in bge_intr_task().
 */
static int
bge_msi_intr(void *arg)
{
    struct bge_softc *sc;

    sc = (struct bge_softc *)arg;
    /*
     * This interrupt is not shared and controller already
     * disabled further interrupt.
     */
    taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
    return (FILTER_HANDLED);
}

/*
 * bge_intr_task: deferred (taskqueue) interrupt service for MSI.
 * Snapshots the status block, handles link changes, re-enables the
 * interrupt via the mailbox (with the status tag when tagged status
 * is supported), then services RX and TX.
 */
static void
bge_intr_task(void *arg, int pending)
{
    struct bge_softc *sc;
    if_t ifp;
    uint32_t status, status_tag;
    uint16_t rx_prod, tx_cons;

    sc = (struct bge_softc *)arg;
    ifp = sc->bge_ifp;

    BGE_LOCK(sc);
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        BGE_UNLOCK(sc);
        return;
    }

    /* Get updated status block. */
    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    /* Save producer/consumer indices. */
    rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
    tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
    status = sc->bge_ldata.bge_status_block->bge_status;
    status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
    /* Dirty the status flag. */
    sc->bge_ldata.bge_status_block->bge_status = 0;
    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
        status_tag = 0;

    if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
        bge_link_upd(sc);

    /* Let controller work. */
    bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
        sc->bge_rx_saved_considx != rx_prod) {
        /* Check RX return ring producer/consumer. */
        BGE_UNLOCK(sc);
        bge_rxeof(sc, rx_prod, 0);
        BGE_LOCK(sc);
    }
    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
        /* Check TX ring producer/consumer. */
        bge_txeof(sc, tx_cons);
        if (!if_sendq_empty(ifp))
            bge_start_locked(ifp);
    }
    BGE_UNLOCK(sc);
}

/*
 * bge_intr: legacy (INTx) interrupt handler.  Acks the interrupt,
 * snapshots the status block, handles link changes, then services RX
 * and TX with the softc lock held.
 */
static void
bge_intr(void *xsc)
{
    struct bge_softc *sc;
    if_t ifp;
    uint32_t statusword;
    uint16_t rx_prod, tx_cons;

    sc = xsc;

    BGE_LOCK(sc);

    ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
    /* While polling is active the poll loop does all the work. */
    if (if_getcapenable(ifp) & IFCAP_POLLING) {
        BGE_UNLOCK(sc);
        return;
    }
#endif

    /*
     * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
     * disable interrupts by writing nonzero like we used to, since with
     * our current organization this just gives complications and
     * pessimizations for re-enabling interrupts. We used to have races
     * instead of the necessary complications. Disabling interrupts
     * would just reduce the chance of a status update while we are
     * running (by switching to the interrupt-mode coalescence
     * parameters), but this chance is already very low so it is more
     * efficient to get another interrupt than prevent it.
     *
     * We do the ack first to ensure another interrupt if there is a
     * status update after the ack. We don't check for the status
     * changing later because it is more efficient to get another
     * interrupt than prevent it, not quite as above (not checking is
     * a smaller optimization than not toggling the interrupt enable,
     * since checking doesn't involve PCI accesses and toggling require
     * the status check). So toggling would probably be a pessimization
     * even with MSI. It would only be needed for using a task queue.
     */
    bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

    /*
     * Do the mandatory PCI flush as well as get the link status.
     */
    statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;

    /* Make sure the descriptor ring indexes are coherent. */
    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
    tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
    sc->bge_ldata.bge_status_block->bge_status = 0;
    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
        sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
        statusword || sc->bge_link_evt)
        bge_link_upd(sc);

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
        /* Check RX return ring producer/consumer. */
        bge_rxeof(sc, rx_prod, 1);
    }

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
        /* Check TX ring producer/consumer. */
        bge_txeof(sc, tx_cons);
    }

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
        !if_sendq_empty(ifp))
        bge_start_locked(ifp);

    BGE_UNLOCK(sc);
}

/*
 * bge_asf_driver_up: ASF keep-alive.  Called once per second from
 * bge_tick(); every other call it posts a DRV_ALIVE command to the
 * management firmware mailbox and kicks the RX CPU event register.
 */
static void
bge_asf_driver_up(struct bge_softc *sc)
{
    if (sc->bge_asf_mode & ASF_STACKUP) {
        /* Send ASF heartbeat approx.
every 2s */ 4754 if (sc->bge_asf_count) 4755 sc->bge_asf_count --; 4756 else { 4757 sc->bge_asf_count = 2; 4758 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, 4759 BGE_FW_CMD_DRV_ALIVE); 4760 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); 4761 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 4762 BGE_FW_HB_TIMEOUT_SEC); 4763 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, 4764 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | 4765 BGE_RX_CPU_DRV_EVENT); 4766 } 4767 } 4768} 4769 4770static void 4771bge_tick(void *xsc) 4772{ 4773 struct bge_softc *sc = xsc; 4774 struct mii_data *mii = NULL; 4775 4776 BGE_LOCK_ASSERT(sc); 4777 4778 /* Synchronize with possible callout reset/stop. */ 4779 if (callout_pending(&sc->bge_stat_ch) || 4780 !callout_active(&sc->bge_stat_ch)) 4781 return; 4782 4783 if (BGE_IS_5705_PLUS(sc)) 4784 bge_stats_update_regs(sc); 4785 else 4786 bge_stats_update(sc); 4787 4788 /* XXX Add APE heartbeat check here? */ 4789 4790 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 4791 mii = device_get_softc(sc->bge_miibus); 4792 /* 4793 * Do not touch PHY if we have link up. This could break 4794 * IPMI/ASF mode or produce extra input errors 4795 * (extra errors was reported for bcm5701 & bcm5704). 4796 */ 4797 if (!sc->bge_link) 4798 mii_tick(mii); 4799 } else { 4800 /* 4801 * Since in TBI mode auto-polling can't be used we should poll 4802 * link status manually. Here we register pending link event 4803 * and trigger interrupt. 4804 */ 4805#ifdef DEVICE_POLLING 4806 /* In polling mode we poll link state in bge_poll(). 
*/ 4807 if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING)) 4808#endif 4809 { 4810 sc->bge_link_evt++; 4811 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 4812 sc->bge_flags & BGE_FLAG_5788) 4813 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4814 else 4815 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4816 } 4817 } 4818 4819 bge_asf_driver_up(sc); 4820 bge_watchdog(sc); 4821 4822 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 4823} 4824 4825static void 4826bge_stats_update_regs(struct bge_softc *sc) 4827{ 4828 if_t ifp; 4829 struct bge_mac_stats *stats; 4830 uint32_t val; 4831 4832 ifp = sc->bge_ifp; 4833 stats = &sc->bge_mac_stats; 4834 4835 stats->ifHCOutOctets += 4836 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); 4837 stats->etherStatsCollisions += 4838 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); 4839 stats->outXonSent += 4840 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); 4841 stats->outXoffSent += 4842 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); 4843 stats->dot3StatsInternalMacTransmitErrors += 4844 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); 4845 stats->dot3StatsSingleCollisionFrames += 4846 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); 4847 stats->dot3StatsMultipleCollisionFrames += 4848 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); 4849 stats->dot3StatsDeferredTransmissions += 4850 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); 4851 stats->dot3StatsExcessiveCollisions += 4852 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); 4853 stats->dot3StatsLateCollisions += 4854 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); 4855 stats->ifHCOutUcastPkts += 4856 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); 4857 stats->ifHCOutMulticastPkts += 4858 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); 4859 stats->ifHCOutBroadcastPkts += 4860 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); 4861 4862 stats->ifHCInOctets += 4863 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); 4864 stats->etherStatsFragments += 4865 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); 4866 stats->ifHCInUcastPkts += 4867 CSR_READ_4(sc, 
BGE_RX_MAC_STATS_UCAST); 4868 stats->ifHCInMulticastPkts += 4869 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); 4870 stats->ifHCInBroadcastPkts += 4871 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); 4872 stats->dot3StatsFCSErrors += 4873 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); 4874 stats->dot3StatsAlignmentErrors += 4875 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); 4876 stats->xonPauseFramesReceived += 4877 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); 4878 stats->xoffPauseFramesReceived += 4879 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); 4880 stats->macControlFramesReceived += 4881 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); 4882 stats->xoffStateEntered += 4883 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); 4884 stats->dot3StatsFramesTooLong += 4885 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); 4886 stats->etherStatsJabbers += 4887 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); 4888 stats->etherStatsUndersizePkts += 4889 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); 4890 4891 stats->FramesDroppedDueToFilters += 4892 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); 4893 stats->DmaWriteQueueFull += 4894 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); 4895 stats->DmaWriteHighPriQueueFull += 4896 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); 4897 stats->NoMoreRxBDs += 4898 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 4899 /* 4900 * XXX 4901 * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS 4902 * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0 4903 * includes number of unwanted multicast frames. This comes 4904 * from silicon bug and known workaround to get rough(not 4905 * exact) counter is to enable interrupt on MBUF low water 4906 * attention. This can be accomplished by setting 4907 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, 4908 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and 4909 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL. 
4910 * However that change would generate more interrupts and 4911 * there are still possibilities of losing multiple frames 4912 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. 4913 * Given that the workaround still would not get correct 4914 * counter I don't think it's worth to implement it. So 4915 * ignore reading the counter on controllers that have the 4916 * silicon bug. 4917 */ 4918 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 && 4919 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && 4920 sc->bge_chipid != BGE_CHIPID_BCM5720_A0) 4921 stats->InputDiscards += 4922 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 4923 stats->InputErrors += 4924 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 4925 stats->RecvThresholdHit += 4926 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); 4927 4928 if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { 4929 /* 4930 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS 4931 * frames, it's safe to disable workaround for DMA engine's 4932 * miscalculation of TXMBUF space. 
		 */
		if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
		    stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			/* The WA bit to clear differs by ASIC revision. */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			/* Workaround no longer needed for this controller. */
			sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
		}
	}
}

/*
 * Reset the hardware MAC statistics by reading every statistics
 * register and discarding the result (the counters are presumably
 * clear-on-read; nothing here writes them back).
 */
static void
bge_stats_clear_regs(struct bge_softc *sc)
{

	CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
}

/*
 * Update the ifnet counters from the statistics block that the chip
 * maintains in NIC memory (read through the PCI memory window).
 * The chip's counters are cumulative, so only the delta against the
 * snapshot saved in the softc is folded into if_inc_counter().
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	if_t ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

/* Fetch the low 32 bits of one 64-bit counter from the stats block. */
#define	READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	/* Three separate sources all feed the input-error counter. */
	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
	sc->bge_rx_nobds = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
	sc->bge_rx_inerrs = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef READ_STAT
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}

/*
 * Work around a TX hang triggered by short back-to-back buffers;
 * returns the (possibly defragmented) chain, or NULL on allocation
 * failure, in which case the original chain has been freed.
 */
static struct mbuf *
bge_check_short_dma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less
	 * than or equal to 8 total bytes then the device may hang.
	 * The two back-to-back send BDs must be in the same frame for
	 * this failure to occur.  Scan the mbuf chain and see whether
	 * two back-to-back send BDs are there.  If this is the case,
	 * allocate a new mbuf chain and copy the frame to work around
	 * the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1) {
		n = m_defrag(m, M_NOWAIT);
		if (n == NULL)
			m_freem(m);
	} else
		n = m;
	return (n);
}

/*
 * Prepare an IPv4/TCP frame for hardware TSO: make the headers
 * contiguous and writable, clear the IP checksum and TCP pseudo
 * checksum, and encode the MSS and header length into *mss/*flags
 * as the descriptor format requires.  Returns NULL on failure; the
 * chain has then been freed.
 */
static struct mbuf *
bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
    uint16_t *flags)
{
	struct ip *ip;
	struct tcphdr *tcp;
	struct mbuf *n;
	uint16_t hlen;
	uint32_t poff;

	if (M_WRITABLE(m) == 0) {
		/* Get a writable copy. */
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if (n == NULL)
			return (NULL);
		m = n;
	}
	/* Pull up Ethernet + IP, then IP options + TCP, then TCP options. */
	m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL)
		return (NULL);
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
	m = m_pullup(m, poff + sizeof(struct tcphdr));
	if (m == NULL)
		return (NULL);
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	m = m_pullup(m, poff + (tcp->th_off << 2));
	if (m == NULL)
		return (NULL);
	/*
	 * It seems the controller doesn't modify the IP length and TCP
	 * pseudo checksum, so the checksums computed by the upper stack
	 * must be cleared here (set to 0) and ip_len rewritten to cover
	 * a single segment.
	 */
	*mss = m->m_pkthdr.tso_segsz;
	/* Re-fetch header pointers: m_pullup() may have moved the data. */
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	ip->ip_sum = 0;
	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
	/* Clear pseudo checksum computed by TCP stack. */
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	tcp->th_sum = 0;
	/*
	 * Broadcom controllers use a different descriptor format for
	 * TSO depending on ASIC revision. Due to TSO-capable firmware
	 * license issue and lower performance of firmware based TSO
	 * we only support hardware based TSO.
	 */
	/* Calculate header length, incl. TCP/IP options, in 32 bit units. */
	hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
	if (sc->bge_flags & BGE_FLAG_TSO3) {
		/*
		 * For BCM5717 and newer controllers, hardware based TSO
		 * uses the 14 lower bits of the bge_mss field to store the
		 * MSS and the upper 2 bits to store the lowest 2 bits of
		 * the IP/TCP header length. The upper 6 bits of the header
		 * length are stored in the bge_flags[14:10,4] field. Jumbo
		 * frames are supported.
		 */
		*mss |= ((hlen & 0x3) << 14);
		*flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
	} else {
		/*
		 * For BCM5755 and newer controllers, hardware based TSO uses
		 * the lower 11 bits to store the MSS and the upper 5 bits to
		 * store the IP/TCP header length. Jumbo frames are not
		 * supported.
		 */
		*mss |= (hlen << 11);
	}
	return (m);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	struct bge_tx_bd *d;
	struct mbuf *m = *m_head;
	uint32_t idx = *txidx;
	uint16_t csum_flags, mss, vlan_tag;
	int nsegs, i, error;

	csum_flags = 0;
	mss = 0;
	vlan_tag = 0;
	/* Silicon bug workaround: defragment chains with tiny buffers. */
	if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
	    m->m_next != NULL) {
		*m_head = bge_check_short_dma(m);
		if (*m_head == NULL)
			return (ENOBUFS);
		m = *m_head;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		*m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
		if (*m_head == NULL)
			return (ENOBUFS);
		csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;
	} else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			/* Zero-pad runts; see bge_cksum_pad() for why. */
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
	}

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
		    m->m_pkthdr.len > ETHER_MAX_LEN)
			csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
		if (sc->bge_forced_collapse > 0 &&
		    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
			/*
			 * Forcedly collapse mbuf chains to overcome hardware
			 * limitation which only support a single outstanding
			 * DMA read operation.
			 */
			if (sc->bge_forced_collapse == 1)
				m = m_defrag(m, M_NOWAIT);
			else
				m = m_collapse(m, M_NOWAIT,
				    sc->bge_forced_collapse);
			/* Collapse is best-effort; keep the original on failure. */
			if (m == NULL)
				m = *m_head;
			*m_head = m;
		}
	}

	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check if we have enough free send BDs. */
	if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	if (m->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * 5725 family of devices corrupts TSO packets when TSO DMA
		 * buffers cross into regions which are within MSS bytes of
		 * a 4GB boundary. If we encounter the condition, drop the
		 * packet.
		 */
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			/* 32-bit wrap here means the segment straddles 4GB. */
			if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
			    d->bge_addr.bge_addr_lo)
				break;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
		/* Early break above: unwind the DMA load and drop the frame. */
		if (i != nsegs - 1) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
			m_freem(*m_head);
			*m_head = NULL;
			return (EIO);
		}
	} else {
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
	}

	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
5343 */ 5344static void 5345bge_start_locked(if_t ifp) 5346{ 5347 struct bge_softc *sc; 5348 struct mbuf *m_head; 5349 uint32_t prodidx; 5350 int count; 5351 5352 sc = if_getsoftc(ifp); 5353 BGE_LOCK_ASSERT(sc); 5354 5355 if (!sc->bge_link || 5356 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 5357 IFF_DRV_RUNNING) 5358 return; 5359 5360 prodidx = sc->bge_tx_prodidx; 5361 5362 for (count = 0; !if_sendq_empty(ifp);) { 5363 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { 5364 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5365 break; 5366 } 5367 m_head = if_dequeue(ifp); 5368 if (m_head == NULL) 5369 break; 5370 5371 /* 5372 * Pack the data into the transmit ring. If we 5373 * don't have room, set the OACTIVE flag and wait 5374 * for the NIC to drain the ring. 5375 */ 5376 if (bge_encap(sc, &m_head, &prodidx)) { 5377 if (m_head == NULL) 5378 break; 5379 if_sendq_prepend(ifp, m_head); 5380 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5381 break; 5382 } 5383 ++count; 5384 5385 /* 5386 * If there's a BPF listener, bounce a copy of this frame 5387 * to him. 5388 */ 5389 if_bpfmtap(ifp, m_head); 5390 } 5391 5392 if (count > 0) 5393 bge_start_tx(sc, prodidx); 5394} 5395 5396static void 5397bge_start_tx(struct bge_softc *sc, uint32_t prodidx) 5398{ 5399 5400 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 5401 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 5402 /* Transmit. */ 5403 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5404 /* 5700 b2 errata */ 5405 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 5406 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5407 5408 sc->bge_tx_prodidx = prodidx; 5409 5410 /* Set a timeout in case the chip goes out to lunch. */ 5411 sc->bge_timer = BGE_TX_TIMEOUT; 5412} 5413 5414/* 5415 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 5416 * to the mbuf data regions directly in the transmit descriptors. 
 */
static void
bge_start(if_t ifp)
{
	struct bge_softc *sc;

	sc = if_getsoftc(ifp);
	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}

/*
 * Bring the interface up: reset and reprogram the chip, reload the
 * rings and filters, and enable the transmitter, receiver and
 * interrupts.  Must be called with the driver lock held.
 */
static void
bge_init_locked(struct bge_softc *sc)
{
	if_t ifp;
	uint16_t *m;
	uint32_t mode;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);

	/* Reset handshake with the (optional) management firmware. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		device_printf(sc->bge_dev, "initialization failure\n");
		return;
	}

	ifp = sc->bge_ifp;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
	    ETHER_HDR_LEN + ETHER_CRC_LEN +
	    (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));

	/* Load our MAC address. */
	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Program promiscuous mode. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Program VLAN tag stripping. */
	bge_setvlan(sc);

	/* Override UDP checksum offloading. */
	if (sc->bge_forced_udpcsum == 0)
		sc->bge_csum_features &= ~CSUM_UDP;
	else
		sc->bge_csum_features |= CSUM_UDP;
	if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
	    if_getcapenable(ifp) & IFCAP_TXCSUM) {
		if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
		if_sethwassistbits(ifp, sc->bge_csum_features, 0);
	}

	/* Init RX ring. */
	if (bge_init_rx_ring_std(sc) != 0) {
		device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
		bge_stop(sc);
		return;
	}

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			/* Second word of the first std ring BD in NIC memory. */
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			device_printf (sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
		if (bge_init_rx_ring_jumbo(sc) != 0) {
			device_printf(sc->bge_dev,
			    "no memory for jumbo Rx buffers.\n");
			bge_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve the chip's current JMB_FRM_LEN/CNT_DN_MODE bits. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter. */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Turn on receiver. */
	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
	CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Clear MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_clear_regs(sc);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
		    BGE_PCIMISCCTL_MASK_PCI_INTR);
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
	} else
#endif

	/* Enable host interrupts. */
	{
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
	}

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	bge_ifmedia_upd_locked(ifp);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}

/* Locked wrapper around bge_init_locked(); this is the ifnet init entry. */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;

	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(if_t ifp)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	int res;

	BGE_LOCK(sc);
	res = bge_ifmedia_upd_locked(ifp);
	BGE_UNLOCK(sc);

	return (res);
}

/*
 * Set media options with the driver lock held.  For TBI (1000baseX)
 * cards the MAC/SERDES registers are programmed directly; for copper
 * the request is handed to the PHY via mii(4).
 */
static int
bge_ifmedia_upd_locked(if_t ifp)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	struct ifmedia *ifm;

	BGE_LOCK_ASSERT(sc);

	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					/* Pulse SEND, then restore the config. */
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start_locked. The only
	 * way to get things working was to receive a packet and
	 * get an RX intr.
	 * bge_tick should help for fiber cards and we might not
	 * need to do this here if BGE_FLAG_TBI is set but as
	 * we poll for fiber anyway it should not harm.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_FLAG_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	BGE_LOCK(sc);

	if ((if_getflags(ifp) & IFF_UP) == 0) {
		BGE_UNLOCK(sc);
		return;
	}
	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* TBI: derive status straight from the MAC registers. */
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	/* Copper: let the PHY report the current media state. */
	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BGE_UNLOCK(sc);
}

/* Ioctl entry point; dispatches on the ioctl command below. */
static int
bge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/* Validate against the jumbo or standard MTU ceiling. */
		if (BGE_IS_JUMBO_CAPABLE(sc) ||
		    (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
			if (ifr->ifr_mtu < ETHERMIN ||
			    ifr->ifr_mtu > BGE_JUMBO_MTU) {
				error = EINVAL;
				break;
			}
		} else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
			break;
		}
		BGE_LOCK(sc);
		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			/* An MTU change requires a full reinitialization. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				bge_init_locked(sc);
			}
		}
		BGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two. Similarly for ALLMULTI.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = if_getflags(ifp);
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* TBI cards use our own ifmedia; copper goes through mii(4). */
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits that are being toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				/* Polling active: mask the PCI interrupt. */
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				BGE_UNLOCK(sc);
			}
		}
#endif
		/* Keep if_hwassist in sync with the TX checksum toggle. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp,
				    sc->bge_csum_features, 0);
			else
				if_sethwassistbits(ifp, 0,
				    sc->bge_csum_features);
		}

		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);

		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}

		if (mask & IFCAP_VLAN_MTU) {
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			/* bge_init() acquires the driver lock itself. */
			bge_init(sc);
		}

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* HWTSO depends on hardware VLAN tagging. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
			BGE_LOCK(sc);
			bge_setvlan(sc);
			BGE_UNLOCK(sc);
		}
#ifdef VLAN_CAPABILITIES
		if_vlancap(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Per-second TX watchdog, driven from bge_tick(); resets the chip if
 * a transmission has been pending for BGE_TX_TIMEOUT ticks, unless
 * the stall is explained by received pause (flow control) frames.
 */
static void
bge_watchdog(struct bge_softc *sc)
{
	if_t ifp;
	uint32_t status;

	BGE_LOCK_ASSERT(sc);

	/* Timer not armed, or armed and not yet expired. */
	if (sc->bge_timer == 0 || --sc->bge_timer)
		return;

	/* If pause frames are active then don't reset the hardware. */
	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
		status = CSR_READ_4(sc, BGE_RX_STS);
		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
			/*
			 * Both XOFF and XON frames were received since the
			 * last check; the pause state may only just have
			 * cleared, so give the transmitter more time before
			 * deciding it is hung.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	ifp = sc->bge_ifp;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	bge_init_locked(sc);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}

/*
 * Clear the enable bit of a state-machine block and poll (up to
 * BGE_TIMEOUT iterations) until the hardware reports the block has
 * actually stopped.  A timeout is silently ignored.
 */
static void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	if_t ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	callout_stop(&sc->bge_stat_ch);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Reset the flow-through queues. */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}
	/* Update MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Keep the ASF firmware running if up.
6052 */ 6053 if (sc->bge_asf_mode & ASF_STACKUP) 6054 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6055 else 6056 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6057 6058 /* Free the RX lists. */ 6059 bge_free_rx_ring_std(sc); 6060 6061 /* Free jumbo RX list. */ 6062 if (BGE_IS_JUMBO_CAPABLE(sc)) 6063 bge_free_rx_ring_jumbo(sc); 6064 6065 /* Free TX buffers. */ 6066 bge_free_tx_ring(sc); 6067 6068 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 6069 6070 /* Clear MAC's link state (PHY may still have link UP). */ 6071 if (bootverbose && sc->bge_link) 6072 if_printf(sc->bge_ifp, "link DOWN\n"); 6073 sc->bge_link = 0; 6074 6075 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 6076} 6077 6078/* 6079 * Stop all chip I/O so that the kernel's probe routines don't 6080 * get confused by errant DMAs when rebooting. 6081 */ 6082static int 6083bge_shutdown(device_t dev) 6084{ 6085 struct bge_softc *sc; 6086 6087 sc = device_get_softc(dev); 6088 BGE_LOCK(sc); 6089 bge_stop(sc); 6090 BGE_UNLOCK(sc); 6091 6092 return (0); 6093} 6094 6095static int 6096bge_suspend(device_t dev) 6097{ 6098 struct bge_softc *sc; 6099 6100 sc = device_get_softc(dev); 6101 BGE_LOCK(sc); 6102 bge_stop(sc); 6103 BGE_UNLOCK(sc); 6104 6105 return (0); 6106} 6107 6108static int 6109bge_resume(device_t dev) 6110{ 6111 struct bge_softc *sc; 6112 if_t ifp; 6113 6114 sc = device_get_softc(dev); 6115 BGE_LOCK(sc); 6116 ifp = sc->bge_ifp; 6117 if (if_getflags(ifp) & IFF_UP) { 6118 bge_init_locked(sc); 6119 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 6120 bge_start_locked(ifp); 6121 } 6122 BGE_UNLOCK(sc); 6123 6124 return (0); 6125} 6126 6127static void 6128bge_link_upd(struct bge_softc *sc) 6129{ 6130 struct mii_data *mii; 6131 uint32_t link, status; 6132 6133 BGE_LOCK_ASSERT(sc); 6134 6135 /* Clear 'pending link event' flag. */ 6136 sc->bge_link_evt = 0; 6137 6138 /* 6139 * Process link state changes. 6140 * Grrr. 
	 * The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		/* Strategy 1: MII interrupt (broken 5700 A/B revisions). */
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/*
			 * Clear the interrupt: re-arm the MI event, read
			 * the PHY interrupt status register to ack, then
			 * unmask the PHY interrupts we care about.
			 */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/* Strategy 2: fiber (TBI) -- poll PCS sync in MAC status. */
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
					DELAY(40);
				}
				/* Ack all outstanding MAC status bits. */
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	} else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		/*
		 * Strategy 3 (autopolling MI mode):
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
		 * in status word always set. Workaround this bug by reading
		 * PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii = device_get_softc(sc->bge_miibus);
		mii_pollstat(mii);
		bge_miibus_statchg(sc->bge_dev);
	}

	/* Disable MAC attention when link is up. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

/*
 * Create the driver's sysctl tree under dev.bge.N: debug hooks (only
 * when built with BGE_REGISTER_DEBUG), tunables, and the statistics
 * nodes appropriate for this controller generation.
 */
static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int unit;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    bge_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    bge_sysctl_reg_read, "I", "MAC Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    bge_sysctl_ape_read, "I", "APE Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    bge_sysctl_mem_read, "I", "Memory Read");

#endif

	/* NOTE(review): 'unit' appears unused in the visible code below. */
	unit = device_get_unit(sc->bge_dev);
	/*
	 * A common design characteristic for many Broadcom client controllers
	 * is that they only support a single outstanding DMA read operation
	 * on the PCIe bus. This means that it will take twice as long to fetch
	 * a TX frame that is split into header and payload buffers as it does
	 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
	 * these controllers, coalescing buffers to reduce the number of memory
	 * reads is effective way to get maximum performance(about 940Mbps).
	 * Without collapsing TX buffers the maximum TCP bulk transfer
	 * performance is about 850Mbps. However forcing coalescing mbufs
	 * consumes a lot of CPU cycles, so leave it off by default.
	 */
	sc->bge_forced_collapse = 0;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
	    CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
	    "Number of fragmented TX buffers of a frame allowed before "
	    "forced collapsing");

	sc->bge_msi = 1;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
	    CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");

	/*
	 * It seems all Broadcom controllers have a bug that can generate UDP
	 * datagrams with checksum value 0 when TX UDP checksum offloading is
	 * enabled. Generating UDP checksum value 0 is RFC 768 violation.
	 * Even though the probability of generating such UDP datagrams is
	 * low, I don't want to see FreeBSD boxes to inject such datagrams
	 * into network so disable UDP checksum offloading by default. Users
	 * still override this behavior by setting a sysctl variable,
	 * dev.bge.0.forced_udpcsum.
	 */
	sc->bge_forced_udpcsum = 0;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
	    CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
	    "Enable UDP checksum offloading even if controller can "
	    "generate UDP checksum value 0");

	/* 5705+ parts export stats via registers; older via NIC memory. */
	if (BGE_IS_5705_PLUS(sc))
		bge_add_sysctl_stats_regs(sc, ctx, children);
	else
		bge_add_sysctl_stats(sc, ctx, children);
}

/*
 * Helper macro: one read-only unsigned stat node whose arg2 is the byte
 * offset of 'node' within struct bge_stats, handled by bge_sysctl_stats().
 */
#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, \
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, \
	    offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", desc)

/*
 * Register the statistics sysctl tree for pre-5705 controllers, which
 * keep their statistics block in NIC memory (read via bge_sysctl_stats).
 */
static void
bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *children, *schildren;

	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
	schildren = children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
	    children, COSFramesDroppedDueToFilters,
	    "FramesDroppedDueToFilters");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
	    children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
	    children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
	    children, nicNoMoreRxBDs, "NoMoreRxBDs");
	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
	    children, ifInDiscards, "InputDiscards");
	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
	    children, ifInErrors, "InputErrors");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
	    children, nicRecvThresholdHit, "RecvThresholdHit");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
	    children, nicDmaReadQueueFull, "DmaReadQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
	    children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
	    children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
	    children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
	    children, nicRingStatusUpdate, "RingStatusUpdate");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
	    children, nicInterrupts, "Interrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
	    children, nicAvoidedInterrupts, "AvoidedInterrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
	    children, nicSendThresholdHit, "SendThresholdHit");

	/* RX statistics sub-tree. */
	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
	    children, rxstats.ifHCInOctets, "ifHCInOctets");
	BGE_SYSCTL_STAT(sc, ctx, "Fragments",
	    children, rxstats.etherStatsFragments, "Fragments");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
	    children, rxstats.ifHCInUcastPkts, "UnicastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
	    children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
	    children, rxstats.dot3StatsFCSErrors, "FCSErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
	    children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
	BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
	    children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
	    children, rxstats.xoffPauseFramesReceived,
	    "xoffPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
	    children, rxstats.macControlFramesReceived,
	    "ControlFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
	    children, rxstats.xoffStateEntered, "xoffStateEntered");
	BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
	    children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
	BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
	    children, rxstats.etherStatsJabbers, "Jabbers");
	BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
	    children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
	    children, rxstats.inRangeLengthError, "inRangeLengthError");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
	    children, rxstats.outRangeLengthError, "outRangeLengthError");

	/* TX statistics sub-tree. */
	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
	    children, txstats.ifHCOutOctets, "ifHCOutOctets");
	BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
	    children, txstats.etherStatsCollisions, "Collisions");
	BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
	    children, txstats.outXonSent, "XonSent");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
	    children, txstats.outXoffSent, "XoffSent");
	BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
	    children, txstats.flowControlDone, "flowControlDone");
	BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
	    children, txstats.dot3StatsInternalMacTransmitErrors,
	    "InternalMacTransmitErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
	    children, txstats.dot3StatsSingleCollisionFrames,
	    "SingleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
	    children, txstats.dot3StatsMultipleCollisionFrames,
	    "MultipleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
	    children, txstats.dot3StatsDeferredTransmissions,
	    "DeferredTransmissions");
	BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
	    children, txstats.dot3StatsExcessiveCollisions,
	    "ExcessiveCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
	    children, txstats.dot3StatsLateCollisions,
	    "LateCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
	    children, txstats.ifHCOutUcastPkts, "UnicastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
	    children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
	    children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
	    children, txstats.dot3StatsCarrierSenseErrors,
	    "CarrierSenseErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
	    children, txstats.ifOutDiscards, "Discards");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
	    children, txstats.ifOutErrors, "Errors");
}

#undef BGE_SYSCTL_STAT

/*
 * Helper macro: one read-only 64-bit stat node backed directly by a
 * counter in the bge_mac_stats soft copy (no handler needed).
 */
#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Register the statistics sysctl tree for 5705-and-newer controllers,
 * which export MAC statistics via registers mirrored into
 * sc->bge_mac_stats (see bge_stats_update_regs).
 */
static void
bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child, *schild;
	struct bge_mac_stats *stats;

	stats = &sc->bge_mac_stats;
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
	    &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
	    &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
	    &stats->DmaWriteHighPriQueueFull,
	    "NIC DMA Write High Priority Queue Full");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
	    &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
	    &stats->InputDiscards, "Discarded Input Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
	    &stats->InputErrors, "Input Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
	    &stats->RecvThresholdHit, "NIC Recv Threshold Hit");

	/* RX statistics sub-tree. */
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
	    &stats->ifHCInOctets, "Inbound Octets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
	    &stats->etherStatsFragments, "Fragments");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
	    &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
	    &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
	    &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
	    &stats->dot3StatsFCSErrors, "FCS Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
	    &stats->dot3StatsAlignmentErrors, "Alignment Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
	    &stats->xonPauseFramesReceived, "XON Pause Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
	    &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
	    &stats->macControlFramesReceived, "MAC Control Frames Received");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
	    &stats->xoffStateEntered, "XOFF State Entered");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
	    &stats->dot3StatsFramesTooLong, "Frames Too Long");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
	    &stats->etherStatsJabbers, "Jabbers");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
	    &stats->etherStatsUndersizePkts, "Undersized Packets");

	/* TX statistics sub-tree. */
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
	    &stats->ifHCOutOctets, "Outbound Octets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
	    &stats->etherStatsCollisions, "TX Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
	    &stats->outXonSent, "XON Sent");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
	    &stats->outXoffSent, "XOFF Sent");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
	    &stats->dot3StatsInternalMacTransmitErrors,
	    "Internal MAC TX Errors");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
	    &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
	    &stats->dot3StatsMultipleCollisionFrames,
	    "Multiple Collision Frames");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
	    &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
	    &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
	    &stats->dot3StatsLateCollisions, "Late Collisions");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
	    &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
	    &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
	BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
	    &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
}

#undef BGE_SYSCTL_STAT_ADD64

/*
 * Sysctl handler for the legacy (pre-5705) statistics nodes: read one
 * 32-bit counter out of the NIC-memory statistics block through the
 * PCI memory window.  arg2 is the counter's offset within
 * struct bge_stats (set up by the BGE_SYSCTL_STAT macro).
 */
static int
bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint32_t result;
	int offset;

	sc = (struct bge_softc *)arg1;
	offset = arg2;
	/* Counters are 64-bit in NIC memory; read the low word only. */
	result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
	    offsetof(bge_hostaddr, bge_addr_lo));
	return (sysctl_handle_int(oidp, &result, 0, req));
}

#ifdef BGE_REGISTER_DEBUG
/*
 * Debug sysctl: writing 1 dumps the status block, a register window and
 * the detected hardware feature flags to the console.
 */
static int
bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint16_t *sbdata;
	int error, result, sbsz;
	int i, j;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result == 1) {
		sc = (struct bge_softc *)arg1;

		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
			sbsz = BGE_STATUS_BLK_SZ;
		else
			sbsz = 32;
		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
		printf("Status Block:\n");
		BGE_LOCK(sc);
		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++)
				printf(" %04x", sbdata[i++]);
			printf("\n");
		}

		printf("Registers:\n");
		for (i = 0x800; i < 0xA00; ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++) {
				printf(" %08x", CSR_READ_4(sc, i));
				i += 4;
			}
			printf("\n");
		}
		BGE_UNLOCK(sc);

		printf("Hardware Flags:\n");
		if (BGE_IS_5717_PLUS(sc))
			printf(" - 5717 Plus\n");
		if (BGE_IS_5755_PLUS(sc))
			printf(" - 5755 Plus\n");
		if (BGE_IS_575X_PLUS(sc))
			printf(" - 575X Plus\n");
		if (BGE_IS_5705_PLUS(sc))
			printf(" - 5705 Plus\n");
		if (BGE_IS_5714_FAMILY(sc))
			printf(" - 5714 Family\n");
		if (BGE_IS_5700_FAMILY(sc))
			printf(" - 5700 Family\n");
		if (sc->bge_flags & BGE_FLAG_JUMBO)
			printf(" - Supports Jumbo Frames\n");
		if (sc->bge_flags & BGE_FLAG_PCIX)
			printf(" - PCI-X Bus\n");
		if (sc->bge_flags & BGE_FLAG_PCIE)
			printf(" - PCI Express Bus\n");
		if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
			printf(" - No 3 LEDs\n");
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
			printf(" - RX Alignment Bug\n");
	}

	return (error);
}

/*
 * Debug sysctl: write a register offset (< 0x8000) to dump that MAC
 * register's value to the console.
 *
 * NOTE(review): sysctl_handle_int() operates on an int-sized object but
 * is handed &result where 'result' is uint16_t -- this writes past the
 * variable on a sysctl store.  Debug-only code; confirm and widen
 * 'result' to int (adding a >= 0 check) if this path is ever relied on.
 */
static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	uint16_t result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = CSR_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}

/*
 * Debug sysctl: same as reg_read but through the APE register space.
 * NOTE(review): shares the uint16_t/sysctl_handle_int size mismatch
 * described above bge_sysctl_reg_read().
 */
static int
bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	uint16_t result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = APE_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}

/*
 * Debug sysctl: same as reg_read but via indirect NIC-memory access.
 * NOTE(review): shares the uint16_t/sysctl_handle_int size mismatch
 * described above bge_sysctl_reg_read().
 */
static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	uint16_t result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = bge_readmem_ind(sc, result);
		printf("mem 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
#endif

static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t
ether_addr[]) 6708{ 6709 return (1); 6710} 6711 6712static int 6713bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 6714{ 6715 uint32_t mac_addr; 6716 6717 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); 6718 if ((mac_addr >> 16) == 0x484b) { 6719 ether_addr[0] = (uint8_t)(mac_addr >> 8); 6720 ether_addr[1] = (uint8_t)mac_addr; 6721 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); 6722 ether_addr[2] = (uint8_t)(mac_addr >> 24); 6723 ether_addr[3] = (uint8_t)(mac_addr >> 16); 6724 ether_addr[4] = (uint8_t)(mac_addr >> 8); 6725 ether_addr[5] = (uint8_t)mac_addr; 6726 return (0); 6727 } 6728 return (1); 6729} 6730 6731static int 6732bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 6733{ 6734 int mac_offset = BGE_EE_MAC_OFFSET; 6735 6736 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 6737 mac_offset = BGE_EE_MAC_OFFSET_5906; 6738 6739 return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 6740 ETHER_ADDR_LEN)); 6741} 6742 6743static int 6744bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 6745{ 6746 6747 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 6748 return (1); 6749 6750 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 6751 ETHER_ADDR_LEN)); 6752} 6753 6754static int 6755bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 6756{ 6757 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 6758 /* NOTE: Order is critical */ 6759 bge_get_eaddr_fw, 6760 bge_get_eaddr_mem, 6761 bge_get_eaddr_nvram, 6762 bge_get_eaddr_eeprom, 6763 NULL 6764 }; 6765 const bge_eaddr_fcn_t *func; 6766 6767 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 6768 if ((*func)(sc, eaddr) == 0) 6769 break; 6770 } 6771 return (*func == NULL ? 
ENXIO : 0); 6772} 6773 6774static uint64_t 6775bge_get_counter(if_t ifp, ift_counter cnt) 6776{ 6777 struct bge_softc *sc; 6778 struct bge_mac_stats *stats; 6779 6780 sc = if_getsoftc(ifp); 6781 if (!BGE_IS_5705_PLUS(sc)) 6782 return (if_get_counter_default(ifp, cnt)); 6783 stats = &sc->bge_mac_stats; 6784 6785 switch (cnt) { 6786 case IFCOUNTER_IERRORS: 6787 return (stats->NoMoreRxBDs + stats->InputDiscards + 6788 stats->InputErrors); 6789 case IFCOUNTER_COLLISIONS: 6790 return (stats->etherStatsCollisions); 6791 default: 6792 return (if_get_counter_default(ifp, cnt)); 6793 } 6794} 6795 6796#ifdef DEBUGNET 6797static void 6798bge_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) 6799{ 6800 struct bge_softc *sc; 6801 6802 sc = if_getsoftc(ifp); 6803 BGE_LOCK(sc); 6804 *nrxr = sc->bge_return_ring_cnt; 6805 *ncl = DEBUGNET_MAX_IN_FLIGHT; 6806 if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 && 6807 (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + 6808 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) 6809 *clsize = MJUM9BYTES; 6810 else 6811 *clsize = MCLBYTES; 6812 BGE_UNLOCK(sc); 6813} 6814 6815static void 6816bge_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused) 6817{ 6818} 6819 6820static int 6821bge_debugnet_transmit(if_t ifp, struct mbuf *m) 6822{ 6823 struct bge_softc *sc; 6824 uint32_t prodidx; 6825 int error; 6826 6827 sc = if_getsoftc(ifp); 6828 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 6829 IFF_DRV_RUNNING) 6830 return (1); 6831 6832 prodidx = sc->bge_tx_prodidx; 6833 error = bge_encap(sc, &m, &prodidx); 6834 if (error == 0) 6835 bge_start_tx(sc, prodidx); 6836 return (error); 6837} 6838 6839static int 6840bge_debugnet_poll(if_t ifp, int count) 6841{ 6842 struct bge_softc *sc; 6843 uint32_t rx_prod, tx_cons; 6844 6845 sc = if_getsoftc(ifp); 6846 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 6847 IFF_DRV_RUNNING) 6848 return (1); 6849 6850 
bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 6851 sc->bge_cdata.bge_status_map, 6852 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 6853 6854 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 6855 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 6856 6857 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 6858 sc->bge_cdata.bge_status_map, 6859 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 6860 6861 (void)bge_rxeof(sc, rx_prod, 0); 6862 bge_txeof(sc, tx_cons); 6863 return (0); 6864} 6865#endif /* DEBUGNET */ 6866