if_bce.c revision 179695
1/*- 2 * Copyright (c) 2006-2008 Broadcom Corporation 3 * David Christensen <davidch@broadcom.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written consent. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31#include <sys/cdefs.h> 32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 179695 2008-06-10 02:19:11Z davidch $"); 33 34/* 35 * The following controllers are supported by this driver: 36 * BCM5706C A2, A3 37 * BCM5706S A2, A3 38 * BCM5708C B1, B2 39 * BCM5708S B1, B2 40 * 41 * The following controllers are not supported by this driver: 42 * BCM5706C A0, A1 (pre-production) 43 * BCM5706S A0, A1 (pre-production) 44 * BCM5708C A0, B0 (pre-production) 45 * BCM5708S A0, B0 (pre-production) 46 */ 47 48#include "opt_bce.h" 49 50#include <dev/bce/if_bcereg.h> 51#include <dev/bce/if_bcefw.h> 52 53/****************************************************************************/ 54/* BCE Debug Options */ 55/****************************************************************************/ 56#ifdef BCE_DEBUG 57 u32 bce_debug = BCE_WARN; 58 59 /* 0 = Never */ 60 /* 1 = 1 in 2,147,483,648 */ 61 /* 256 = 1 in 8,388,608 */ 62 /* 2048 = 1 in 1,048,576 */ 63 /* 65536 = 1 in 32,768 */ 64 /* 1048576 = 1 in 2,048 */ 65 /* 268435456 = 1 in 8 */ 66 /* 536870912 = 1 in 4 */ 67 /* 1073741824 = 1 in 2 */ 68 69 /* Controls how often the l2_fhdr frame error check will fail. */ 70 int bce_debug_l2fhdr_status_check = 0; 71 72 /* Controls how often the unexpected attention check will fail. */ 73 int bce_debug_unexpected_attention = 0; 74 75 /* Controls how often to simulate an mbuf allocation failure. */ 76 int bce_debug_mbuf_allocation_failure = 0; 77 78 /* Controls how often to simulate a DMA mapping failure. */ 79 int bce_debug_dma_map_addr_failure = 0; 80 81 /* Controls how often to simulate a bootcode failure. 
*/ 82 int bce_debug_bootcode_running_failure = 0; 83#endif 84 85/****************************************************************************/ 86/* BCE Build Time Options */ 87/****************************************************************************/ 88#define BCE_USE_SPLIT_HEADER 1 89/* #define BCE_NVRAM_WRITE_SUPPORT 1 */ 90 91/****************************************************************************/ 92/* PCI Device ID Table */ 93/* */ 94/* Used by bce_probe() to identify the devices supported by this driver. */ 95/****************************************************************************/ 96#define BCE_DEVDESC_MAX 64 97 98static struct bce_type bce_devs[] = { 99 /* BCM5706C Controllers and OEM boards. */ 100 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 101 "HP NC370T Multifunction Gigabit Server Adapter" }, 102 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 103 "HP NC370i Multifunction Gigabit Server Adapter" }, 104 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 105 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 106 107 /* BCM5706S controllers and OEM boards. */ 108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 109 "HP NC370F Multifunction Gigabit Server Adapter" }, 110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 111 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 112 113 /* BCM5708C controllers and OEM boards. */ 114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 115 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 116 117 /* BCM5708S controllers and OEM boards. */ 118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 119 "Broadcom NetXtreme II BCM5708 1000Base-SX" }, 120 { 0, 0, 0, 0, NULL } 121}; 122 123 124/****************************************************************************/ 125/* Supported Flash NVRAM device data. 
*/ 126/****************************************************************************/ 127static struct flash_spec flash_table[] = 128{ 129 /* Slow EEPROM */ 130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 133 "EEPROM - slow"}, 134 /* Expansion entry 0001 */ 135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 138 "Entry 0001"}, 139 /* Saifun SA25F010 (non-buffered flash) */ 140 /* strap, cfg1, & write1 need updates */ 141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 144 "Non-buffered flash (128kB)"}, 145 /* Saifun SA25F020 (non-buffered flash) */ 146 /* strap, cfg1, & write1 need updates */ 147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 150 "Non-buffered flash (256kB)"}, 151 /* Expansion entry 0100 */ 152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 155 "Entry 0100"}, 156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 166 /* Saifun 
SA25F005 (non-buffered flash) */ 167 /* strap, cfg1, & write1 need updates */ 168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 171 "Non-buffered flash (64kB)"}, 172 /* Fast EEPROM */ 173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 176 "EEPROM - fast"}, 177 /* Expansion entry 1001 */ 178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 181 "Entry 1001"}, 182 /* Expansion entry 1010 */ 183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 186 "Entry 1010"}, 187 /* ATMEL AT45DB011B (buffered flash) */ 188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 191 "Buffered flash (128kB)"}, 192 /* Expansion entry 1100 */ 193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 196 "Entry 1100"}, 197 /* Expansion entry 1101 */ 198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 201 "Entry 1101"}, 202 /* Ateml Expansion entry 1110 */ 203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 206 "Entry 1110 (Atmel)"}, 207 /* ATMEL AT45DB021B (buffered flash) */ 208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 210 
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 211 "Buffered flash (256kB)"}, 212}; 213 214 215/****************************************************************************/ 216/* FreeBSD device entry points. */ 217/****************************************************************************/ 218static int bce_probe (device_t); 219static int bce_attach (device_t); 220static int bce_detach (device_t); 221static int bce_shutdown (device_t); 222 223 224/****************************************************************************/ 225/* BCE Debug Data Structure Dump Routines */ 226/****************************************************************************/ 227#ifdef BCE_DEBUG 228static u32 bce_ctx_rd (struct bce_softc *, u32, u32); 229static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); 230static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); 231static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); 232#ifdef BCE_USE_SPLIT_HEADER 233static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); 234#endif 235static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *); 236static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *); 237#ifdef BCE_USE_SPLIT_HEADER 238static void bce_dump_pgbd (struct bce_softc *, int, struct rx_bd *); 239#endif 240static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *); 241static void bce_dump_ctx (struct bce_softc *, u16); 242static void bce_dump_ftqs (struct bce_softc *); 243static void bce_dump_tx_chain (struct bce_softc *, u16, int); 244static void bce_dump_rx_chain (struct bce_softc *, u16, int); 245#ifdef BCE_USE_SPLIT_HEADER 246static void bce_dump_pg_chain (struct bce_softc *, u16, int); 247#endif 248static void bce_dump_status_block (struct bce_softc *); 249static void bce_dump_stats_block (struct bce_softc *); 250static void bce_dump_driver_state (struct bce_softc *); 251static void bce_dump_hw_state (struct bce_softc *); 252static void 
bce_dump_bc_state (struct bce_softc *); 253static void bce_breakpoint (struct bce_softc *); 254#endif 255 256 257/****************************************************************************/ 258/* BCE Register/Memory Access Routines */ 259/****************************************************************************/ 260static u32 bce_reg_rd_ind (struct bce_softc *, u32); 261static void bce_reg_wr_ind (struct bce_softc *, u32, u32); 262static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); 263static int bce_miibus_read_reg (device_t, int, int); 264static int bce_miibus_write_reg (device_t, int, int, int); 265static void bce_miibus_statchg (device_t); 266 267 268/****************************************************************************/ 269/* BCE NVRAM Access Routines */ 270/****************************************************************************/ 271static int bce_acquire_nvram_lock (struct bce_softc *); 272static int bce_release_nvram_lock (struct bce_softc *); 273static void bce_enable_nvram_access (struct bce_softc *); 274static void bce_disable_nvram_access(struct bce_softc *); 275static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); 276static int bce_init_nvram (struct bce_softc *); 277static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); 278static int bce_nvram_test (struct bce_softc *); 279#ifdef BCE_NVRAM_WRITE_SUPPORT 280static int bce_enable_nvram_write (struct bce_softc *); 281static void bce_disable_nvram_write (struct bce_softc *); 282static int bce_nvram_erase_page (struct bce_softc *, u32); 283static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); 284static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); 285#endif 286 287/****************************************************************************/ 288/* */ 289/****************************************************************************/ 290static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int); 291static int bce_dma_alloc 
(device_t); 292static void bce_dma_free (struct bce_softc *); 293static void bce_release_resources (struct bce_softc *); 294 295/****************************************************************************/ 296/* BCE Firmware Synchronization and Load */ 297/****************************************************************************/ 298static int bce_fw_sync (struct bce_softc *, u32); 299static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32); 300static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *); 301static void bce_init_cpus (struct bce_softc *); 302 303static void bce_stop (struct bce_softc *); 304static int bce_reset (struct bce_softc *, u32); 305static int bce_chipinit (struct bce_softc *); 306static int bce_blockinit (struct bce_softc *); 307static int bce_get_rx_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *); 308#ifdef BCE_USE_SPLIT_HEADER 309static int bce_get_pg_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *); 310#endif 311 312static int bce_init_tx_chain (struct bce_softc *); 313static void bce_free_tx_chain (struct bce_softc *); 314 315static int bce_init_rx_chain (struct bce_softc *); 316static void bce_fill_rx_chain (struct bce_softc *); 317static void bce_free_rx_chain (struct bce_softc *); 318 319#ifdef BCE_USE_SPLIT_HEADER 320static int bce_init_pg_chain (struct bce_softc *); 321static void bce_fill_pg_chain (struct bce_softc *); 322static void bce_free_pg_chain (struct bce_softc *); 323#endif 324 325static int bce_tx_encap (struct bce_softc *, struct mbuf **); 326static void bce_start_locked (struct ifnet *); 327static void bce_start (struct ifnet *); 328static int bce_ioctl (struct ifnet *, u_long, caddr_t); 329static void bce_watchdog (struct bce_softc *); 330static int bce_ifmedia_upd (struct ifnet *); 331static void bce_ifmedia_upd_locked (struct ifnet *); 332static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); 333static void bce_init_locked (struct bce_softc *); 
334static void bce_init (void *); 335static void bce_mgmt_init_locked (struct bce_softc *sc); 336 337static void bce_init_ctx (struct bce_softc *); 338static void bce_get_mac_addr (struct bce_softc *); 339static void bce_set_mac_addr (struct bce_softc *); 340static void bce_phy_intr (struct bce_softc *); 341static inline u16 bce_get_hw_rx_cons(struct bce_softc *); 342static void bce_rx_intr (struct bce_softc *); 343static void bce_tx_intr (struct bce_softc *); 344static void bce_disable_intr (struct bce_softc *); 345static void bce_enable_intr (struct bce_softc *); 346static void bce_intr (void *); 347static void bce_set_rx_mode (struct bce_softc *); 348static void bce_stats_update (struct bce_softc *); 349static void bce_tick (void *); 350static void bce_pulse (void *); 351static void bce_add_sysctls (struct bce_softc *); 352 353 354/****************************************************************************/ 355/* FreeBSD device dispatch table. */ 356/****************************************************************************/ 357static device_method_t bce_methods[] = { 358 /* Device interface (device_if.h) */ 359 DEVMETHOD(device_probe, bce_probe), 360 DEVMETHOD(device_attach, bce_attach), 361 DEVMETHOD(device_detach, bce_detach), 362 DEVMETHOD(device_shutdown, bce_shutdown), 363/* Supported by device interface but not used here. */ 364/* DEVMETHOD(device_identify, bce_identify), */ 365/* DEVMETHOD(device_suspend, bce_suspend), */ 366/* DEVMETHOD(device_resume, bce_resume), */ 367/* DEVMETHOD(device_quiesce, bce_quiesce), */ 368 369 /* Bus interface (bus_if.h) */ 370 DEVMETHOD(bus_print_child, bus_generic_print_child), 371 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 372 373 /* MII interface (miibus_if.h) */ 374 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 375 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 376 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 377/* Supported by MII interface but not used here. 
*/ 378/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ 379/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ 380 381 { 0, 0 } 382}; 383 384static driver_t bce_driver = { 385 "bce", 386 bce_methods, 387 sizeof(struct bce_softc) 388}; 389 390static devclass_t bce_devclass; 391 392MODULE_DEPEND(bce, pci, 1, 1, 1); 393MODULE_DEPEND(bce, ether, 1, 1, 1); 394MODULE_DEPEND(bce, miibus, 1, 1, 1); 395 396DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0); 397DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0); 398 399 400/****************************************************************************/ 401/* Tunable device values */ 402/****************************************************************************/ 403static int bce_tso_enable = TRUE; 404static int bce_msi_enable = 1; 405 406SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters"); 407 408/* Allowable values are TRUE or FALSE */ 409TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable); 410SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, 411"TSO Enable/Disable"); 412 413/* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */ 414TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable); 415SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, 416"MSI | INTx selector"); 417 418/* ToDo: Add tunable to enable/disable strict MTU handling. */ 419/* Currently allows "loose" RX MTU checking (i.e. sets the */ 420/* h/w RX MTU to the size of the largest receive buffer, or */ 421/* 2048 bytes). */ 422 423/****************************************************************************/ 424/* Device probe function. */ 425/* */ 426/* Compares the device to the driver's list of supported devices and */ 427/* reports back to the OS whether this is the right driver for the device. */ 428/* */ 429/* Returns: */ 430/* BUS_PROBE_DEFAULT on success, positive value on failure. 
*/ 431/****************************************************************************/ 432static int 433bce_probe(device_t dev) 434{ 435 struct bce_type *t; 436 struct bce_softc *sc; 437 char *descbuf; 438 u16 vid = 0, did = 0, svid = 0, sdid = 0; 439 440 t = bce_devs; 441 442 sc = device_get_softc(dev); 443 bzero(sc, sizeof(struct bce_softc)); 444 sc->bce_unit = device_get_unit(dev); 445 sc->bce_dev = dev; 446 447 /* Get the data for the device to be probed. */ 448 vid = pci_get_vendor(dev); 449 did = pci_get_device(dev); 450 svid = pci_get_subvendor(dev); 451 sdid = pci_get_subdevice(dev); 452 453 DBPRINT(sc, BCE_VERBOSE_LOAD, 454 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 455 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 456 457 /* Look through the list of known devices for a match. */ 458 while(t->bce_name != NULL) { 459 460 if ((vid == t->bce_vid) && (did == t->bce_did) && 461 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && 462 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { 463 464 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 465 466 if (descbuf == NULL) 467 return(ENOMEM); 468 469 /* Print out the device identity. */ 470 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 471 t->bce_name, 472 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 473 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 474 475 device_set_desc_copy(dev, descbuf); 476 free(descbuf, M_TEMP); 477 return(BUS_PROBE_DEFAULT); 478 } 479 t++; 480 } 481 482 return(ENXIO); 483} 484 485 486/****************************************************************************/ 487/* Device attach function. */ 488/* */ 489/* Allocates device resources, performs secondary chip identification, */ 490/* resets and initializes the hardware, and initializes driver instance */ 491/* variables. */ 492/* */ 493/* Returns: */ 494/* 0 on success, positive value on failure. 
*/ 495/****************************************************************************/ 496static int 497bce_attach(device_t dev) 498{ 499 struct bce_softc *sc; 500 struct ifnet *ifp; 501 u32 val; 502 int count, rid, rc = 0; 503 504 sc = device_get_softc(dev); 505 sc->bce_dev = dev; 506 507 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 508 509 sc->bce_unit = device_get_unit(dev); 510 511 /* Set initial device and PHY flags */ 512 sc->bce_flags = 0; 513 sc->bce_phy_flags = 0; 514 515 pci_enable_busmaster(dev); 516 517 /* Allocate PCI memory resources. */ 518 rid = PCIR_BAR(0); 519 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 520 &rid, RF_ACTIVE); 521 522 if (sc->bce_res_mem == NULL) { 523 BCE_PRINTF("%s(%d): PCI memory allocation failed\n", 524 __FILE__, __LINE__); 525 rc = ENXIO; 526 goto bce_attach_fail; 527 } 528 529 /* Get various resource handles. */ 530 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 531 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 532 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem); 533 534 /* If MSI is enabled in the driver, get the vector count. */ 535 count = bce_msi_enable ? pci_msi_count(dev) : 0; 536 537 /* Allocate PCI IRQ resources. */ 538 if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) { 539 rid = 1; 540 sc->bce_flags |= BCE_USING_MSI_FLAG; 541 DBPRINT(sc, BCE_VERBOSE_LOAD, 542 "Allocating %d MSI interrupt(s)\n", count); 543 } else { 544 rid = 0; 545 DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n"); 546 } 547 548 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 549 RF_SHAREABLE | RF_ACTIVE); 550 551 if (sc->bce_res_irq == NULL) { 552 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n", 553 __FILE__, __LINE__); 554 rc = ENXIO; 555 goto bce_attach_fail; 556 } 557 558 /* Initialize mutex for the current device instance. 
*/ 559 BCE_LOCK_INIT(sc, device_get_nameunit(dev)); 560 561 /* 562 * Configure byte swap and enable indirect register access. 563 * Rely on CPU to do target byte swapping on big endian systems. 564 * Access to registers outside of PCI configurtion space are not 565 * valid until this is done. 566 */ 567 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 568 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 569 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 570 571 /* Save ASIC revsion info. */ 572 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 573 574 /* Weed out any non-production controller revisions. */ 575 switch(BCE_CHIP_ID(sc)) { 576 case BCE_CHIP_ID_5706_A0: 577 case BCE_CHIP_ID_5706_A1: 578 case BCE_CHIP_ID_5708_A0: 579 case BCE_CHIP_ID_5708_B0: 580 BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n", 581 __FILE__, __LINE__, 582 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 583 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 584 rc = ENODEV; 585 goto bce_attach_fail; 586 } 587 588 /* 589 * The embedded PCIe to PCI-X bridge (EPB) 590 * in the 5708 cannot address memory above 591 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 592 */ 593 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 594 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR; 595 else 596 sc->max_bus_addr = BUS_SPACE_MAXADDR; 597 598 /* 599 * Find the base address for shared memory access. 600 * Newer versions of bootcode use a signature and offset 601 * while older versions use a fixed address. 602 */ 603 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 604 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG) 605 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0); 606 else 607 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 608 609 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n", 610 __FUNCTION__, sc->bce_shmem_base); 611 612 /* Fetch the bootcode revision. 
*/ 613 sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + 614 BCE_DEV_INFO_BC_REV); 615 616 /* Check if any management firmware is running. */ 617 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE); 618 if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) 619 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 620 621 /* Get PCI bus information (speed and type). */ 622 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 623 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 624 u32 clkreg; 625 626 sc->bce_flags |= BCE_PCIX_FLAG; 627 628 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS); 629 630 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 631 switch (clkreg) { 632 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 633 sc->bus_speed_mhz = 133; 634 break; 635 636 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 637 sc->bus_speed_mhz = 100; 638 break; 639 640 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 641 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 642 sc->bus_speed_mhz = 66; 643 break; 644 645 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 646 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 647 sc->bus_speed_mhz = 50; 648 break; 649 650 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 651 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 652 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 653 sc->bus_speed_mhz = 33; 654 break; 655 } 656 } else { 657 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 658 sc->bus_speed_mhz = 66; 659 else 660 sc->bus_speed_mhz = 33; 661 } 662 663 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 664 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 665 666 /* Reset the controller and announce to bootcode that driver is present. 
*/ 667 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { 668 BCE_PRINTF("%s(%d): Controller reset failed!\n", 669 __FILE__, __LINE__); 670 rc = ENXIO; 671 goto bce_attach_fail; 672 } 673 674 /* Initialize the controller. */ 675 if (bce_chipinit(sc)) { 676 BCE_PRINTF("%s(%d): Controller initialization failed!\n", 677 __FILE__, __LINE__); 678 rc = ENXIO; 679 goto bce_attach_fail; 680 } 681 682 /* Perform NVRAM test. */ 683 if (bce_nvram_test(sc)) { 684 BCE_PRINTF("%s(%d): NVRAM test failed!\n", 685 __FILE__, __LINE__); 686 rc = ENXIO; 687 goto bce_attach_fail; 688 } 689 690 /* Fetch the permanent Ethernet MAC address. */ 691 bce_get_mac_addr(sc); 692 693 /* 694 * Trip points control how many BDs 695 * should be ready before generating an 696 * interrupt while ticks control how long 697 * a BD can sit in the chain before 698 * generating an interrupt. Set the default 699 * values for the RX and TX chains. 700 */ 701 702#ifdef BCE_DEBUG 703 /* Force more frequent interrupts. */ 704 sc->bce_tx_quick_cons_trip_int = 1; 705 sc->bce_tx_quick_cons_trip = 1; 706 sc->bce_tx_ticks_int = 0; 707 sc->bce_tx_ticks = 0; 708 709 sc->bce_rx_quick_cons_trip_int = 1; 710 sc->bce_rx_quick_cons_trip = 1; 711 sc->bce_rx_ticks_int = 0; 712 sc->bce_rx_ticks = 0; 713#else 714 /* Improve throughput at the expense of increased latency. */ 715 sc->bce_tx_quick_cons_trip_int = 20; 716 sc->bce_tx_quick_cons_trip = 20; 717 sc->bce_tx_ticks_int = 80; 718 sc->bce_tx_ticks = 80; 719 720 sc->bce_rx_quick_cons_trip_int = 6; 721 sc->bce_rx_quick_cons_trip = 6; 722 sc->bce_rx_ticks_int = 18; 723 sc->bce_rx_ticks = 18; 724#endif 725 726 /* Update statistics once every second. */ 727 sc->bce_stats_ticks = 1000000 & 0xffff00; 728 729 /* 730 * The SerDes based NetXtreme II controllers 731 * that support 2.5Gb operation (currently 732 * 5708S) use a PHY at address 2, otherwise 733 * the PHY is present at address 1. 
734 */ 735 sc->bce_phy_addr = 1; 736 737 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 738 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 739 sc->bce_flags |= BCE_NO_WOL_FLAG; 740 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 741 sc->bce_phy_addr = 2; 742 val = REG_RD_IND(sc, sc->bce_shmem_base + 743 BCE_SHARED_HW_CFG_CONFIG); 744 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) { 745 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 746 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n"); 747 } 748 } 749 } 750 751 /* Store data needed by PHY driver for backplane applications */ 752 sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base + 753 BCE_SHARED_HW_CFG_CONFIG); 754 sc->bce_port_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base + 755 BCE_SHARED_HW_CFG_CONFIG); 756 757 /* Allocate DMA memory resources. */ 758 if (bce_dma_alloc(dev)) { 759 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n", 760 __FILE__, __LINE__); 761 rc = ENXIO; 762 goto bce_attach_fail; 763 } 764 765 /* Allocate an ifnet structure. */ 766 ifp = sc->bce_ifp = if_alloc(IFT_ETHER); 767 if (ifp == NULL) { 768 BCE_PRINTF("%s(%d): Interface allocation failed!\n", 769 __FILE__, __LINE__); 770 rc = ENXIO; 771 goto bce_attach_fail; 772 } 773 774 /* Initialize the ifnet interface. */ 775 ifp->if_softc = sc; 776 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 777 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 778 ifp->if_ioctl = bce_ioctl; 779 ifp->if_start = bce_start; 780 ifp->if_init = bce_init; 781 ifp->if_mtu = ETHERMTU; 782 783 if (bce_tso_enable) { 784 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO; 785 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4; 786 } else { 787 ifp->if_hwassist = BCE_IF_HWASSIST; 788 ifp->if_capabilities = BCE_IF_CAPABILITIES; 789 } 790 791 ifp->if_capenable = ifp->if_capabilities; 792 793 /* Use standard mbuf sizes for buffer allocation. 
*/ 794#ifdef BCE_USE_SPLIT_HEADER 795 sc->rx_bd_mbuf_alloc_size = MHLEN; 796 /* Make sure offset is 16 byte aligned for hardware. */ 797 sc->rx_bd_mbuf_align_pad = roundup2((MSIZE - MHLEN), 16) - 798 (MSIZE - MHLEN); 799 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - 800 sc->rx_bd_mbuf_align_pad; 801 sc->pg_bd_mbuf_alloc_size = MCLBYTES; 802#else 803 sc->rx_bd_mbuf_alloc_size = MCLBYTES; 804 sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES; 805 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - 806 sc->rx_bd_mbuf_align_pad; 807#endif 808 809 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD; 810 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 811 IFQ_SET_READY(&ifp->if_snd); 812 813 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 814 ifp->if_baudrate = IF_Mbps(2500ULL); 815 else 816 ifp->if_baudrate = IF_Mbps(1000); 817 818 /* Check for an MII child bus by probing the PHY. */ 819 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd, 820 bce_ifmedia_sts)) { 821 BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n", 822 __FILE__, __LINE__); 823 rc = ENXIO; 824 goto bce_attach_fail; 825 } 826 827 /* Attach to the Ethernet interface list. */ 828 ether_ifattach(ifp, sc->eaddr); 829 830#if __FreeBSD_version < 500000 831 callout_init(&sc->bce_tick_callout); 832 callout_init(&sc->bce_pulse_callout); 833#else 834 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0); 835 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0); 836#endif 837 838 /* Hookup IRQ last. */ 839 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, 840 bce_intr, sc, &sc->bce_intrhand); 841 842 if (rc) { 843 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n", 844 __FILE__, __LINE__); 845 bce_detach(dev); 846 goto bce_attach_exit; 847 } 848 849 /* 850 * At this point we've acquired all the resources 851 * we need to run so there's no turning back, we're 852 * cleared for launch. 853 */ 854 855 /* Print some important debugging info. 
*/ 856 DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc)); 857 858 /* Add the supported sysctls to the kernel. */ 859 bce_add_sysctls(sc); 860 861 BCE_LOCK(sc); 862 /* 863 * The chip reset earlier notified the bootcode that 864 * a driver is present. We now need to start our pulse 865 * routine so that the bootcode is reminded that we're 866 * still running. 867 */ 868 bce_pulse(sc); 869 870 bce_mgmt_init_locked(sc); 871 BCE_UNLOCK(sc); 872 873 /* Finally, print some useful adapter info */ 874 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid); 875 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', 876 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); 877 printf("Bus (PCI%s, %s, %dMHz); ", 878 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), 879 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 880 sc->bus_speed_mhz); 881 printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver); 882#ifdef BCE_USE_SPLIT_HEADER 883 printf("SPLT "); 884#endif 885 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) 886 printf("MFW "); 887 if (sc->bce_flags & BCE_USING_MSI_FLAG) 888 printf("MSI "); 889 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 890 printf("2.5G "); 891 printf(")\n"); 892 893 DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n", 894 __FUNCTION__, sc); 895 896 goto bce_attach_exit; 897 898bce_attach_fail: 899 bce_release_resources(sc); 900 901bce_attach_exit: 902 903 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 904 905 return(rc); 906} 907 908 909/****************************************************************************/ 910/* Device detach function. */ 911/* */ 912/* Stops the controller, resets the controller, and releases resources. */ 913/* */ 914/* Returns: */ 915/* 0 on success, positive value on failure. 
 */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	u32 msg;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/* Stop and reset the controller. */
	BCE_LOCK(sc);

	/* Stop the pulse so the bootcode can go to driver absent state. */
	callout_stop(&sc->bce_pulse_callout);

	bce_stop(sc);
	/* Tell the bootcode whether the link must drop (no WOL support). */
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);

	BCE_UNLOCK(sc);

	/* Detach from the network stack before tearing down resources. */
	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bce_miibus);

	/* Release all remaining resources. */
	bce_release_resources(sc);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}


/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	u32 msg;

	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK(sc);
	bce_stop(sc);
	/* Same unload message selection as bce_detach(). */
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);
	BCE_UNLOCK(sc);

	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);

	return (0);
}


/****************************************************************************/
/* Indirect register read.
 */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI    */
/* configuration space. Using this mechanism avoids issues with posted      */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static u32
bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
{
	device_t dev;
	dev = sc->bce_dev;

	/* Select the target register through the config-space window... */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
#ifdef BCE_DEBUG
	{
		u32 val;
		/* ...then read the value back through the data window. */
		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
			__FUNCTION__, offset, val);
		return val;
	}
#else
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#endif
}


/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space. Using this mechanism avoids issues with posted      */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
{
	device_t dev;
	dev = sc->bce_dev;

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
		__FUNCTION__, offset, val);

	/* Window address first, then the data write it targets. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Context memory read.                                                     */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection      */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The requested 32 bit value of context memory.                          */
/****************************************************************************/
static u32
bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 offset)
{
	u32 val;

	/* Context memory is addressed as cid_addr + offset. */
	offset += cid_addr;
	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
	val = REG_RD(sc, BCE_CTX_DATA);

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);

	return(val);
}
#endif


/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection      */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
{

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);

	/* Address register first, then the data write it targets. */
	offset += cid_addr;
	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
	REG_WR(sc, BCE_CTX_DATA, val);
}


/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
 */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Disable hardware autopolling while we drive the MDIO
	 * interface manually; restored before returning. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO read command. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the hardware clears the START_BUSY bit. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Re-enable autopolling if it was active on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);

}


/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Disable hardware autopolling while we drive the MDIO
	 * interface manually; restored before returning. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the hardware clears the START_BUSY bit. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF("%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Re-enable autopolling if it was active on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}


/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;
	int val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Start from the current EMAC mode with the link-dependent bits cleared. */
	val = REG_RD(sc, BCE_EMAC_MODE);
	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
		BCE_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
			val |= BCE_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* fall-through */
	case IFM_100_TX:
		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
		val |= BCE_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BCE_EMAC_MODE_25G;
		/* fall-through: 2.5G also runs the port in GMII mode. */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
		break;
	default:
		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
			"interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		val |= BCE_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");

	REG_WR(sc, BCE_EMAC_MODE, val);

#if 0
	/* ToDo: Enable flow control support in brgphy and bge. */
	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
	/* NOTE(review): the second BCE_SETBIT pairs BCE_EMAC_TX_MODE_FLOW_EN
	 * with the BCE_EMAC_RX_MODE register — confirm before enabling. */
	if (mii->mii_media_active & IFM_FLAG0)
		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
	if (mii->mii_media_active & IFM_FLAG1)
		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
#endif

}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
/* for use by the driver.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	u32 val;
	int j;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	/* Poll until the hardware grants arbitration. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}


/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
/* for use by the driver.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 1360/****************************************************************************/ 1361static int 1362bce_release_nvram_lock(struct bce_softc *sc) 1363{ 1364 int j; 1365 u32 val; 1366 1367 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n"); 1368 1369 /* 1370 * Relinquish nvram interface. 1371 */ 1372 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1373 1374 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1375 val = REG_RD(sc, BCE_NVM_SW_ARB); 1376 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1377 break; 1378 1379 DELAY(5); 1380 } 1381 1382 if (j >= NVRAM_TIMEOUT_COUNT) { 1383 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n"); 1384 return EBUSY; 1385 } 1386 1387 return 0; 1388} 1389 1390 1391#ifdef BCE_NVRAM_WRITE_SUPPORT 1392/****************************************************************************/ 1393/* Enable NVRAM write access. */ 1394/* */ 1395/* Before writing to NVRAM the caller must enable NVRAM writes. */ 1396/* */ 1397/* Returns: */ 1398/* 0 on success, positive value on failure. 
 */
/****************************************************************************/
static int
bce_enable_nvram_write(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");

	/* Set the PCI-visible NVRAM write-enable bit. */
	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash parts additionally need a WREN command. */
	if (!sc->bce_flash_info->buffered) {
		int j;

		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);

		/* Wait for the WREN command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BCE_NVM_COMMAND);
			if (val & BCE_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}
	return 0;
}


/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be        */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_write(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
}
#endif


/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
		val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
			BCE_NVM_ACCESS_ENABLE_WR_EN));
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is      */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");

	/* Build an erase command.
 */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
		BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM. The caller is assumed to have already     */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
	u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			sc->bce_flash_info->page_bits) +
			(offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian; convert to host order. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM. The caller is assumed to have already      */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			sc->bce_flash_info->page_bits) +
			(offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
1703 * Check if the flash interface was reconfigured 1704 * by the bootcode. 1705 */ 1706 1707 if (val & 0x40000000) { 1708 /* Flash interface reconfigured by bootcode. */ 1709 1710 DBPRINT(sc,BCE_INFO_LOAD, 1711 "bce_init_nvram(): Flash WAS reconfigured.\n"); 1712 1713 for (j = 0, flash = &flash_table[0]; j < entry_count; 1714 j++, flash++) { 1715 if ((val & FLASH_BACKUP_STRAP_MASK) == 1716 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1717 sc->bce_flash_info = flash; 1718 break; 1719 } 1720 } 1721 } else { 1722 /* Flash interface not yet reconfigured. */ 1723 u32 mask; 1724 1725 DBPRINT(sc,BCE_INFO_LOAD, 1726 "bce_init_nvram(): Flash was NOT reconfigured.\n"); 1727 1728 if (val & (1 << 23)) 1729 mask = FLASH_BACKUP_STRAP_MASK; 1730 else 1731 mask = FLASH_STRAP_MASK; 1732 1733 /* Look for the matching NVRAM device configuration data. */ 1734 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { 1735 1736 /* Check if the device matches any of the known devices. */ 1737 if ((val & mask) == (flash->strapping & mask)) { 1738 /* Found a device match. */ 1739 sc->bce_flash_info = flash; 1740 1741 /* Request access to the flash interface. */ 1742 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 1743 return rc; 1744 1745 /* Reconfigure the flash interface. */ 1746 bce_enable_nvram_access(sc); 1747 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1748 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1749 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1750 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1751 bce_disable_nvram_access(sc); 1752 bce_release_nvram_lock(sc); 1753 1754 break; 1755 } 1756 } 1757 } 1758 1759 /* Check if a matching device was found. */ 1760 if (j == entry_count) { 1761 sc->bce_flash_info = NULL; 1762 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", 1763 __FILE__, __LINE__); 1764 rc = ENODEV; 1765 } 1766 1767 /* Write the flash config data to the shared memory interface. 
*/ 1768 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2); 1769 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1770 if (val) 1771 sc->bce_flash_size = val; 1772 else 1773 sc->bce_flash_size = sc->bce_flash_info->total_size; 1774 1775 DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n", 1776 sc->bce_flash_info->total_size); 1777 1778 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__); 1779 1780 return rc; 1781} 1782 1783 1784/****************************************************************************/ 1785/* Read an arbitrary range of data from NVRAM. */ 1786/* */ 1787/* Prepares the NVRAM interface for access and reads the requested data */ 1788/* into the supplied buffer. */ 1789/* */ 1790/* Returns: */ 1791/* 0 on success and the data read, positive value on failure. */ 1792/****************************************************************************/ 1793static int 1794bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, 1795 int buf_size) 1796{ 1797 int rc = 0; 1798 u32 cmd_flags, offset32, len32, extra; 1799 1800 if (buf_size == 0) 1801 return 0; 1802 1803 /* Request access to the flash interface. 
*/ 1804 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 1805 return rc; 1806 1807 /* Enable access to flash interface */ 1808 bce_enable_nvram_access(sc); 1809 1810 len32 = buf_size; 1811 offset32 = offset; 1812 extra = 0; 1813 1814 cmd_flags = 0; 1815 1816 if (offset32 & 3) { 1817 u8 buf[4]; 1818 u32 pre_len; 1819 1820 offset32 &= ~3; 1821 pre_len = 4 - (offset & 3); 1822 1823 if (pre_len >= len32) { 1824 pre_len = len32; 1825 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1826 } 1827 else { 1828 cmd_flags = BCE_NVM_COMMAND_FIRST; 1829 } 1830 1831 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1832 1833 if (rc) 1834 return rc; 1835 1836 memcpy(ret_buf, buf + (offset & 3), pre_len); 1837 1838 offset32 += 4; 1839 ret_buf += pre_len; 1840 len32 -= pre_len; 1841 } 1842 1843 if (len32 & 3) { 1844 extra = 4 - (len32 & 3); 1845 len32 = (len32 + 4) & ~3; 1846 } 1847 1848 if (len32 == 4) { 1849 u8 buf[4]; 1850 1851 if (cmd_flags) 1852 cmd_flags = BCE_NVM_COMMAND_LAST; 1853 else 1854 cmd_flags = BCE_NVM_COMMAND_FIRST | 1855 BCE_NVM_COMMAND_LAST; 1856 1857 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1858 1859 memcpy(ret_buf, buf, 4 - extra); 1860 } 1861 else if (len32 > 0) { 1862 u8 buf[4]; 1863 1864 /* Read the first word. */ 1865 if (cmd_flags) 1866 cmd_flags = 0; 1867 else 1868 cmd_flags = BCE_NVM_COMMAND_FIRST; 1869 1870 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1871 1872 /* Advance to the next dword. */ 1873 offset32 += 4; 1874 ret_buf += 4; 1875 len32 -= 4; 1876 1877 while (len32 > 4 && rc == 0) { 1878 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1879 1880 /* Advance to the next dword. */ 1881 offset32 += 4; 1882 ret_buf += 4; 1883 len32 -= 4; 1884 } 1885 1886 if (rc) 1887 return rc; 1888 1889 cmd_flags = BCE_NVM_COMMAND_LAST; 1890 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1891 1892 memcpy(ret_buf, buf, 4 - extra); 1893 } 1894 1895 /* Disable access to flash interface and release the lock. 
*/ 1896 bce_disable_nvram_access(sc); 1897 bce_release_nvram_lock(sc); 1898 1899 return rc; 1900} 1901 1902 1903#ifdef BCE_NVRAM_WRITE_SUPPORT 1904/****************************************************************************/ 1905/* Write an arbitrary range of data from NVRAM. */ 1906/* */ 1907/* Prepares the NVRAM interface for write access and writes the requested */ 1908/* data from the supplied buffer. The caller is responsible for */ 1909/* calculating any appropriate CRCs. */ 1910/* */ 1911/* Returns: */ 1912/* 0 on success, positive value on failure. */ 1913/****************************************************************************/ 1914static int 1915bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, 1916 int buf_size) 1917{ 1918 u32 written, offset32, len32; 1919 u8 *buf, start[4], end[4]; 1920 int rc = 0; 1921 int align_start, align_end; 1922 1923 buf = data_buf; 1924 offset32 = offset; 1925 len32 = buf_size; 1926 align_start = align_end = 0; 1927 1928 if ((align_start = (offset32 & 3))) { 1929 offset32 &= ~3; 1930 len32 += align_start; 1931 if ((rc = bce_nvram_read(sc, offset32, start, 4))) 1932 return rc; 1933 } 1934 1935 if (len32 & 3) { 1936 if ((len32 > 4) || !align_start) { 1937 align_end = 4 - (len32 & 3); 1938 len32 += align_end; 1939 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, 1940 end, 4))) { 1941 return rc; 1942 } 1943 } 1944 } 1945 1946 if (align_start || align_end) { 1947 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1948 if (buf == 0) 1949 return ENOMEM; 1950 if (align_start) { 1951 memcpy(buf, start, 4); 1952 } 1953 if (align_end) { 1954 memcpy(buf + len32 - 4, end, 4); 1955 } 1956 memcpy(buf + align_start, data_buf, buf_size); 1957 } 1958 1959 written = 0; 1960 while ((written < len32) && (rc == 0)) { 1961 u32 page_start, page_end, data_start, data_end; 1962 u32 addr, cmd_flags; 1963 int i; 1964 u8 flash_buffer[264]; 1965 1966 /* Find the page_start addr */ 1967 page_start = offset32 + written; 1968 page_start -= 
(page_start % sc->bce_flash_info->page_size); 1969 /* Find the page_end addr */ 1970 page_end = page_start + sc->bce_flash_info->page_size; 1971 /* Find the data_start addr */ 1972 data_start = (written == 0) ? offset32 : page_start; 1973 /* Find the data_end addr */ 1974 data_end = (page_end > offset32 + len32) ? 1975 (offset32 + len32) : page_end; 1976 1977 /* Request access to the flash interface. */ 1978 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 1979 goto nvram_write_end; 1980 1981 /* Enable access to flash interface */ 1982 bce_enable_nvram_access(sc); 1983 1984 cmd_flags = BCE_NVM_COMMAND_FIRST; 1985 if (sc->bce_flash_info->buffered == 0) { 1986 int j; 1987 1988 /* Read the whole page into the buffer 1989 * (non-buffer flash only) */ 1990 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { 1991 if (j == (sc->bce_flash_info->page_size - 4)) { 1992 cmd_flags |= BCE_NVM_COMMAND_LAST; 1993 } 1994 rc = bce_nvram_read_dword(sc, 1995 page_start + j, 1996 &flash_buffer[j], 1997 cmd_flags); 1998 1999 if (rc) 2000 goto nvram_write_end; 2001 2002 cmd_flags = 0; 2003 } 2004 } 2005 2006 /* Enable writes to flash interface (unlock write-protect) */ 2007 if ((rc = bce_enable_nvram_write(sc)) != 0) 2008 goto nvram_write_end; 2009 2010 /* Erase the page */ 2011 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) 2012 goto nvram_write_end; 2013 2014 /* Re-enable the write again for the actual write */ 2015 bce_enable_nvram_write(sc); 2016 2017 /* Loop to write back the buffer data from page_start to 2018 * data_start */ 2019 i = 0; 2020 if (sc->bce_flash_info->buffered == 0) { 2021 for (addr = page_start; addr < data_start; 2022 addr += 4, i += 4) { 2023 2024 rc = bce_nvram_write_dword(sc, addr, 2025 &flash_buffer[i], cmd_flags); 2026 2027 if (rc != 0) 2028 goto nvram_write_end; 2029 2030 cmd_flags = 0; 2031 } 2032 } 2033 2034 /* Loop to write the new data from data_start to data_end */ 2035 for (addr = data_start; addr < data_end; addr += 4, i++) { 2036 if ((addr == 
page_end - 4) || 2037 ((sc->bce_flash_info->buffered) && 2038 (addr == data_end - 4))) { 2039 2040 cmd_flags |= BCE_NVM_COMMAND_LAST; 2041 } 2042 rc = bce_nvram_write_dword(sc, addr, buf, 2043 cmd_flags); 2044 2045 if (rc != 0) 2046 goto nvram_write_end; 2047 2048 cmd_flags = 0; 2049 buf += 4; 2050 } 2051 2052 /* Loop to write back the buffer data from data_end 2053 * to page_end */ 2054 if (sc->bce_flash_info->buffered == 0) { 2055 for (addr = data_end; addr < page_end; 2056 addr += 4, i += 4) { 2057 2058 if (addr == page_end-4) { 2059 cmd_flags = BCE_NVM_COMMAND_LAST; 2060 } 2061 rc = bce_nvram_write_dword(sc, addr, 2062 &flash_buffer[i], cmd_flags); 2063 2064 if (rc != 0) 2065 goto nvram_write_end; 2066 2067 cmd_flags = 0; 2068 } 2069 } 2070 2071 /* Disable writes to flash interface (lock write-protect) */ 2072 bce_disable_nvram_write(sc); 2073 2074 /* Disable access to flash interface */ 2075 bce_disable_nvram_access(sc); 2076 bce_release_nvram_lock(sc); 2077 2078 /* Increment written */ 2079 written += data_end - data_start; 2080 } 2081 2082nvram_write_end: 2083 if (align_start || align_end) 2084 free(buf, M_DEVBUF); 2085 2086 return rc; 2087} 2088#endif /* BCE_NVRAM_WRITE_SUPPORT */ 2089 2090 2091/****************************************************************************/ 2092/* Verifies that NVRAM is accessible and contains valid data. */ 2093/* */ 2094/* Reads the configuration data from NVRAM and verifies that the CRC is */ 2095/* correct. */ 2096/* */ 2097/* Returns: */ 2098/* 0 on success, positive value on failure. */ 2099/****************************************************************************/ 2100static int 2101bce_nvram_test(struct bce_softc *sc) 2102{ 2103 u32 buf[BCE_NVRAM_SIZE / 4]; 2104 u8 *data = (u8 *) buf; 2105 int rc = 0; 2106 u32 magic, csum; 2107 2108 2109 /* 2110 * Check that the device NVRAM is valid by reading 2111 * the magic value at offset 0. 
2112 */ 2113 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) 2114 goto bce_nvram_test_done; 2115 2116 2117 magic = bce_be32toh(buf[0]); 2118 if (magic != BCE_NVRAM_MAGIC) { 2119 rc = ENODEV; 2120 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, " 2121 "Found: 0x%08X\n", 2122 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); 2123 goto bce_nvram_test_done; 2124 } 2125 2126 /* 2127 * Verify that the device NVRAM includes valid 2128 * configuration data. 2129 */ 2130 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) 2131 goto bce_nvram_test_done; 2132 2133 csum = ether_crc32_le(data, 0x100); 2134 if (csum != BCE_CRC32_RESIDUAL) { 2135 rc = ENODEV; 2136 BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! " 2137 "Expected: 0x%08X, Found: 0x%08X\n", 2138 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2139 goto bce_nvram_test_done; 2140 } 2141 2142 csum = ether_crc32_le(data + 0x100, 0x100); 2143 if (csum != BCE_CRC32_RESIDUAL) { 2144 BCE_PRINTF("%s(%d): Invalid Feature Configuration Information " 2145 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 2146 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2147 rc = ENODEV; 2148 } 2149 2150bce_nvram_test_done: 2151 return rc; 2152} 2153 2154 2155/****************************************************************************/ 2156/* Free any DMA memory owned by the driver. */ 2157/* */ 2158/* Scans through each data structre that requires DMA memory and frees */ 2159/* the memory if allocated. */ 2160/* */ 2161/* Returns: */ 2162/* Nothing. */ 2163/****************************************************************************/ 2164static void 2165bce_dma_free(struct bce_softc *sc) 2166{ 2167 int i; 2168 2169 DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 2170 2171 /* Destroy the status block. 
*/ 2172 if (sc->status_block != NULL) { 2173 bus_dmamem_free( 2174 sc->status_tag, 2175 sc->status_block, 2176 sc->status_map); 2177 sc->status_block = NULL; 2178 } 2179 2180 if (sc->status_map != NULL) { 2181 bus_dmamap_unload( 2182 sc->status_tag, 2183 sc->status_map); 2184 bus_dmamap_destroy(sc->status_tag, 2185 sc->status_map); 2186 sc->status_map = NULL; 2187 } 2188 2189 if (sc->status_tag != NULL) { 2190 bus_dma_tag_destroy(sc->status_tag); 2191 sc->status_tag = NULL; 2192 } 2193 2194 2195 /* Destroy the statistics block. */ 2196 if (sc->stats_block != NULL) { 2197 bus_dmamem_free( 2198 sc->stats_tag, 2199 sc->stats_block, 2200 sc->stats_map); 2201 sc->stats_block = NULL; 2202 } 2203 2204 if (sc->stats_map != NULL) { 2205 bus_dmamap_unload( 2206 sc->stats_tag, 2207 sc->stats_map); 2208 bus_dmamap_destroy(sc->stats_tag, 2209 sc->stats_map); 2210 sc->stats_map = NULL; 2211 } 2212 2213 if (sc->stats_tag != NULL) { 2214 bus_dma_tag_destroy(sc->stats_tag); 2215 sc->stats_tag = NULL; 2216 } 2217 2218 2219 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 2220 for (i = 0; i < TX_PAGES; i++ ) { 2221 if (sc->tx_bd_chain[i] != NULL) { 2222 bus_dmamem_free( 2223 sc->tx_bd_chain_tag, 2224 sc->tx_bd_chain[i], 2225 sc->tx_bd_chain_map[i]); 2226 sc->tx_bd_chain[i] = NULL; 2227 } 2228 2229 if (sc->tx_bd_chain_map[i] != NULL) { 2230 bus_dmamap_unload( 2231 sc->tx_bd_chain_tag, 2232 sc->tx_bd_chain_map[i]); 2233 bus_dmamap_destroy( 2234 sc->tx_bd_chain_tag, 2235 sc->tx_bd_chain_map[i]); 2236 sc->tx_bd_chain_map[i] = NULL; 2237 } 2238 } 2239 2240 /* Destroy the TX buffer descriptor tag. */ 2241 if (sc->tx_bd_chain_tag != NULL) { 2242 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2243 sc->tx_bd_chain_tag = NULL; 2244 } 2245 2246 2247 /* Free, unmap and destroy all RX buffer descriptor chain pages. 
*/ 2248 for (i = 0; i < RX_PAGES; i++ ) { 2249 if (sc->rx_bd_chain[i] != NULL) { 2250 bus_dmamem_free( 2251 sc->rx_bd_chain_tag, 2252 sc->rx_bd_chain[i], 2253 sc->rx_bd_chain_map[i]); 2254 sc->rx_bd_chain[i] = NULL; 2255 } 2256 2257 if (sc->rx_bd_chain_map[i] != NULL) { 2258 bus_dmamap_unload( 2259 sc->rx_bd_chain_tag, 2260 sc->rx_bd_chain_map[i]); 2261 bus_dmamap_destroy( 2262 sc->rx_bd_chain_tag, 2263 sc->rx_bd_chain_map[i]); 2264 sc->rx_bd_chain_map[i] = NULL; 2265 } 2266 } 2267 2268 /* Destroy the RX buffer descriptor tag. */ 2269 if (sc->rx_bd_chain_tag != NULL) { 2270 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2271 sc->rx_bd_chain_tag = NULL; 2272 } 2273 2274 2275#ifdef BCE_USE_SPLIT_HEADER 2276 /* Free, unmap and destroy all page buffer descriptor chain pages. */ 2277 for (i = 0; i < PG_PAGES; i++ ) { 2278 if (sc->pg_bd_chain[i] != NULL) { 2279 bus_dmamem_free( 2280 sc->pg_bd_chain_tag, 2281 sc->pg_bd_chain[i], 2282 sc->pg_bd_chain_map[i]); 2283 sc->pg_bd_chain[i] = NULL; 2284 } 2285 2286 if (sc->pg_bd_chain_map[i] != NULL) { 2287 bus_dmamap_unload( 2288 sc->pg_bd_chain_tag, 2289 sc->pg_bd_chain_map[i]); 2290 bus_dmamap_destroy( 2291 sc->pg_bd_chain_tag, 2292 sc->pg_bd_chain_map[i]); 2293 sc->pg_bd_chain_map[i] = NULL; 2294 } 2295 } 2296 2297 /* Destroy the page buffer descriptor tag. */ 2298 if (sc->pg_bd_chain_tag != NULL) { 2299 bus_dma_tag_destroy(sc->pg_bd_chain_tag); 2300 sc->pg_bd_chain_tag = NULL; 2301 } 2302#endif 2303 2304 2305 /* Unload and destroy the TX mbuf maps. */ 2306 for (i = 0; i < TOTAL_TX_BD; i++) { 2307 if (sc->tx_mbuf_map[i] != NULL) { 2308 bus_dmamap_unload(sc->tx_mbuf_tag, 2309 sc->tx_mbuf_map[i]); 2310 bus_dmamap_destroy(sc->tx_mbuf_tag, 2311 sc->tx_mbuf_map[i]); 2312 sc->tx_mbuf_map[i] = NULL; 2313 } 2314 } 2315 2316 /* Destroy the TX mbuf tag. */ 2317 if (sc->tx_mbuf_tag != NULL) { 2318 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2319 sc->tx_mbuf_tag = NULL; 2320 } 2321 2322 /* Unload and destroy the RX mbuf maps. 
*/ 2323 for (i = 0; i < TOTAL_RX_BD; i++) { 2324 if (sc->rx_mbuf_map[i] != NULL) { 2325 bus_dmamap_unload(sc->rx_mbuf_tag, 2326 sc->rx_mbuf_map[i]); 2327 bus_dmamap_destroy(sc->rx_mbuf_tag, 2328 sc->rx_mbuf_map[i]); 2329 sc->rx_mbuf_map[i] = NULL; 2330 } 2331 } 2332 2333 /* Destroy the RX mbuf tag. */ 2334 if (sc->rx_mbuf_tag != NULL) { 2335 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2336 sc->rx_mbuf_tag = NULL; 2337 } 2338 2339#ifdef BCE_USE_SPLIT_HEADER 2340 /* Unload and destroy the page mbuf maps. */ 2341 for (i = 0; i < TOTAL_PG_BD; i++) { 2342 if (sc->pg_mbuf_map[i] != NULL) { 2343 bus_dmamap_unload(sc->pg_mbuf_tag, 2344 sc->pg_mbuf_map[i]); 2345 bus_dmamap_destroy(sc->pg_mbuf_tag, 2346 sc->pg_mbuf_map[i]); 2347 sc->pg_mbuf_map[i] = NULL; 2348 } 2349 } 2350 2351 /* Destroy the page mbuf tag. */ 2352 if (sc->pg_mbuf_tag != NULL) { 2353 bus_dma_tag_destroy(sc->pg_mbuf_tag); 2354 sc->pg_mbuf_tag = NULL; 2355 } 2356#endif 2357 2358 /* Destroy the parent tag */ 2359 if (sc->parent_tag != NULL) { 2360 bus_dma_tag_destroy(sc->parent_tag); 2361 sc->parent_tag = NULL; 2362 } 2363 2364 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 2365 2366} 2367 2368 2369/****************************************************************************/ 2370/* Get DMA memory from the OS. */ 2371/* */ 2372/* Validates that the OS has provided DMA buffers in response to a */ 2373/* bus_dmamap_load() call and saves the physical address of those buffers. */ 2374/* When the callback is used the OS will return 0 for the mapping function */ 2375/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 2376/* failures back to the caller. */ 2377/* */ 2378/* Returns: */ 2379/* Nothing. */ 2380/****************************************************************************/ 2381static void 2382bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2383{ 2384 bus_addr_t *busaddr = arg; 2385 2386 /* Simulate a mapping failure. 
*/ 2387 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure), 2388 printf("bce: %s(%d): Simulating DMA mapping error.\n", 2389 __FILE__, __LINE__); 2390 error = ENOMEM); 2391 2392 /* Check for an error and signal the caller that an error occurred. */ 2393 if (error) { 2394 printf("bce %s(%d): DMA mapping error! error = %d, " 2395 "nseg = %d\n", __FILE__, __LINE__, error, nseg); 2396 *busaddr = 0; 2397 return; 2398 } 2399 2400 *busaddr = segs->ds_addr; 2401 return; 2402} 2403 2404 2405/****************************************************************************/ 2406/* Allocate any DMA memory needed by the driver. */ 2407/* */ 2408/* Allocates DMA memory needed for the various global structures needed by */ 2409/* hardware. */ 2410/* */ 2411/* Memory alignment requirements: */ 2412/* -----------------+----------+----------+ */ 2413/* Data Structure | 5706 | 5708 | */ 2414/* -----------------+----------+----------+ */ 2415/* Status Block | 8 bytes | 8 bytes | */ 2416/* Statistics Block | 8 bytes | 8 bytes | */ 2417/* RX Buffers | 16 bytes | 16 bytes | */ 2418/* PG Buffers | none | none | */ 2419/* TX Buffers | none | none | */ 2420/* Chain Pages(1) | 4KiB | 4KiB | */ 2421/* -----------------+----------+----------+ */ 2422/* */ 2423/* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 2424/* */ 2425/* Returns: */ 2426/* 0 for success, positive value for failure. */ 2427/****************************************************************************/ 2428static int 2429bce_dma_alloc(device_t dev) 2430{ 2431 struct bce_softc *sc; 2432 int i, error, rc = 0; 2433 bus_addr_t busaddr; 2434 bus_size_t max_size, max_seg_size; 2435 int max_segments; 2436 2437 sc = device_get_softc(dev); 2438 2439 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 2440 2441 /* 2442 * Allocate the parent bus DMA tag appropriate for PCI. 
2443 */ 2444 if (bus_dma_tag_create(NULL, 2445 1, 2446 BCE_DMA_BOUNDARY, 2447 sc->max_bus_addr, 2448 BUS_SPACE_MAXADDR, 2449 NULL, NULL, 2450 MAXBSIZE, 2451 BUS_SPACE_UNRESTRICTED, 2452 BUS_SPACE_MAXSIZE_32BIT, 2453 0, 2454 NULL, NULL, 2455 &sc->parent_tag)) { 2456 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", 2457 __FILE__, __LINE__); 2458 rc = ENOMEM; 2459 goto bce_dma_alloc_exit; 2460 } 2461 2462 /* 2463 * Create a DMA tag for the status block, allocate and clear the 2464 * memory, map the memory into DMA space, and fetch the physical 2465 * address of the block. 2466 */ 2467 if (bus_dma_tag_create(sc->parent_tag, 2468 BCE_DMA_ALIGN, 2469 BCE_DMA_BOUNDARY, 2470 sc->max_bus_addr, 2471 BUS_SPACE_MAXADDR, 2472 NULL, NULL, 2473 BCE_STATUS_BLK_SZ, 2474 1, 2475 BCE_STATUS_BLK_SZ, 2476 0, 2477 NULL, NULL, 2478 &sc->status_tag)) { 2479 BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n", 2480 __FILE__, __LINE__); 2481 rc = ENOMEM; 2482 goto bce_dma_alloc_exit; 2483 } 2484 2485 if(bus_dmamem_alloc(sc->status_tag, 2486 (void **)&sc->status_block, 2487 BUS_DMA_NOWAIT, 2488 &sc->status_map)) { 2489 BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n", 2490 __FILE__, __LINE__); 2491 rc = ENOMEM; 2492 goto bce_dma_alloc_exit; 2493 } 2494 2495 bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ); 2496 2497 error = bus_dmamap_load(sc->status_tag, 2498 sc->status_map, 2499 sc->status_block, 2500 BCE_STATUS_BLK_SZ, 2501 bce_dma_map_addr, 2502 &busaddr, 2503 BUS_DMA_NOWAIT); 2504 2505 if (error) { 2506 BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n", 2507 __FILE__, __LINE__); 2508 rc = ENOMEM; 2509 goto bce_dma_alloc_exit; 2510 } 2511 2512 sc->status_block_paddr = busaddr; 2513 /* DRC - Fix for 64 bit addresses. 
*/ 2514 DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n", 2515 (u32) sc->status_block_paddr); 2516 2517 /* 2518 * Create a DMA tag for the statistics block, allocate and clear the 2519 * memory, map the memory into DMA space, and fetch the physical 2520 * address of the block. 2521 */ 2522 if (bus_dma_tag_create(sc->parent_tag, 2523 BCE_DMA_ALIGN, 2524 BCE_DMA_BOUNDARY, 2525 sc->max_bus_addr, 2526 BUS_SPACE_MAXADDR, 2527 NULL, NULL, 2528 BCE_STATS_BLK_SZ, 2529 1, 2530 BCE_STATS_BLK_SZ, 2531 0, 2532 NULL, NULL, 2533 &sc->stats_tag)) { 2534 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n", 2535 __FILE__, __LINE__); 2536 rc = ENOMEM; 2537 goto bce_dma_alloc_exit; 2538 } 2539 2540 if (bus_dmamem_alloc(sc->stats_tag, 2541 (void **)&sc->stats_block, 2542 BUS_DMA_NOWAIT, 2543 &sc->stats_map)) { 2544 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n", 2545 __FILE__, __LINE__); 2546 rc = ENOMEM; 2547 goto bce_dma_alloc_exit; 2548 } 2549 2550 bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ); 2551 2552 error = bus_dmamap_load(sc->stats_tag, 2553 sc->stats_map, 2554 sc->stats_block, 2555 BCE_STATS_BLK_SZ, 2556 bce_dma_map_addr, 2557 &busaddr, 2558 BUS_DMA_NOWAIT); 2559 2560 if(error) { 2561 BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n", 2562 __FILE__, __LINE__); 2563 rc = ENOMEM; 2564 goto bce_dma_alloc_exit; 2565 } 2566 2567 sc->stats_block_paddr = busaddr; 2568 /* DRC - Fix for 64 bit address. */ 2569 DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n", 2570 (u32) sc->stats_block_paddr); 2571 2572 /* 2573 * Create a DMA tag for the TX buffer descriptor chain, 2574 * allocate and clear the memory, and fetch the 2575 * physical address of the block. 
2576 */ 2577 if(bus_dma_tag_create(sc->parent_tag, 2578 BCM_PAGE_SIZE, 2579 BCE_DMA_BOUNDARY, 2580 sc->max_bus_addr, 2581 BUS_SPACE_MAXADDR, 2582 NULL, NULL, 2583 BCE_TX_CHAIN_PAGE_SZ, 2584 1, 2585 BCE_TX_CHAIN_PAGE_SZ, 2586 0, 2587 NULL, NULL, 2588 &sc->tx_bd_chain_tag)) { 2589 BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n", 2590 __FILE__, __LINE__); 2591 rc = ENOMEM; 2592 goto bce_dma_alloc_exit; 2593 } 2594 2595 for (i = 0; i < TX_PAGES; i++) { 2596 2597 if(bus_dmamem_alloc(sc->tx_bd_chain_tag, 2598 (void **)&sc->tx_bd_chain[i], 2599 BUS_DMA_NOWAIT, 2600 &sc->tx_bd_chain_map[i])) { 2601 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 2602 "chain DMA memory!\n", __FILE__, __LINE__); 2603 rc = ENOMEM; 2604 goto bce_dma_alloc_exit; 2605 } 2606 2607 error = bus_dmamap_load(sc->tx_bd_chain_tag, 2608 sc->tx_bd_chain_map[i], 2609 sc->tx_bd_chain[i], 2610 BCE_TX_CHAIN_PAGE_SZ, 2611 bce_dma_map_addr, 2612 &busaddr, 2613 BUS_DMA_NOWAIT); 2614 2615 if (error) { 2616 BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n", 2617 __FILE__, __LINE__); 2618 rc = ENOMEM; 2619 goto bce_dma_alloc_exit; 2620 } 2621 2622 sc->tx_bd_chain_paddr[i] = busaddr; 2623 /* DRC - Fix for 64 bit systems. */ 2624 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2625 i, (u32) sc->tx_bd_chain_paddr[i]); 2626 } 2627 2628 /* Check the required size before mapping to conserve resources. */ 2629 if (bce_tso_enable) { 2630 max_size = BCE_TSO_MAX_SIZE; 2631 max_segments = BCE_MAX_SEGMENTS; 2632 max_seg_size = BCE_TSO_MAX_SEG_SIZE; 2633 } else { 2634 max_size = MCLBYTES * BCE_MAX_SEGMENTS; 2635 max_segments = BCE_MAX_SEGMENTS; 2636 max_seg_size = MCLBYTES; 2637 } 2638 2639 /* Create a DMA tag for TX mbufs. 
*/ 2640 if (bus_dma_tag_create(sc->parent_tag, 2641 1, 2642 BCE_DMA_BOUNDARY, 2643 sc->max_bus_addr, 2644 BUS_SPACE_MAXADDR, 2645 NULL, NULL, 2646 max_size, 2647 max_segments, 2648 max_seg_size, 2649 0, 2650 NULL, NULL, 2651 &sc->tx_mbuf_tag)) { 2652 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", 2653 __FILE__, __LINE__); 2654 rc = ENOMEM; 2655 goto bce_dma_alloc_exit; 2656 } 2657 2658 /* Create DMA maps for the TX mbufs clusters. */ 2659 for (i = 0; i < TOTAL_TX_BD; i++) { 2660 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 2661 &sc->tx_mbuf_map[i])) { 2662 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n", 2663 __FILE__, __LINE__); 2664 rc = ENOMEM; 2665 goto bce_dma_alloc_exit; 2666 } 2667 } 2668 2669 /* 2670 * Create a DMA tag for the RX buffer descriptor chain, 2671 * allocate and clear the memory, and fetch the physical 2672 * address of the blocks. 2673 */ 2674 if (bus_dma_tag_create(sc->parent_tag, 2675 BCM_PAGE_SIZE, 2676 BCE_DMA_BOUNDARY, 2677 BUS_SPACE_MAXADDR, 2678 sc->max_bus_addr, 2679 NULL, NULL, 2680 BCE_RX_CHAIN_PAGE_SZ, 2681 1, 2682 BCE_RX_CHAIN_PAGE_SZ, 2683 0, 2684 NULL, NULL, 2685 &sc->rx_bd_chain_tag)) { 2686 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n", 2687 __FILE__, __LINE__); 2688 rc = ENOMEM; 2689 goto bce_dma_alloc_exit; 2690 } 2691 2692 for (i = 0; i < RX_PAGES; i++) { 2693 2694 if (bus_dmamem_alloc(sc->rx_bd_chain_tag, 2695 (void **)&sc->rx_bd_chain[i], 2696 BUS_DMA_NOWAIT, 2697 &sc->rx_bd_chain_map[i])) { 2698 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " 2699 "DMA memory!\n", __FILE__, __LINE__); 2700 rc = ENOMEM; 2701 goto bce_dma_alloc_exit; 2702 } 2703 2704 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); 2705 2706 error = bus_dmamap_load(sc->rx_bd_chain_tag, 2707 sc->rx_bd_chain_map[i], 2708 sc->rx_bd_chain[i], 2709 BCE_RX_CHAIN_PAGE_SZ, 2710 bce_dma_map_addr, 2711 &busaddr, 2712 BUS_DMA_NOWAIT); 2713 2714 if (error) { 2715 BCE_PRINTF("%s(%d): Could not 
map RX descriptor chain DMA memory!\n", 2716 __FILE__, __LINE__); 2717 rc = ENOMEM; 2718 goto bce_dma_alloc_exit; 2719 } 2720 2721 sc->rx_bd_chain_paddr[i] = busaddr; 2722 /* DRC - Fix for 64 bit systems. */ 2723 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2724 i, (u32) sc->rx_bd_chain_paddr[i]); 2725 } 2726 2727 /* 2728 * Create a DMA tag for RX mbufs. 2729 */ 2730#ifdef BCE_USE_SPLIT_HEADER 2731 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 2732 MCLBYTES : sc->rx_bd_mbuf_alloc_size); 2733#else 2734 max_size = max_seg_size = MJUM9BYTES; 2735#endif 2736 2737 if (bus_dma_tag_create(sc->parent_tag, 2738 1, 2739 BCE_DMA_BOUNDARY, 2740 sc->max_bus_addr, 2741 BUS_SPACE_MAXADDR, 2742 NULL, NULL, 2743 max_size, 2744 1, 2745 max_seg_size, 2746 0, 2747 NULL, NULL, 2748 &sc->rx_mbuf_tag)) { 2749 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", 2750 __FILE__, __LINE__); 2751 rc = ENOMEM; 2752 goto bce_dma_alloc_exit; 2753 } 2754 2755 /* Create DMA maps for the RX mbuf clusters. */ 2756 for (i = 0; i < TOTAL_RX_BD; i++) { 2757 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, 2758 &sc->rx_mbuf_map[i])) { 2759 BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n", 2760 __FILE__, __LINE__); 2761 rc = ENOMEM; 2762 goto bce_dma_alloc_exit; 2763 } 2764 } 2765 2766#ifdef BCE_USE_SPLIT_HEADER 2767 /* 2768 * Create a DMA tag for the page buffer descriptor chain, 2769 * allocate and clear the memory, and fetch the physical 2770 * address of the blocks. 
2771 */ 2772 if (bus_dma_tag_create(sc->parent_tag, 2773 BCM_PAGE_SIZE, 2774 BCE_DMA_BOUNDARY, 2775 BUS_SPACE_MAXADDR, 2776 sc->max_bus_addr, 2777 NULL, NULL, 2778 BCE_PG_CHAIN_PAGE_SZ, 2779 1, 2780 BCE_PG_CHAIN_PAGE_SZ, 2781 0, 2782 NULL, NULL, 2783 &sc->pg_bd_chain_tag)) { 2784 BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n", 2785 __FILE__, __LINE__); 2786 rc = ENOMEM; 2787 goto bce_dma_alloc_exit; 2788 } 2789 2790 for (i = 0; i < PG_PAGES; i++) { 2791 2792 if (bus_dmamem_alloc(sc->pg_bd_chain_tag, 2793 (void **)&sc->pg_bd_chain[i], 2794 BUS_DMA_NOWAIT, 2795 &sc->pg_bd_chain_map[i])) { 2796 BCE_PRINTF("%s(%d): Could not allocate page descriptor chain " 2797 "DMA memory!\n", __FILE__, __LINE__); 2798 rc = ENOMEM; 2799 goto bce_dma_alloc_exit; 2800 } 2801 2802 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); 2803 2804 error = bus_dmamap_load(sc->pg_bd_chain_tag, 2805 sc->pg_bd_chain_map[i], 2806 sc->pg_bd_chain[i], 2807 BCE_PG_CHAIN_PAGE_SZ, 2808 bce_dma_map_addr, 2809 &busaddr, 2810 BUS_DMA_NOWAIT); 2811 2812 if (error) { 2813 BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n", 2814 __FILE__, __LINE__); 2815 rc = ENOMEM; 2816 goto bce_dma_alloc_exit; 2817 } 2818 2819 sc->pg_bd_chain_paddr[i] = busaddr; 2820 /* DRC - Fix for 64 bit systems. */ 2821 DBPRINT(sc, BCE_INFO, "pg_bd_chain_paddr[%d] = 0x%08X\n", 2822 i, (u32) sc->pg_bd_chain_paddr[i]); 2823 } 2824 2825 /* 2826 * Create a DMA tag for page mbufs. 2827 */ 2828 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ? 
2829 MCLBYTES : sc->pg_bd_mbuf_alloc_size); 2830 2831 if (bus_dma_tag_create(sc->parent_tag, 2832 1, 2833 BCE_DMA_BOUNDARY, 2834 sc->max_bus_addr, 2835 BUS_SPACE_MAXADDR, 2836 NULL, NULL, 2837 max_size, 2838 1, 2839 max_seg_size, 2840 0, 2841 NULL, NULL, 2842 &sc->pg_mbuf_tag)) { 2843 BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n", 2844 __FILE__, __LINE__); 2845 rc = ENOMEM; 2846 goto bce_dma_alloc_exit; 2847 } 2848 2849 /* Create DMA maps for the page mbuf clusters. */ 2850 for (i = 0; i < TOTAL_PG_BD; i++) { 2851 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, 2852 &sc->pg_mbuf_map[i])) { 2853 BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n", 2854 __FILE__, __LINE__); 2855 rc = ENOMEM; 2856 goto bce_dma_alloc_exit; 2857 } 2858 } 2859#endif 2860 2861bce_dma_alloc_exit: 2862 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 2863 2864 return(rc); 2865} 2866 2867 2868/****************************************************************************/ 2869/* Release all resources used by the driver. */ 2870/* */ 2871/* Releases all resources acquired by the driver including interrupts, */ 2872/* interrupt handler, interfaces, mutexes, and DMA memory. */ 2873/* */ 2874/* Returns: */ 2875/* Nothing. */ 2876/****************************************************************************/ 2877static void 2878bce_release_resources(struct bce_softc *sc) 2879{ 2880 device_t dev; 2881 2882 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 2883 2884 dev = sc->bce_dev; 2885 2886 bce_dma_free(sc); 2887 2888 if (sc->bce_intrhand != NULL) { 2889 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); 2890 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 2891 } 2892 2893 if (sc->bce_res_irq != NULL) { 2894 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); 2895 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 
1 : 0, 2896 sc->bce_res_irq); 2897 } 2898 2899 if (sc->bce_flags & BCE_USING_MSI_FLAG) { 2900 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n"); 2901 pci_release_msi(dev); 2902 } 2903 2904 if (sc->bce_res_mem != NULL) { 2905 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); 2906 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem); 2907 } 2908 2909 if (sc->bce_ifp != NULL) { 2910 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); 2911 if_free(sc->bce_ifp); 2912 } 2913 2914 if (mtx_initialized(&sc->bce_mtx)) 2915 BCE_LOCK_DESTROY(sc); 2916 2917 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 2918 2919} 2920 2921 2922/****************************************************************************/ 2923/* Firmware synchronization. */ 2924/* */ 2925/* Before performing certain events such as a chip reset, synchronize with */ 2926/* the firmware first. */ 2927/* */ 2928/* Returns: */ 2929/* 0 for success, positive value for failure. */ 2930/****************************************************************************/ 2931static int 2932bce_fw_sync(struct bce_softc *sc, u32 msg_data) 2933{ 2934 int i, rc = 0; 2935 u32 val; 2936 2937 /* Don't waste any time if we've timed out before. */ 2938 if (sc->bce_fw_timed_out) { 2939 rc = EBUSY; 2940 goto bce_fw_sync_exit; 2941 } 2942 2943 /* Increment the message sequence number. */ 2944 sc->bce_fw_wr_seq++; 2945 msg_data |= sc->bce_fw_wr_seq; 2946 2947 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data); 2948 2949 /* Send the message to the bootcode driver mailbox. */ 2950 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data); 2951 2952 /* Wait for the bootcode to acknowledge the message. */ 2953 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2954 /* Check for a response in the bootcode firmware mailbox. 
*/ 2955 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB); 2956 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2957 break; 2958 DELAY(1000); 2959 } 2960 2961 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2962 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && 2963 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { 2964 2965 BCE_PRINTF("%s(%d): Firmware synchronization timeout! " 2966 "msg_data = 0x%08X\n", 2967 __FILE__, __LINE__, msg_data); 2968 2969 msg_data &= ~BCE_DRV_MSG_CODE; 2970 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2971 2972 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data); 2973 2974 sc->bce_fw_timed_out = 1; 2975 rc = EBUSY; 2976 } 2977 2978bce_fw_sync_exit: 2979 return (rc); 2980} 2981 2982 2983/****************************************************************************/ 2984/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2985/* */ 2986/* Returns: */ 2987/* Nothing. */ 2988/****************************************************************************/ 2989static void 2990bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 2991 u32 rv2p_code_len, u32 rv2p_proc) 2992{ 2993 int i; 2994 u32 val; 2995 2996 /* Set the page size used by RV2P. */ 2997 if (rv2p_proc == RV2P_PROC2) { 2998 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); 2999 } 3000 3001 for (i = 0; i < rv2p_code_len; i += 8) { 3002 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 3003 rv2p_code++; 3004 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 3005 rv2p_code++; 3006 3007 if (rv2p_proc == RV2P_PROC1) { 3008 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 3009 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 3010 } 3011 else { 3012 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 3013 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 3014 } 3015 } 3016 3017 /* Reset the processor, un-stall is done later. 
*/ 3018 if (rv2p_proc == RV2P_PROC1) { 3019 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 3020 } 3021 else { 3022 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 3023 } 3024} 3025 3026 3027/****************************************************************************/ 3028/* Load RISC processor firmware. */ 3029/* */ 3030/* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 3031/* associated with a particular processor. */ 3032/* */ 3033/* Returns: */ 3034/* Nothing. */ 3035/****************************************************************************/ 3036static void 3037bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 3038 struct fw_info *fw) 3039{ 3040 u32 offset; 3041 u32 val; 3042 3043 /* Halt the CPU. */ 3044 val = REG_RD_IND(sc, cpu_reg->mode); 3045 val |= cpu_reg->mode_value_halt; 3046 REG_WR_IND(sc, cpu_reg->mode, val); 3047 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3048 3049 /* Load the Text area. */ 3050 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 3051 if (fw->text) { 3052 int j; 3053 3054 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { 3055 REG_WR_IND(sc, offset, fw->text[j]); 3056 } 3057 } 3058 3059 /* Load the Data area. */ 3060 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 3061 if (fw->data) { 3062 int j; 3063 3064 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { 3065 REG_WR_IND(sc, offset, fw->data[j]); 3066 } 3067 } 3068 3069 /* Load the SBSS area. */ 3070 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 3071 if (fw->sbss) { 3072 int j; 3073 3074 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { 3075 REG_WR_IND(sc, offset, fw->sbss[j]); 3076 } 3077 } 3078 3079 /* Load the BSS area. 
*/ 3080 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 3081 if (fw->bss) { 3082 int j; 3083 3084 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { 3085 REG_WR_IND(sc, offset, fw->bss[j]); 3086 } 3087 } 3088 3089 /* Load the Read-Only area. */ 3090 offset = cpu_reg->spad_base + 3091 (fw->rodata_addr - cpu_reg->mips_view_base); 3092 if (fw->rodata) { 3093 int j; 3094 3095 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { 3096 REG_WR_IND(sc, offset, fw->rodata[j]); 3097 } 3098 } 3099 3100 /* Clear the pre-fetch instruction. */ 3101 REG_WR_IND(sc, cpu_reg->inst, 0); 3102 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 3103 3104 /* Start the CPU. */ 3105 val = REG_RD_IND(sc, cpu_reg->mode); 3106 val &= ~cpu_reg->mode_value_halt; 3107 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3108 REG_WR_IND(sc, cpu_reg->mode, val); 3109} 3110 3111 3112/****************************************************************************/ 3113/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 3114/* */ 3115/* Loads the firmware for each CPU and starts the CPU. */ 3116/* */ 3117/* Returns: */ 3118/* Nothing. */ 3119/****************************************************************************/ 3120static void 3121bce_init_cpus(struct bce_softc *sc) 3122{ 3123 struct cpu_reg cpu_reg; 3124 struct fw_info fw; 3125 3126 /* Initialize the RV2P processor. */ 3127 bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1); 3128 bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2); 3129 3130 /* Initialize the RX Processor. 
 */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	/* Subtracted from fw section addresses in bce_load_cpu_fw(). */
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_RXP_b06FwReleaseMajor;
	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
	fw.ver_fix = bce_RXP_b06FwReleaseFix;
	fw.start_addr = bce_RXP_b06FwStartAddr;

	fw.text_addr = bce_RXP_b06FwTextAddr;
	fw.text_len = bce_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_RXP_b06FwText;

	fw.data_addr = bce_RXP_b06FwDataAddr;
	fw.data_len = bce_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_RXP_b06FwData;

	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
	fw.sbss_len = bce_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_RXP_b06FwSbss;

	fw.bss_addr = bce_RXP_b06FwBssAddr;
	fw.bss_len = bce_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_RXP_b06FwBss;

	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
	fw.rodata_len = bce_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_RXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TXP_b06FwReleaseMajor;
	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
	fw.ver_fix = bce_TXP_b06FwReleaseFix;
	fw.start_addr = bce_TXP_b06FwStartAddr;

	fw.text_addr = bce_TXP_b06FwTextAddr;
	fw.text_len = bce_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TXP_b06FwText;

	fw.data_addr = bce_TXP_b06FwDataAddr;
	fw.data_len = bce_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TXP_b06FwData;

	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
	fw.sbss_len = bce_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TXP_b06FwSbss;

	fw.bss_addr = bce_TXP_b06FwBssAddr;
	fw.bss_len = bce_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TXP_b06FwBss;

	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
	fw.rodata_len = bce_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
	fw.start_addr = bce_TPAT_b06FwStartAddr;

	fw.text_addr = bce_TPAT_b06FwTextAddr;
	fw.text_len = bce_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TPAT_b06FwText;

	fw.data_addr = bce_TPAT_b06FwDataAddr;
	fw.data_len = bce_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TPAT_b06FwData;

	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
	fw.sbss_len = bce_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TPAT_b06FwSbss;

	fw.bss_addr = bce_TPAT_b06FwBssAddr;
	fw.bss_len = bce_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TPAT_b06FwBss;

	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
	fw.rodata_len = bce_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TPAT_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_COM_b06FwReleaseMajor;
	fw.ver_minor = bce_COM_b06FwReleaseMinor;
	fw.ver_fix = bce_COM_b06FwReleaseFix;
	fw.start_addr = bce_COM_b06FwStartAddr;

	fw.text_addr = bce_COM_b06FwTextAddr;
	fw.text_len = bce_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_COM_b06FwText;

	fw.data_addr = bce_COM_b06FwDataAddr;
	fw.data_len = bce_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_COM_b06FwData;

	fw.sbss_addr = bce_COM_b06FwSbssAddr;
	fw.sbss_len = bce_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_COM_b06FwSbss;

	fw.bss_addr = bce_COM_b06FwBssAddr;
	fw.bss_len = bce_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_COM_b06FwBss;

	fw.rodata_addr = bce_COM_b06FwRodataAddr;
	fw.rodata_len = bce_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_COM_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Command Processor. */
	cpu_reg.mode = BCE_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_CP_b06FwReleaseMajor;
	fw.ver_minor = bce_CP_b06FwReleaseMinor;
	fw.ver_fix = bce_CP_b06FwReleaseFix;
	fw.start_addr = bce_CP_b06FwStartAddr;

	fw.text_addr = bce_CP_b06FwTextAddr;
	fw.text_len = bce_CP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_CP_b06FwText;

	fw.data_addr = bce_CP_b06FwDataAddr;
	fw.data_len = bce_CP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_CP_b06FwData;

	fw.sbss_addr = bce_CP_b06FwSbssAddr;
	fw.sbss_len = bce_CP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_CP_b06FwSbss;

	fw.bss_addr = bce_CP_b06FwBssAddr;
	fw.bss_len = bce_CP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_CP_b06FwBss;

	fw.rodata_addr = bce_CP_b06FwRodataAddr;
	fw.rodata_len = bce_CP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_CP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3374/****************************************************************************/ 3375static void 3376bce_init_ctx(struct bce_softc *sc) 3377{ 3378 u32 vcid = 96; 3379 3380 while (vcid) { 3381 u32 vcid_addr, pcid_addr, offset; 3382 int i; 3383 3384 vcid--; 3385 3386 vcid_addr = GET_CID_ADDR(vcid); 3387 pcid_addr = vcid_addr; 3388 3389 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { 3390 vcid_addr += (i << PHY_CTX_SHIFT); 3391 pcid_addr += (i << PHY_CTX_SHIFT); 3392 3393 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 3394 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr); 3395 3396 /* Zero out the context. */ 3397 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) 3398 CTX_WR(sc, vcid_addr, offset, 0); 3399 } 3400 } 3401} 3402 3403 3404/****************************************************************************/ 3405/* Fetch the permanent MAC address of the controller. */ 3406/* */ 3407/* Returns: */ 3408/* Nothing. */ 3409/****************************************************************************/ 3410static void 3411bce_get_mac_addr(struct bce_softc *sc) 3412{ 3413 u32 mac_lo = 0, mac_hi = 0; 3414 3415 /* 3416 * The NetXtreme II bootcode populates various NIC 3417 * power-on and runtime configuration items in a 3418 * shared memory area. The factory configured MAC 3419 * address is available from both NVRAM and the 3420 * shared memory area so we'll read the value from 3421 * shared memory for speed. 
3422 */ 3423 3424 mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + 3425 BCE_PORT_HW_CFG_MAC_UPPER); 3426 mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + 3427 BCE_PORT_HW_CFG_MAC_LOWER); 3428 3429 if ((mac_lo == 0) && (mac_hi == 0)) { 3430 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", 3431 __FILE__, __LINE__); 3432 } else { 3433 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3434 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3435 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3436 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3437 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3438 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3439 } 3440 3441 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":"); 3442} 3443 3444 3445/****************************************************************************/ 3446/* Program the MAC address. */ 3447/* */ 3448/* Returns: */ 3449/* Nothing. */ 3450/****************************************************************************/ 3451static void 3452bce_set_mac_addr(struct bce_softc *sc) 3453{ 3454 u32 val; 3455 u8 *mac_addr = sc->eaddr; 3456 3457 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":"); 3458 3459 val = (mac_addr[0] << 8) | mac_addr[1]; 3460 3461 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3462 3463 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3464 (mac_addr[4] << 8) | mac_addr[5]; 3465 3466 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3467} 3468 3469 3470/****************************************************************************/ 3471/* Stop the controller. */ 3472/* */ 3473/* Returns: */ 3474/* Nothing. 
*/ 3475/****************************************************************************/ 3476static void 3477bce_stop(struct bce_softc *sc) 3478{ 3479 struct ifnet *ifp; 3480 struct ifmedia_entry *ifm; 3481 struct mii_data *mii = NULL; 3482 int mtmp, itmp; 3483 3484 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3485 3486 BCE_LOCK_ASSERT(sc); 3487 3488 ifp = sc->bce_ifp; 3489 3490 mii = device_get_softc(sc->bce_miibus); 3491 3492 callout_stop(&sc->bce_tick_callout); 3493 3494 /* Disable the transmit/receive blocks. */ 3495 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3496 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3497 DELAY(20); 3498 3499 bce_disable_intr(sc); 3500 3501 /* Free RX buffers. */ 3502#ifdef BCE_USE_SPLIT_HEADER 3503 bce_free_pg_chain(sc); 3504#endif 3505 bce_free_rx_chain(sc); 3506 3507 /* Free TX buffers. */ 3508 bce_free_tx_chain(sc); 3509 3510 /* 3511 * Isolate/power down the PHY, but leave the media selection 3512 * unchanged so that things will be put back to normal when 3513 * we bring the interface back up. 3514 */ 3515 3516 itmp = ifp->if_flags; 3517 ifp->if_flags |= IFF_UP; 3518 3519 /* If we are called from bce_detach(), mii is already NULL. */ 3520 if (mii != NULL) { 3521 ifm = mii->mii_media.ifm_cur; 3522 mtmp = ifm->ifm_media; 3523 ifm->ifm_media = IFM_ETHER | IFM_NONE; 3524 mii_mediachg(mii); 3525 ifm->ifm_media = mtmp; 3526 } 3527 3528 ifp->if_flags = itmp; 3529 sc->watchdog_timer = 0; 3530 3531 sc->bce_link = 0; 3532 3533 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3534 3535 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3536} 3537 3538 3539static int 3540bce_reset(struct bce_softc *sc, u32 reset_code) 3541{ 3542 u32 val; 3543 int i, rc = 0; 3544 3545 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", 3546 __FUNCTION__, reset_code); 3547 3548 /* Wait for pending PCI transactions to complete. 
*/ 3549 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 3550 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3551 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3552 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3553 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3554 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3555 DELAY(5); 3556 3557 /* Assume bootcode is running. */ 3558 sc->bce_fw_timed_out = 0; 3559 3560 /* Give the firmware a chance to prepare for the reset. */ 3561 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 3562 if (rc) 3563 goto bce_reset_exit; 3564 3565 /* Set a firmware reminder that this is a soft reset. */ 3566 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE, 3567 BCE_DRV_RESET_SIGNATURE_MAGIC); 3568 3569 /* Dummy read to force the chip to complete all current transactions. */ 3570 val = REG_RD(sc, BCE_MISC_ID); 3571 3572 /* Chip reset. */ 3573 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3574 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3575 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3576 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 3577 3578 /* Allow up to 30us for reset to complete. */ 3579 for (i = 0; i < 10; i++) { 3580 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 3581 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3582 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3583 break; 3584 } 3585 DELAY(10); 3586 } 3587 3588 /* Check that reset completed successfully. */ 3589 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3590 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3591 BCE_PRINTF("%s(%d): Reset failed!\n", 3592 __FILE__, __LINE__); 3593 rc = EBUSY; 3594 goto bce_reset_exit; 3595 } 3596 3597 /* Make sure byte swapping is properly configured. */ 3598 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 3599 if (val != 0x01020304) { 3600 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", 3601 __FILE__, __LINE__); 3602 rc = ENODEV; 3603 goto bce_reset_exit; 3604 } 3605 3606 /* Just completed a reset, assume that firmware is running again. 
*/ 3607 sc->bce_fw_timed_out = 0; 3608 3609 /* Wait for the firmware to finish its initialization. */ 3610 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 3611 if (rc) 3612 BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n", 3613 __FILE__, __LINE__); 3614 3615bce_reset_exit: 3616 return (rc); 3617} 3618 3619 3620static int 3621bce_chipinit(struct bce_softc *sc) 3622{ 3623 u32 val; 3624 int rc = 0; 3625 3626 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3627 3628 /* Make sure the interrupt is not active. */ 3629 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); 3630 3631 /* 3632 * Initialize DMA byte/word swapping, configure the number of DMA 3633 * channels and PCI clock compensation delay. 3634 */ 3635 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 3636 BCE_DMA_CONFIG_DATA_WORD_SWAP | 3637#if BYTE_ORDER == BIG_ENDIAN 3638 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 3639#endif 3640 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 3641 DMA_READ_CHANS << 12 | 3642 DMA_WRITE_CHANS << 16; 3643 3644 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 3645 3646 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 3647 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 3648 3649 /* 3650 * This setting resolves a problem observed on certain Intel PCI 3651 * chipsets that cannot handle multiple outstanding DMA operations. 3652 * See errata E9_5706A1_65. 3653 */ 3654 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 3655 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && 3656 !(sc->bce_flags & BCE_PCIX_FLAG)) 3657 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 3658 3659 REG_WR(sc, BCE_DMA_CONFIG, val); 3660 3661 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */ 3662 if (sc->bce_flags & BCE_PCIX_FLAG) { 3663 u16 val; 3664 3665 val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2); 3666 pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2); 3667 } 3668 3669 /* Enable the RX_V2P and Context state machines before access. 
 */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bce_init_ctx(sc);

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bce_init_nvram(sc)) {
		rc = ENODEV;
		goto bce_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

bce_chipinit_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_blockinit(struct bce_softc *sc)
{
	u32 reg, val;
	int rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Load the hardware default MAC address. */
	bce_set_mac_addr(sc);

	/* Set the Ethernet backoff seed value */
	val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
		(sc->eaddr[2] << 16) + (sc->eaddr[3]) +
		(sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);

	sc->last_status_idx = 0;
	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;

	/* Set up link change interrupt generation. */
	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);

	/* Program the physical address of the status block. */
	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
		BCE_ADDR_LO(sc->status_block_paddr));
	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
		BCE_ADDR_HI(sc->status_block_paddr));

	/* Program the physical address of the statistics block. */
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
		BCE_ADDR_LO(sc->stats_block_paddr));
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
		BCE_ADDR_HI(sc->stats_block_paddr));

	/* Program various host coalescing parameters.
*/ 3756 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3757 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); 3758 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3759 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); 3760 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3761 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3762 REG_WR(sc, BCE_HC_TX_TICKS, 3763 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3764 REG_WR(sc, BCE_HC_RX_TICKS, 3765 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3766 REG_WR(sc, BCE_HC_COM_TICKS, 3767 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3768 REG_WR(sc, BCE_HC_CMD_TICKS, 3769 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3770 REG_WR(sc, BCE_HC_STATS_TICKS, 3771 (sc->bce_stats_ticks & 0xffff00)); 3772 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 3773 0xbb8); /* 3ms */ 3774 REG_WR(sc, BCE_HC_CONFIG, 3775 (BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | 3776 BCE_HC_CONFIG_COLLECT_STATS)); 3777 3778 /* Clear the internal statistics counters. */ 3779 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3780 3781 /* Verify that bootcode is running. */ 3782 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE); 3783 3784 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3785 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", 3786 __FILE__, __LINE__); 3787 reg = 0); 3788 3789 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3790 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3791 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " 3792 "Expected: 08%08X\n", __FILE__, __LINE__, 3793 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), 3794 BCE_DEV_INFO_SIGNATURE_MAGIC); 3795 rc = ENODEV; 3796 goto bce_blockinit_exit; 3797 } 3798 3799 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3800 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3801 3802 /* Enable link state change interrupt generation. 
 */
	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Enable all remaining blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

bce_blockinit_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}


/****************************************************************************/
/* Encapsulate an mbuf into the rx_bd chain.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
	u16 *chain_prod, u32 *prod_bseq)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	struct mbuf *m_new = NULL;
	struct rx_bd *rxbd;
	int nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
			__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);

	/* Check whether this is a new mbuf allocation. */
	if (m == NULL) {

		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			sc->mbuf_alloc_failed++;
			sc->debug_mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_rx_buf_exit);

		/* This is a new mbuf allocation. */
#ifdef BCE_USE_SPLIT_HEADER
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
#else
		/* Pick a cluster or jumbo cluster based on the BD size. */
		if (sc->rx_bd_mbuf_alloc_size == MCLBYTES)
			m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		else
			m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size);
#endif

		if (m_new == NULL) {
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_rx_buf_exit;
		}

		DBRUN(sc->debug_rx_mbuf_alloc++);
	} else {
		/* Reuse an existing mbuf. */
		m_new = m;
	}

	/* Make sure we have a valid packet header. */
	M_ASSERTPKTHDR(m_new);

	/* Initialize the mbuf size and pad if necessary for alignment. */
	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
	m_adj(m_new, sc->rx_bd_mbuf_align_pad);

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
		segs, &nsegs, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n",
			__FILE__, __LINE__, error);

		m_freem(m_new);
		DBRUN(sc->debug_rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_rx_buf_exit;
	}

	/* All mbufs must map to a single segment. */
	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
		__FUNCTION__, nsegs));

	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */

	/* Setup the rx_bd for the segment.
 */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
	*prod_bseq += segs[0].ds_len;

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUNMSG(BCE_EXCESSIVE, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_rx_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}


#ifdef BCE_USE_SPLIT_HEADER
/****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
	u16 *prod_idx)
{
	bus_dmamap_t map;
	bus_addr_t busaddr;
	struct mbuf *m_new = NULL;
	struct rx_bd *pgbd;
	int error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_prod_idx = *prod_idx;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*prod_idx > MAX_PG_BD),
		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
			__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, "
		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);

	/* Update counters if we've hit a new low or run out of pages. */
	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
		sc->pg_low_watermark = sc->free_pg_bd);
	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);

	/* Check whether this is a new mbuf allocation. */
	if (m == NULL) {

		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			sc->mbuf_alloc_failed++;
			sc->debug_mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_pg_buf_exit);

		/* This is a new mbuf allocation.  No pkthdr is needed since
		 * page-chain mbufs only carry payload data. */
		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
		if (m_new == NULL) {
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_pg_buf_exit;
		}

		DBRUN(sc->debug_pg_mbuf_alloc++);
	} else {
		/* Reuse an existing mbuf; rewind data to the cluster start. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_new->m_len = sc->pg_bd_mbuf_alloc_size;

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	map = sc->pg_mbuf_map[*prod_idx];
	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
		sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);
		DBRUN(sc->debug_pg_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_pg_buf_exit;
	}

	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */

	/*
	 * The page chain uses the same rx_bd data structure
	 * as the receive chain but doesn't require a byte sequence (bseq).
	 */
	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];

	pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
	pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
	pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
	pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->pg_mbuf_ptr[*prod_idx] = m_new;
	sc->free_pg_bd--;

	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
		1));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, "
		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);

bce_get_pg_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
#endif /* BCE_USE_SPLIT_HEADER */


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUN(sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).
A BD chain 4084 * consists of a series of 1 or more chain pages, each of which 4085 * consists of a fixed number of BD entries. 4086 * The last BD entry on each page is a pointer to the next page 4087 * in the chain, and the last pointer in the BD chain 4088 * points back to the beginning of the chain. 4089 */ 4090 4091 /* Set the TX next pointer chain entries. */ 4092 for (i = 0; i < TX_PAGES; i++) { 4093 int j; 4094 4095 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4096 4097 /* Check if we've reached the last page. */ 4098 if (i == (TX_PAGES - 1)) 4099 j = 0; 4100 else 4101 j = i + 1; 4102 4103 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); 4104 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); 4105 } 4106 4107 /* Initialize the context ID for an L2 TX chain. */ 4108 val = BCE_L2CTX_TYPE_TYPE_L2; 4109 val |= BCE_L2CTX_TYPE_SIZE_L2; 4110 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val); 4111 4112 val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4113 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val); 4114 4115 /* Point the hardware to the first page in the chain. */ 4116 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 4117 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val); 4118 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 4119 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val); 4120 4121 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD)); 4122 4123 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4124 4125 return(rc); 4126} 4127 4128 4129/****************************************************************************/ 4130/* Free memory and clear the TX data structures. */ 4131/* */ 4132/* Returns: */ 4133/* Nothing. 
*/ 4134/****************************************************************************/ 4135static void 4136bce_free_tx_chain(struct bce_softc *sc) 4137{ 4138 int i; 4139 4140 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 4141 4142 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4143 for (i = 0; i < TOTAL_TX_BD; i++) { 4144 if (sc->tx_mbuf_ptr[i] != NULL) { 4145 if (sc->tx_mbuf_map != NULL) 4146 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i], 4147 BUS_DMASYNC_POSTWRITE); 4148 m_freem(sc->tx_mbuf_ptr[i]); 4149 sc->tx_mbuf_ptr[i] = NULL; 4150 DBRUN(sc->debug_tx_mbuf_alloc--); 4151 } 4152 } 4153 4154 /* Clear each TX chain page. */ 4155 for (i = 0; i < TX_PAGES; i++) 4156 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); 4157 4158 sc->used_tx_bd = 0; 4159 4160 /* Check if we lost any mbufs in the process. */ 4161 DBRUNIF((sc->debug_tx_mbuf_alloc), 4162 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs " 4163 "from tx chain!\n", 4164 __FILE__, __LINE__, sc->debug_tx_mbuf_alloc)); 4165 4166 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4167} 4168 4169 4170/****************************************************************************/ 4171/* Allocate memory and initialize the RX data structures. */ 4172/* */ 4173/* Returns: */ 4174/* 0 for success, positive value for failure. */ 4175/****************************************************************************/ 4176static int 4177bce_init_rx_chain(struct bce_softc *sc) 4178{ 4179 struct rx_bd *rxbd; 4180 int i, rc = 0; 4181 u32 val; 4182 4183 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 4184 4185 /* Initialize the RX producer and consumer indices. */ 4186 sc->rx_prod = 0; 4187 sc->rx_cons = 0; 4188 sc->rx_prod_bseq = 0; 4189 sc->free_rx_bd = USABLE_RX_BD; 4190 sc->max_rx_bd = USABLE_RX_BD; 4191 DBRUN(sc->rx_low_watermark = sc->max_rx_bd); 4192 DBRUN(sc->rx_empty_count = 0); 4193 4194 /* Initialize the RX next pointer chain entries. 
*/ 4195 for (i = 0; i < RX_PAGES; i++) { 4196 int j; 4197 4198 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4199 4200 /* Check if we've reached the last page. */ 4201 if (i == (RX_PAGES - 1)) 4202 j = 0; 4203 else 4204 j = i + 1; 4205 4206 /* Setup the chain page pointers. */ 4207 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); 4208 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); 4209 } 4210 4211 /* Initialize the context ID for an L2 RX chain. */ 4212 val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; 4213 val |= BCE_L2CTX_CTX_TYPE_SIZE_L2; 4214 val |= 0x02 << 8; 4215 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val); 4216 4217 /* Point the hardware to the first page in the chain. */ 4218 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); 4219 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val); 4220 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); 4221 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val); 4222 4223 /* Fill up the RX chain. */ 4224 bce_fill_rx_chain(sc); 4225 4226 for (i = 0; i < RX_PAGES; i++) { 4227 bus_dmamap_sync( 4228 sc->rx_bd_chain_tag, 4229 sc->rx_bd_chain_map[i], 4230 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4231 } 4232 4233 DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4234 4235 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4236 4237 return(rc); 4238} 4239 4240 4241/****************************************************************************/ 4242/* Add mbufs to the RX chain until its full or an mbuf allocation error */ 4243/* occurs. 
*/ 4244/* */ 4245/* Returns: */ 4246/* Nothing */ 4247/****************************************************************************/ 4248static void 4249bce_fill_rx_chain(struct bce_softc *sc) 4250{ 4251 u16 prod, prod_idx; 4252 u32 prod_bseq; 4253 4254 DBPRINT(sc, BCE_VERBOSE_RECV, "Entering %s()\n", __FUNCTION__); 4255 4256 prod = sc->rx_prod; 4257 prod_bseq = sc->rx_prod_bseq; 4258 4259 /* Keep filling the RX chain until it's full. */ 4260 while (sc->free_rx_bd > 0) { 4261 prod_idx = RX_CHAIN_IDX(prod); 4262 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) { 4263 /* Bail out if we can't add an mbuf to the chain. */ 4264 break; 4265 } 4266 prod = NEXT_RX_BD(prod); 4267 } 4268 4269 /* Save the RX chain producer index. */ 4270 sc->rx_prod = prod; 4271 sc->rx_prod_bseq = prod_bseq; 4272 4273 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 4274 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", 4275 __FUNCTION__, sc->rx_prod)); 4276 4277 /* Tell the chip about the waiting rx_bd's. */ 4278 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod); 4279 REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4280 4281 DBPRINT(sc, BCE_VERBOSE_RECV, "Exiting %s()\n", __FUNCTION__); 4282} 4283 4284 4285/****************************************************************************/ 4286/* Free memory and clear the RX data structures. */ 4287/* */ 4288/* Returns: */ 4289/* Nothing. */ 4290/****************************************************************************/ 4291static void 4292bce_free_rx_chain(struct bce_softc *sc) 4293{ 4294 int i; 4295 4296 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 4297 4298#ifdef BCE_USE_SPLIT_HEADER 4299 /* Clear the jumbo page chain support. */ 4300 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, 0); 4301#endif 4302 4303 /* Free any mbufs still in the RX mbuf chain. 
*/ 4304 for (i = 0; i < TOTAL_RX_BD; i++) { 4305 if (sc->rx_mbuf_ptr[i] != NULL) { 4306 if (sc->rx_mbuf_map[i] != NULL) 4307 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i], 4308 BUS_DMASYNC_POSTREAD); 4309 m_freem(sc->rx_mbuf_ptr[i]); 4310 sc->rx_mbuf_ptr[i] = NULL; 4311 DBRUN(sc->debug_rx_mbuf_alloc--); 4312 } 4313 } 4314 4315 /* Clear each RX chain page. */ 4316 for (i = 0; i < RX_PAGES; i++) 4317 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); 4318 4319 sc->free_rx_bd = sc->max_rx_bd; 4320 4321 /* Check if we lost any mbufs in the process. */ 4322 DBRUNIF((sc->debug_rx_mbuf_alloc), 4323 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n", 4324 __FUNCTION__, sc->debug_rx_mbuf_alloc)); 4325 4326 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4327} 4328 4329 4330#ifdef BCE_USE_SPLIT_HEADER 4331/****************************************************************************/ 4332/* Allocate memory and initialize the page data structures. */ 4333/* Assumes that bce_init_rx_chain() has not already been called. */ 4334/* */ 4335/* Returns: */ 4336/* 0 for success, positive value for failure. */ 4337/****************************************************************************/ 4338static int 4339bce_init_pg_chain(struct bce_softc *sc) 4340{ 4341 struct rx_bd *pgbd; 4342 int i, rc = 0; 4343 u32 val; 4344 4345 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 4346 4347 /* Initialize the page producer and consumer indices. */ 4348 sc->pg_prod = 0; 4349 sc->pg_cons = 0; 4350 sc->free_pg_bd = USABLE_PG_BD; 4351 sc->max_pg_bd = USABLE_PG_BD; 4352 DBRUN(sc->pg_low_watermark = sc->max_pg_bd); 4353 DBRUN(sc->pg_empty_count = 0); 4354 4355 /* Initialize the page next pointer chain entries. */ 4356 for (i = 0; i < PG_PAGES; i++) { 4357 int j; 4358 4359 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE]; 4360 4361 /* Check if we've reached the last page. 
*/ 4362 if (i == (PG_PAGES - 1)) 4363 j = 0; 4364 else 4365 j = i + 1; 4366 4367 /* Setup the chain page pointers. */ 4368 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j])); 4369 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j])); 4370 } 4371 4372 /* Point the hardware to the first page in the page chain. */ 4373 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]); 4374 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_HI, val); 4375 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]); 4376 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_LO, val); 4377 4378 /* Configure the rx_bd and page chain mbuf cluster size. */ 4379#ifdef BCE_USE_SPLIT_HEADER 4380 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size; 4381#else 4382 val = (sc->rx_bd_mbuf_data_len << 16); 4383#endif 4384 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, val); 4385 4386 /* Configure the context reserved for jumbo support. */ 4387 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RBDC_KEY, 4388 BCE_L2CTX_RBDC_JUMBO_KEY); 4389 4390 /* Fill up the page chain. */ 4391 bce_fill_pg_chain(sc); 4392 4393 for (i = 0; i < PG_PAGES; i++) { 4394 bus_dmamap_sync( 4395 sc->pg_bd_chain_tag, 4396 sc->pg_bd_chain_map[i], 4397 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4398 } 4399 4400 DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD)); 4401 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4402 4403 return(rc); 4404} 4405 4406/****************************************************************************/ 4407/* Add mbufs to the page chain until its full or an mbuf allocation error */ 4408/* occurs. 
*/ 4409/* */ 4410/* Returns: */ 4411/* Nothing */ 4412/****************************************************************************/ 4413static void 4414bce_fill_pg_chain(struct bce_softc *sc) 4415{ 4416 u16 prod, prod_idx; 4417 4418 DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__); 4419 4420 prod = sc->pg_prod; 4421 4422 /* Keep filling the page chain until it's full. */ 4423 while (sc->free_pg_bd > 0) { 4424 prod_idx = PG_CHAIN_IDX(prod); 4425 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) { 4426 /* Bail out if we can't add an mbuf to the chain. */ 4427 break; 4428 } 4429 prod = NEXT_PG_BD(prod); 4430 } 4431 4432 /* Save the page chain producer index. */ 4433 sc->pg_prod = prod; 4434 4435 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 4436 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n", 4437 __FUNCTION__, sc->pg_prod)); 4438 4439 /* Tell the chip about the new rx_bd's in the page chain. */ 4440 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_PG_BDIDX, sc->pg_prod); 4441 4442 DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__); 4443} 4444 4445 4446/****************************************************************************/ 4447/* Free memory and clear the RX data structures. */ 4448/* */ 4449/* Returns: */ 4450/* Nothing. */ 4451/****************************************************************************/ 4452static void 4453bce_free_pg_chain(struct bce_softc *sc) 4454{ 4455 int i; 4456 4457 DBPRINT(sc, BCE_EXCESSIVE_RESET, "Entering %s()\n", __FUNCTION__); 4458 4459 /* Free any mbufs still in the mbuf page chain. */ 4460 for (i = 0; i < TOTAL_PG_BD; i++) { 4461 if (sc->pg_mbuf_ptr[i] != NULL) { 4462 if (sc->pg_mbuf_map[i] != NULL) 4463 bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i], 4464 BUS_DMASYNC_POSTREAD); 4465 m_freem(sc->pg_mbuf_ptr[i]); 4466 sc->pg_mbuf_ptr[i] = NULL; 4467 DBRUN(sc->debug_pg_mbuf_alloc--); 4468 } 4469 } 4470 4471 /* Clear each page chain pages. 
*/ 4472 for (i = 0; i < PG_PAGES; i++) 4473 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); 4474 4475 sc->free_pg_bd = sc->max_pg_bd; 4476 4477 /* Check if we lost any mbufs in the process. */ 4478 DBRUNIF((sc->debug_pg_mbuf_alloc), 4479 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n", 4480 __FUNCTION__, sc->debug_pg_mbuf_alloc)); 4481 4482 DBPRINT(sc, BCE_EXCESSIVE_RESET, "Exiting %s()\n", __FUNCTION__); 4483} 4484#endif /* BCE_USE_SPLIT_HEADER */ 4485 4486 4487/****************************************************************************/ 4488/* Set media options. */ 4489/* */ 4490/* Returns: */ 4491/* 0 for success, positive value for failure. */ 4492/****************************************************************************/ 4493static int 4494bce_ifmedia_upd(struct ifnet *ifp) 4495{ 4496 struct bce_softc *sc; 4497 4498 sc = ifp->if_softc; 4499 BCE_LOCK(sc); 4500 bce_ifmedia_upd_locked(ifp); 4501 BCE_UNLOCK(sc); 4502 return (0); 4503} 4504 4505 4506/****************************************************************************/ 4507/* Set media options. */ 4508/* */ 4509/* Returns: */ 4510/* Nothing. */ 4511/****************************************************************************/ 4512static void 4513bce_ifmedia_upd_locked(struct ifnet *ifp) 4514{ 4515 struct bce_softc *sc; 4516 struct mii_data *mii; 4517 struct ifmedia *ifm; 4518 4519 sc = ifp->if_softc; 4520 ifm = &sc->bce_ifmedia; 4521 BCE_LOCK_ASSERT(sc); 4522 4523 mii = device_get_softc(sc->bce_miibus); 4524 4525 /* Make sure the MII bus has been enumerated. */ 4526 if (mii) { 4527 sc->bce_link = 0; 4528 if (mii->mii_instance) { 4529 struct mii_softc *miisc; 4530 4531 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4532 mii_phy_reset(miisc); 4533 } 4534 mii_mediachg(mii); 4535 } 4536} 4537 4538 4539/****************************************************************************/ 4540/* Reports current media status. */ 4541/* */ 4542/* Returns: */ 4543/* Nothing. 
*/ 4544/****************************************************************************/ 4545static void 4546bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4547{ 4548 struct bce_softc *sc; 4549 struct mii_data *mii; 4550 4551 sc = ifp->if_softc; 4552 4553 BCE_LOCK(sc); 4554 4555 mii = device_get_softc(sc->bce_miibus); 4556 4557 mii_pollstat(mii); 4558 ifmr->ifm_active = mii->mii_media_active; 4559 ifmr->ifm_status = mii->mii_media_status; 4560 4561 BCE_UNLOCK(sc); 4562} 4563 4564 4565/****************************************************************************/ 4566/* Handles PHY generated interrupt events. */ 4567/* */ 4568/* Returns: */ 4569/* Nothing. */ 4570/****************************************************************************/ 4571static void 4572bce_phy_intr(struct bce_softc *sc) 4573{ 4574 u32 new_link_state, old_link_state; 4575 4576 new_link_state = sc->status_block->status_attn_bits & 4577 STATUS_ATTN_BITS_LINK_STATE; 4578 old_link_state = sc->status_block->status_attn_bits_ack & 4579 STATUS_ATTN_BITS_LINK_STATE; 4580 4581 /* Handle any changes if the link state has changed. */ 4582 if (new_link_state != old_link_state) { 4583 4584 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); 4585 4586 sc->bce_link = 0; 4587 callout_stop(&sc->bce_tick_callout); 4588 bce_tick(sc); 4589 4590 /* Update the status_attn_bits_ack field in the status block. */ 4591 if (new_link_state) { 4592 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 4593 STATUS_ATTN_BITS_LINK_STATE); 4594 DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n"); 4595 } 4596 else { 4597 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 4598 STATUS_ATTN_BITS_LINK_STATE); 4599 DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n"); 4600 } 4601 4602 } 4603 4604 /* Acknowledge the link change interrupt. 
*/ 4605 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 4606} 4607 4608 4609/****************************************************************************/ 4610/* Reads the receive consumer value from the status block (skipping over */ 4611/* chain page pointer if necessary). */ 4612/* */ 4613/* Returns: */ 4614/* hw_cons */ 4615/****************************************************************************/ 4616static inline u16 4617bce_get_hw_rx_cons(struct bce_softc *sc) 4618{ 4619 u16 hw_cons = sc->status_block->status_rx_quick_consumer_index0; 4620 4621 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4622 hw_cons++; 4623 4624 return hw_cons; 4625} 4626 4627 4628/****************************************************************************/ 4629/* Handles received frame interrupt events. */ 4630/* */ 4631/* Returns: */ 4632/* Nothing. */ 4633/****************************************************************************/ 4634static void 4635bce_rx_intr(struct bce_softc *sc) 4636{ 4637 struct ifnet *ifp = sc->bce_ifp; 4638 struct l2_fhdr *l2fhdr; 4639 unsigned int pkt_len; 4640 u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons; 4641 u32 status; 4642#ifdef BCE_USE_SPLIT_HEADER 4643 unsigned int pages, rem_len; 4644 u16 sw_pg_cons, sw_pg_cons_idx; 4645#endif 4646 4647 4648#ifdef BCE_DEBUG 4649 u32 timer_start, timer_end; 4650 timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN); 4651 sc->rx_interrupts++; 4652#endif 4653 4654 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4655 for (int i = 0; i < RX_PAGES; i++) 4656 bus_dmamap_sync(sc->rx_bd_chain_tag, 4657 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE); 4658 4659#ifdef BCE_USE_SPLIT_HEADER 4660 /* Prepare the page chain pages to be accessed by the host CPU. */ 4661 for (int i = 0; i < PG_PAGES; i++) 4662 bus_dmamap_sync(sc->pg_bd_chain_tag, 4663 sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE); 4664#endif 4665 4666 /* Get the hardware's view of the RX consumer index. 
*/ 4667 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 4668 4669 /* Get working copies of the driver's view of the consumer indices. */ 4670 sw_rx_cons = sc->rx_cons; 4671#ifdef BCE_USE_SPLIT_HEADER 4672 sw_pg_cons = sc->pg_cons; 4673#endif 4674 4675 DBPRINT(sc, BCE_INFO_RECV, "%s(enter): rx_prod = 0x%04X, " 4676 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4677 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4678 4679 /* Update some debug statistics counters */ 4680 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4681 sc->rx_low_watermark = sc->free_rx_bd); 4682 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); 4683 4684 /* Scan through the receive chain as long as there is work to do */ 4685 /* ToDo: Consider setting a limit on the number of packets processed. */ 4686 while (sw_rx_cons != hw_rx_cons) { 4687 struct mbuf *m0; 4688 4689 /* Convert the producer/consumer indices to an actual rx_bd index. */ 4690 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); 4691 4692 /* Unmap the mbuf from DMA space. */ 4693 bus_dmamap_sync(sc->rx_mbuf_tag, 4694 sc->rx_mbuf_map[sw_rx_cons_idx], 4695 BUS_DMASYNC_POSTREAD); 4696 bus_dmamap_unload(sc->rx_mbuf_tag, 4697 sc->rx_mbuf_map[sw_rx_cons_idx]); 4698 4699 /* Remove the mbuf from the RX chain. */ 4700 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; 4701 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; 4702 DBRUN(sc->debug_rx_mbuf_alloc--); 4703 sc->free_rx_bd++; 4704 4705 /* 4706 * Frames received on the NetXteme II are prepended 4707 * with an l2_fhdr structure which provides status 4708 * information about the received frame (including 4709 * VLAN tags and checksum info). The frames are also 4710 * automatically adjusted to align the IP header 4711 * (i.e. two null bytes are inserted before the 4712 * Ethernet header). 
As a result the data DMA'd by 4713 * the controller into the mbuf is as follows: 4714 * +---------+-----+---------------------+-----+ 4715 * | l2_fhdr | pad | packet data | FCS | 4716 * +---------+-----+---------------------+-----+ 4717 * The l2_fhdr needs to be checked and skipped and 4718 * the FCS needs to be stripped before sending the 4719 * packet up the stack. 4720 */ 4721 l2fhdr = mtod(m0, struct l2_fhdr *); 4722 4723 /* Get the packet data + FCS length and the status. */ 4724 pkt_len = l2fhdr->l2_fhdr_pkt_len; 4725 status = l2fhdr->l2_fhdr_status; 4726 4727 /* 4728 * Skip over the l2_fhdr and pad, resulting in the 4729 * following data in the mbuf: 4730 * +---------------------+-----+ 4731 * | packet data | FCS | 4732 * +---------------------+-----+ 4733 */ 4734 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); 4735 4736 4737#ifdef BCE_USE_SPLIT_HEADER 4738 /* 4739 * Check whether the received frame fits in a single 4740 * mbuf or not (i.e. packet data + FCS <= 4741 * sc->rx_bd_mbuf_data_len bytes). 4742 */ 4743 if (pkt_len > m0->m_len) { 4744 /* 4745 * The received frame is larger than a single mbuf. 4746 * If the frame was a TCP frame then only the TCP 4747 * header is placed in the mbuf, the remaining 4748 * payload (including FCS) is placed in the page 4749 * chain, the SPLIT flag is set, and the header 4750 * length is placed in the IP checksum field. 4751 * If the frame is not a TCP frame then the mbuf 4752 * is filled and the remaining bytes are placed 4753 * in the page chain. 4754 */ 4755 4756 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n", 4757 __FUNCTION__); 4758 4759 if (status & L2_FHDR_STATUS_SPLIT) 4760 m0->m_len = l2fhdr->l2_fhdr_ip_xsum; 4761 4762 rem_len = pkt_len - m0->m_len; 4763 4764 /* Calculate how many pages to pull off the page chain. */ 4765 /* ToDo: The following assumes that mbuf clusters are 2KB. 
*/ 4766 pages = (rem_len + sc->pg_bd_mbuf_alloc_size) >> 11; 4767 4768 /* Pull mbufs off the page chain for the remaining data. */ 4769 while (rem_len > 0) { 4770 struct mbuf *m_pg; 4771 4772 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); 4773 4774 /* Remove the mbuf from the page chain. */ 4775 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; 4776 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; 4777 DBRUN(sc->debug_pg_mbuf_alloc--); 4778 sc->free_pg_bd++; 4779 4780 /* Unmap the page chain mbuf from DMA space. */ 4781 bus_dmamap_sync(sc->pg_mbuf_tag, 4782 sc->pg_mbuf_map[sw_pg_cons_idx], 4783 BUS_DMASYNC_POSTREAD); 4784 bus_dmamap_unload(sc->pg_mbuf_tag, 4785 sc->pg_mbuf_map[sw_pg_cons_idx]); 4786 4787 /* Adjust the mbuf length. */ 4788 if (rem_len < m_pg->m_len) { 4789 /* The mbuf chain is complete. */ 4790 m_pg->m_len = rem_len; 4791 rem_len = 0; 4792 } else { 4793 /* More packet data is waiting. */ 4794 rem_len -= m_pg->m_len; 4795 } 4796 4797 /* Concatenate the mbuf cluster to the mbuf. */ 4798 m_cat(m0, m_pg); 4799 4800 sw_pg_cons = NEXT_PG_BD(sw_pg_cons); 4801 } 4802 4803 /* Set the total packet length. */ 4804 m0->m_pkthdr.len = pkt_len; 4805 4806 } else { 4807 /* 4808 * The received packet is small and fits in a 4809 * single mbuf (i.e. the l2_fhdr + pad + packet + 4810 * FCS <= MHLEN). In other words, the packet is 4811 * 154 bytes or less in size. 4812 */ 4813 4814 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n", 4815 __FUNCTION__); 4816 4817 /* Set the total packet length. */ 4818 m0->m_pkthdr.len = m0->m_len = pkt_len; 4819 } 4820#endif 4821 4822 /* Remove the trailing Ethernet FCS. */ 4823 m_adj(m0, -ETHER_CRC_LEN); 4824 4825 /* Check that the resulting mbuf chain is valid. 
*/ 4826 DBRUN(m_sanity(m0, FALSE)); 4827 4828 DBRUNIF((m0->m_len < ETHER_HDR_LEN), 4829 BCE_PRINTF("%s(): Unexpected length = %d!.\n", 4830 __FUNCTION__, m0->m_len); 4831 bce_breakpoint(sc)); 4832 4833 DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check), 4834 BCE_PRINTF("Simulating l2_fhdr status error.\n"); 4835 status = status | L2_FHDR_ERRORS_PHY_DECODE); 4836 4837 /* Check the received frame for errors. */ 4838 if (status & (L2_FHDR_ERRORS_BAD_CRC | 4839 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 4840 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { 4841 4842 /* Log the error and release the mbuf. */ 4843 ifp->if_ierrors++; 4844 DBRUN(sc->l2fhdr_status_errors++); 4845 4846 m_freem(m0); 4847 m0 = NULL; 4848 goto bce_rx_int_next_rx; 4849 } 4850 4851 /* Send the packet to the appropriate interface. */ 4852 m0->m_pkthdr.rcvif = ifp; 4853 4854 /* Assume no hardware checksum. */ 4855 m0->m_pkthdr.csum_flags = 0; 4856 4857 /* Validate the checksum if offload enabled. */ 4858 if (ifp->if_capenable & IFCAP_RXCSUM) { 4859 4860 /* Check for an IP datagram. */ 4861 if (!(status & L2_FHDR_STATUS_SPLIT) && 4862 (status & L2_FHDR_STATUS_IP_DATAGRAM)) { 4863 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4864 4865 /* Check if the IP checksum is valid. */ 4866 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) 4867 m0->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4868 } 4869 4870 /* Check for a valid TCP/UDP frame. */ 4871 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 4872 L2_FHDR_STATUS_UDP_DATAGRAM)) { 4873 4874 /* Check for a good TCP/UDP checksum. */ 4875 if ((status & (L2_FHDR_ERRORS_TCP_XSUM | 4876 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 4877 m0->m_pkthdr.csum_data = 4878 l2fhdr->l2_fhdr_tcp_udp_xsum; 4879 m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID 4880 | CSUM_PSEUDO_HDR); 4881 } 4882 } 4883 } 4884 4885 /* 4886 * If we received a packet with a vlan tag, 4887 * attach that information to the packet. 
4888 */ 4889 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 4890#if __FreeBSD_version < 700000 4891 VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue); 4892#else 4893 m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag; 4894 m0->m_flags |= M_VLANTAG; 4895#endif 4896 } 4897 4898 /* Pass the mbuf off to the upper layers. */ 4899 ifp->if_ipackets++; 4900 4901bce_rx_int_next_rx: 4902 sw_rx_cons = NEXT_RX_BD(sw_rx_cons); 4903 4904 /* If we have a packet, pass it up the stack */ 4905 if (m0) { 4906 /* Make sure we don't lose our place when we release the lock. */ 4907 sc->rx_cons = sw_rx_cons; 4908#ifdef BCE_USE_SPLIT_HEADER 4909 sc->pg_cons = sw_pg_cons; 4910#endif 4911 4912 BCE_UNLOCK(sc); 4913 (*ifp->if_input)(ifp, m0); 4914 BCE_LOCK(sc); 4915 4916 /* Recover our place. */ 4917 sw_rx_cons = sc->rx_cons; 4918#ifdef BCE_USE_SPLIT_HEADER 4919 sw_pg_cons = sc->pg_cons; 4920#endif 4921 } 4922 4923 /* Refresh hw_cons to see if there's new work */ 4924 if (sw_rx_cons == hw_rx_cons) 4925 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 4926 } 4927 4928 /* No new packets to process. Refill the RX and page chains and exit. */ 4929#ifdef BCE_USE_SPLIT_HEADER 4930 sc->pg_cons = sw_pg_cons; 4931 bce_fill_pg_chain(sc); 4932#endif 4933 4934 sc->rx_cons = sw_rx_cons; 4935 bce_fill_rx_chain(sc); 4936 4937 for (int i = 0; i < RX_PAGES; i++) 4938 bus_dmamap_sync(sc->rx_bd_chain_tag, 4939 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE); 4940 4941#ifdef BCE_USE_SPLIT_HEADER 4942 for (int i = 0; i < PG_PAGES; i++) 4943 bus_dmamap_sync(sc->pg_bd_chain_tag, 4944 sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); 4945#endif 4946 4947 DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4948 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4949 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4950 4951#ifdef BCE_DEBUG 4952 timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN); 4953 sc->rx_intr_time += (u64) (timer_start > timer_end ? 
4954 (timer_start - timer_end) : (~timer_start + timer_end + 1)); 4955#endif 4956} 4957 4958 4959/****************************************************************************/ 4960/* Reads the transmit consumer value from the status block (skipping over */ 4961/* chain page pointer if necessary). */ 4962/* */ 4963/* Returns: */ 4964/* hw_cons */ 4965/****************************************************************************/ 4966static inline u16 4967bce_get_hw_tx_cons(struct bce_softc *sc) 4968{ 4969 u16 hw_cons = sc->status_block->status_tx_quick_consumer_index0; 4970 4971 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4972 hw_cons++; 4973 4974 return hw_cons; 4975} 4976 4977 4978/****************************************************************************/ 4979/* Handles transmit completion interrupt events. */ 4980/* */ 4981/* Returns: */ 4982/* Nothing. */ 4983/****************************************************************************/ 4984static void 4985bce_tx_intr(struct bce_softc *sc) 4986{ 4987 struct ifnet *ifp = sc->bce_ifp; 4988 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4989 4990#ifdef BCE_DEBUG 4991 u32 timer_start, timer_end; 4992 timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN); 4993 sc->tx_interrupts++; 4994#endif 4995 4996 BCE_LOCK_ASSERT(sc); 4997 4998 /* Get the hardware's view of the TX consumer index. */ 4999 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); 5000 sw_tx_cons = sc->tx_cons; 5001 5002 /* Prevent speculative reads from getting ahead of the status block. */ 5003 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 5004 BUS_SPACE_BARRIER_READ); 5005 5006 /* Cycle through any completed TX chain page entries. 
*/ 5007 while (sw_tx_cons != hw_tx_cons) { 5008#ifdef BCE_DEBUG 5009 struct tx_bd *txbd = NULL; 5010#endif 5011 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 5012 5013 DBPRINT(sc, BCE_INFO_SEND, 5014 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, " 5015 "sw_tx_chain_cons = 0x%04X\n", 5016 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 5017 5018 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 5019 BCE_PRINTF("%s(%d): TX chain consumer out of range! " 5020 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons, 5021 (int) MAX_TX_BD); 5022 bce_breakpoint(sc)); 5023 5024 DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)] 5025 [TX_IDX(sw_tx_chain_cons)]); 5026 5027 DBRUNIF((txbd == NULL), 5028 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", 5029 __FILE__, __LINE__, sw_tx_chain_cons); 5030 bce_breakpoint(sc)); 5031 5032 DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__); 5033 bce_dump_txbd(sc, sw_tx_chain_cons, txbd)); 5034 5035 /* 5036 * Free the associated mbuf. Remember 5037 * that only the last tx_bd of a packet 5038 * has an mbuf pointer and DMA map. 5039 */ 5040 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { 5041 5042 /* Validate that this is the last tx_bd. */ 5043 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)), 5044 BCE_PRINTF("%s(%d): tx_bd END flag not set but " 5045 "txmbuf == NULL!\n", __FILE__, __LINE__); 5046 bce_breakpoint(sc)); 5047 5048 DBRUNMSG(BCE_INFO_SEND, 5049 BCE_PRINTF("%s(): Unloading map/freeing mbuf " 5050 "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons)); 5051 5052 /* Unmap the mbuf. */ 5053 bus_dmamap_unload(sc->tx_mbuf_tag, 5054 sc->tx_mbuf_map[sw_tx_chain_cons]); 5055 5056 /* Free the mbuf. */ 5057 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); 5058 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; 5059 DBRUN(sc->debug_tx_mbuf_alloc--); 5060 5061 ifp->if_opackets++; 5062 } 5063 5064 sc->used_tx_bd--; 5065 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 5066 5067 /* Refresh hw_cons to see if there's new work. 
*/ 5068 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); 5069 5070 /* Prevent speculative reads from getting ahead of the status block. */ 5071 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 5072 BUS_SPACE_BARRIER_READ); 5073 } 5074 5075 /* Clear the TX timeout timer. */ 5076 sc->watchdog_timer = 0; 5077 5078 /* Clear the tx hardware queue full flag. */ 5079 if (sc->used_tx_bd < sc->max_tx_bd) { 5080 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE), 5081 DBPRINT(sc, BCE_INFO_SEND, 5082 "%s(): Open TX chain! %d/%d (used/total)\n", 5083 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd)); 5084 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5085 } 5086 5087 sc->tx_cons = sw_tx_cons; 5088#ifdef BCE_DEBUG 5089 timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN); 5090 sc->tx_intr_time += (u64) (timer_start > timer_end ? 5091 (timer_start - timer_end) : (~timer_start + timer_end + 1)); 5092#endif 5093} 5094 5095 5096/****************************************************************************/ 5097/* Disables interrupt generation. */ 5098/* */ 5099/* Returns: */ 5100/* Nothing. */ 5101/****************************************************************************/ 5102static void 5103bce_disable_intr(struct bce_softc *sc) 5104{ 5105 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5106 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5107 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5108} 5109 5110 5111/****************************************************************************/ 5112/* Enables interrupt generation. */ 5113/* */ 5114/* Returns: */ 5115/* Nothing. 
*/ 5116/****************************************************************************/ 5117static void 5118bce_enable_intr(struct bce_softc *sc) 5119{ 5120 u32 val; 5121 5122 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5123 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 5124 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 5125 5126 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5127 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 5128 5129 val = REG_RD(sc, BCE_HC_COMMAND); 5130 REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW); 5131} 5132 5133 5134/****************************************************************************/ 5135/* Handles controller initialization. */ 5136/* */ 5137/* Returns: */ 5138/* Nothing. */ 5139/****************************************************************************/ 5140static void 5141bce_init_locked(struct bce_softc *sc) 5142{ 5143 struct ifnet *ifp; 5144 u32 ether_mtu = 0; 5145 5146 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 5147 5148 BCE_LOCK_ASSERT(sc); 5149 5150 ifp = sc->bce_ifp; 5151 5152 /* Check if the driver is still running and bail out if it is. */ 5153 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5154 goto bce_init_locked_exit; 5155 5156 bce_stop(sc); 5157 5158 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { 5159 BCE_PRINTF("%s(%d): Controller reset failed!\n", 5160 __FILE__, __LINE__); 5161 goto bce_init_locked_exit; 5162 } 5163 5164 if (bce_chipinit(sc)) { 5165 BCE_PRINTF("%s(%d): Controller initialization failed!\n", 5166 __FILE__, __LINE__); 5167 goto bce_init_locked_exit; 5168 } 5169 5170 if (bce_blockinit(sc)) { 5171 BCE_PRINTF("%s(%d): Block initialization failed!\n", 5172 __FILE__, __LINE__); 5173 goto bce_init_locked_exit; 5174 } 5175 5176 /* Load our MAC address. */ 5177 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN); 5178 bce_set_mac_addr(sc); 5179 5180 /* 5181 * Calculate and program the hardware Ethernet MTU 5182 * size. Be generous on the receive if we have room. 
5183 */ 5184#ifdef BCE_USE_SPLIT_HEADER 5185 if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size)) 5186 ether_mtu = sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size; 5187#else 5188 if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len) 5189 ether_mtu = sc->rx_bd_mbuf_data_len; 5190#endif 5191 else 5192 ether_mtu = ifp->if_mtu; 5193 5194 ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; 5195 5196 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__, 5197 ether_mtu); 5198 5199 /* Program the mtu, enabling jumbo frame support if necessary. */ 5200 if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)) 5201 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, 5202 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | 5203 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); 5204 else 5205 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); 5206 5207 DBPRINT(sc, BCE_INFO_LOAD, 5208 "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, " 5209 "rx_bd_mbuf_align_pad = %d, pg_bd_mbuf_alloc_size = %d\n", 5210 __FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len, 5211 sc->rx_bd_mbuf_align_pad, sc->pg_bd_mbuf_alloc_size); 5212 5213 /* Program appropriate promiscuous/multicast filtering. */ 5214 bce_set_rx_mode(sc); 5215 5216#ifdef BCE_USE_SPLIT_HEADER 5217 /* Init page buffer descriptor chain. */ 5218 bce_init_pg_chain(sc); 5219#endif 5220 5221 /* Init RX buffer descriptor chain. */ 5222 bce_init_rx_chain(sc); 5223 5224 /* Init TX buffer descriptor chain. */ 5225 bce_init_tx_chain(sc); 5226 5227 /* Enable host interrupts. 
*/ 5228 bce_enable_intr(sc); 5229 5230 bce_ifmedia_upd_locked(ifp); 5231 5232 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5233 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5234 5235 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); 5236 5237bce_init_locked_exit: 5238 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 5239 5240 return; 5241} 5242 5243 5244/****************************************************************************/ 5245/* Initialize the controller just enough so that any management firmware */ 5246/* running on the device will continue to operate correctly. */ 5247/* */ 5248/* Returns: */ 5249/* Nothing. */ 5250/****************************************************************************/ 5251static void 5252bce_mgmt_init_locked(struct bce_softc *sc) 5253{ 5254 struct ifnet *ifp; 5255 5256 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 5257 5258 BCE_LOCK_ASSERT(sc); 5259 5260 /* Bail out if management firmware is not running. */ 5261 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) { 5262 DBPRINT(sc, BCE_VERBOSE_SPECIAL, 5263 "No management firmware running...\n"); 5264 goto bce_mgmt_init_locked_exit; 5265 } 5266 5267 ifp = sc->bce_ifp; 5268 5269 /* Enable all critical blocks in the MAC. */ 5270 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff); 5271 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 5272 DELAY(20); 5273 5274 bce_ifmedia_upd_locked(ifp); 5275bce_mgmt_init_locked_exit: 5276 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 5277 5278 return; 5279} 5280 5281 5282/****************************************************************************/ 5283/* Handles controller initialization when called from an unlocked routine. */ 5284/* */ 5285/* Returns: */ 5286/* Nothing. 
*/ 5287/****************************************************************************/ 5288static void 5289bce_init(void *xsc) 5290{ 5291 struct bce_softc *sc = xsc; 5292 5293 BCE_LOCK(sc); 5294 bce_init_locked(sc); 5295 BCE_UNLOCK(sc); 5296} 5297 5298 5299/****************************************************************************/ 5300/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ 5301/* memory visible to the controller. */ 5302/* */ 5303/* Returns: */ 5304/* 0 for success, positive value for failure. */ 5305/* Modified: */ 5306/* m_head: May be set to NULL if MBUF is excessively fragmented. */ 5307/****************************************************************************/ 5308static int 5309bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head) 5310{ 5311 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 5312 bus_dmamap_t map; 5313 struct tx_bd *txbd = NULL; 5314 struct mbuf *m0; 5315 struct ether_vlan_header *eh; 5316 struct ip *ip; 5317 struct tcphdr *th; 5318 u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0; 5319 u32 prod_bseq; 5320 int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0; 5321 5322 5323#ifdef BCE_DEBUG 5324 u16 debug_prod; 5325#endif 5326 int i, error, nsegs, rc = 0; 5327 5328 /* Transfer any checksum offload flags to the bd. */ 5329 m0 = *m_head; 5330 if (m0->m_pkthdr.csum_flags) { 5331 if (m0->m_pkthdr.csum_flags & CSUM_IP) 5332 flags |= TX_BD_FLAGS_IP_CKSUM; 5333 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 5334 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 5335 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5336 /* For TSO the controller needs two pieces of info, */ 5337 /* the MSS and the IP+TCP options length. 
*/ 5338 mss = htole16(m0->m_pkthdr.tso_segsz); 5339 5340 /* Map the header and find the Ethernet type & header length */ 5341 eh = mtod(m0, struct ether_vlan_header *); 5342 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5343 etype = ntohs(eh->evl_proto); 5344 e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 5345 } else { 5346 etype = ntohs(eh->evl_encap_proto); 5347 e_hlen = ETHER_HDR_LEN; 5348 } 5349 5350 /* Check for supported TSO Ethernet types (only IPv4 for now) */ 5351 switch (etype) { 5352 case ETHERTYPE_IP: 5353 ip = (struct ip *)(m0->m_data + e_hlen); 5354 5355 /* TSO only supported for TCP protocol */ 5356 if (ip->ip_p != IPPROTO_TCP) { 5357 BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n", 5358 __FILE__, __LINE__); 5359 goto bce_tx_encap_skip_tso; 5360 } 5361 5362 /* Get IP header length in bytes (min 20) */ 5363 ip_hlen = ip->ip_hl << 2; 5364 5365 /* Get the TCP header length in bytes (min 20) */ 5366 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 5367 tcp_hlen = (th->th_off << 2); 5368 5369 /* IP header length and checksum will be calc'd by hardware */ 5370 ip_len = ip->ip_len; 5371 ip->ip_len = 0; 5372 ip->ip_sum = 0; 5373 break; 5374 case ETHERTYPE_IPV6: 5375 BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n", 5376 __FILE__, __LINE__); 5377 goto bce_tx_encap_skip_tso; 5378 default: 5379 BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n", 5380 __FILE__, __LINE__); 5381 goto bce_tx_encap_skip_tso; 5382 } 5383 5384 hdr_len = e_hlen + ip_hlen + tcp_hlen; 5385 5386 DBPRINT(sc, BCE_EXCESSIVE_SEND, 5387 "%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n", 5388 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len); 5389 5390 /* Set the LSO flag in the TX BD */ 5391 flags |= TX_BD_FLAGS_SW_LSO; 5392 /* Set the length of IP + TCP options (in 32 bit words) */ 5393 flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8); 5394 5395bce_tx_encap_skip_tso: 5396 DBRUN(sc->requested_tso_frames++); 5397 } 5398 } 5399 5400 
	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Check if the DMA mapping was successful */
	if (error == EFBIG) {

		/* The mbuf is too fragmented for our DMA mapping. */
		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
			__FUNCTION__, nsegs);
		DBRUN(bce_dump_mbuf(sc, m0););

		/* Try to defrag the mbuf. */
		m0 = m_defrag(*m_head, M_DONTWAIT);
		if (m0 == NULL) {
			/* Defrag was unsuccessful; frame is dropped. */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed++;
			return (ENOBUFS);
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
		    segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Insufficient DMA buffers available; the mbuf is
			 * kept so the caller can requeue it. */
			sc->tx_dma_map_failures++;
			return (error);
		} else if (error != 0) {
			/* Still can't map the mbuf, release it and return an error. */
			BCE_PRINTF(
			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
			    __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->tx_dma_map_failures++;
			return (ENOBUFS);
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available; mbuf kept for requeue. */
		sc->tx_dma_map_failures++;
		return (error);
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		sc->tx_dma_map_failures++;
		return (error);
	}

	/* Make sure there's room in the chain (mbuf kept for requeue). */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/* NOTE(review): mixes htole32() and htole16() results into
		 * one 32-bit field (and mss was already swapped at
		 * assignment); correct on little-endian hosts but verify
		 * for big-endian. */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor.
 */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(sc->debug_tx_mbuf_alloc++);

	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return(rc);
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/* Dequeues frames from the interface send queue and hands them to          */
/* bce_tx_encap() until the ring fills or the queue empties.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 * (bce_tx_encap() frees the mbuf and NULLs m_head
		 * on unrecoverable errors.)
		 */
		if (bce_tx_encap(sc, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	/* Start the transmit.
 */
	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	DBPRINT(sc, BCE_INFO_SEND,
		"%s(exit ): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	return;
}


/****************************************************************************/
/* Main transmit routine when called from another routine without a lock.   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	BCE_LOCK(sc);
	bce_start_locked(ifp);
	BCE_UNLOCK(sc);
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0;

	switch(command) {

	/* Set the interface MTU. */
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
			(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO_MISC,
			"SIOCSIFMTU: Changing MTU from %d to %d\n",
			(int) ifp->if_mtu, (int) ifr->ifr_mtu);

		BCE_LOCK(sc);
		ifp->if_mtu = ifr->ifr_mtu;
		/* Clear RUNNING so the bce_init_locked() below reprograms
		 * the chip instead of bailing out early. */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#ifdef BCE_USE_SPLIT_HEADER
		/* No buffer allocation size changes are necessary. */
#else
		/* Recalculate our buffer allocation sizes: jumbo (9KB)
		 * clusters when the frame no longer fits in a standard
		 * mbuf cluster, standard clusters otherwise. */
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) {
			sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
			sc->rx_bd_mbuf_align_pad = roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
			sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
				sc->rx_bd_mbuf_align_pad;
		} else {
			sc->rx_bd_mbuf_alloc_size = MCLBYTES;
			sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES;
			sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
				sc->rx_bd_mbuf_align_pad;
		}
#endif

		bce_init_locked(sc);
		BCE_UNLOCK(sc);
		break;

	/* Set interface flags. */
	case SIOCSIFFLAGS:
		DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");

		BCE_LOCK(sc);

		/* Check if the interface is up. */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Change promiscuous/multicast flags as necessary. */
				bce_set_rx_mode(sc);
			} else {
				/* Start the HW */
				bce_init_locked(sc);
			}
		} else {
			/* The interface is down, check if driver is running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bce_stop(sc);

				/* If MFW is running, restart the controller a bit.
*/ 5728 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 5729 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 5730 bce_chipinit(sc); 5731 bce_mgmt_init_locked(sc); 5732 } 5733 } 5734 } 5735 5736 BCE_UNLOCK(sc); 5737 error = 0; 5738 5739 break; 5740 5741 /* Add/Delete multicast address */ 5742 case SIOCADDMULTI: 5743 case SIOCDELMULTI: 5744 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n"); 5745 5746 BCE_LOCK(sc); 5747 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 5748 bce_set_rx_mode(sc); 5749 error = 0; 5750 } 5751 BCE_UNLOCK(sc); 5752 5753 break; 5754 5755 /* Set/Get Interface media */ 5756 case SIOCSIFMEDIA: 5757 case SIOCGIFMEDIA: 5758 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n"); 5759 5760 mii = device_get_softc(sc->bce_miibus); 5761 error = ifmedia_ioctl(ifp, ifr, 5762 &mii->mii_media, command); 5763 break; 5764 5765 /* Set interface capability */ 5766 case SIOCSIFCAP: 5767 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 5768 DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask); 5769 5770 /* Toggle the TX checksum capabilites enable flag. */ 5771 if (mask & IFCAP_TXCSUM) { 5772 ifp->if_capenable ^= IFCAP_TXCSUM; 5773 if (IFCAP_TXCSUM & ifp->if_capenable) 5774 ifp->if_hwassist = BCE_IF_HWASSIST; 5775 else 5776 ifp->if_hwassist = 0; 5777 } 5778 5779 /* Toggle the RX checksum capabilities enable flag. */ 5780 if (mask & IFCAP_RXCSUM) { 5781 ifp->if_capenable ^= IFCAP_RXCSUM; 5782 if (IFCAP_RXCSUM & ifp->if_capenable) 5783 ifp->if_hwassist = BCE_IF_HWASSIST; 5784 else 5785 ifp->if_hwassist = 0; 5786 } 5787 5788 /* Toggle the TSO capabilities enable flag. */ 5789 if (bce_tso_enable && (mask & IFCAP_TSO4)) { 5790 ifp->if_capenable ^= IFCAP_TSO4; 5791 if (IFCAP_RXCSUM & ifp->if_capenable) 5792 ifp->if_hwassist = BCE_IF_HWASSIST; 5793 else 5794 ifp->if_hwassist = 0; 5795 } 5796 5797 /* Toggle VLAN_MTU capabilities enable flag. 
 */
		if (mask & IFCAP_VLAN_MTU) {
			BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
				__FILE__, __LINE__);
		}

		/* Toggle VLANHWTAG capabilities enabled flag. */
		if (mask & IFCAP_VLAN_HWTAGGING) {
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
				BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
					"management firmware (ASF/IPMI/UMP) is running!\n",
					__FILE__, __LINE__);
			else
				BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
					__FILE__, __LINE__);
		}

		break;
	default:
		/* We don't know how to handle the IOCTL, pass it on. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}


/****************************************************************************/
/* Transmit timeout handler.  Called once per tick; resets the controller   */
/* when the TX timer (armed by bce_start_locked()) expires.                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct bce_softc *sc)
{

	BCE_LOCK_ASSERT(sc);

	/* Nothing to do if the timer is not armed or has not expired yet. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
		__FILE__, __LINE__);

	DBRUNMSG(BCE_VERBOSE_SEND,
		bce_dump_driver_state(sc);
		bce_dump_status_block(sc));

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	/* Clear RUNNING so bce_init_locked() performs a full restart. */
	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	sc->bce_ifp->if_oerrors++;

}


/*
 * Interrupt handler.
 */
/****************************************************************************/
/* Main interrupt entry point.
 Verifies that the controller generated the  */
/* interrupt and then calls a separate routine for handling the various     */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;
	u16 hw_rx_cons, hw_tx_cons;

	sc = xsc;
	ifp = sc->bce_ifp;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
	BCE_LOCK(sc);

	DBRUN(sc->interrupts_generated++);

	/* Make the DMA'd status block visible to the CPU. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF("Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt?  (attention bit differs
		 * from its acknowledged copy) */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(sc->unexpected_attentions++);

			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUNMSG(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Restart the controller and bail out. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);

		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
			break;

	}

	/* Hand the status block back to the device. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt.
 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}


/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
	    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");

		/* Build a 256-bit hash filter: CRC32 of each multicast
		 * address selects one bit across the 8 hash registers. */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}


/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
6096 */ 6097 ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions; 6098 6099 ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts + 6100 (u_long) stats->stat_EtherStatsOverrsizePkts + 6101 (u_long) stats->stat_IfInMBUFDiscards + 6102 (u_long) stats->stat_Dot3StatsAlignmentErrors + 6103 (u_long) stats->stat_Dot3StatsFCSErrors; 6104 6105 ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 6106 (u_long) stats->stat_Dot3StatsExcessiveCollisions + 6107 (u_long) stats->stat_Dot3StatsLateCollisions; 6108 6109 /* 6110 * Certain controllers don't report 6111 * carrier sense errors correctly. 6112 * See errata E11_5708CA0_1165. 6113 */ 6114 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 6115 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) 6116 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 6117 6118 /* 6119 * Update the sysctl statistics from the 6120 * hardware statistics. 6121 */ 6122 sc->stat_IfHCInOctets = 6123 ((u64) stats->stat_IfHCInOctets_hi << 32) + 6124 (u64) stats->stat_IfHCInOctets_lo; 6125 6126 sc->stat_IfHCInBadOctets = 6127 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 6128 (u64) stats->stat_IfHCInBadOctets_lo; 6129 6130 sc->stat_IfHCOutOctets = 6131 ((u64) stats->stat_IfHCOutOctets_hi << 32) + 6132 (u64) stats->stat_IfHCOutOctets_lo; 6133 6134 sc->stat_IfHCOutBadOctets = 6135 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + 6136 (u64) stats->stat_IfHCOutBadOctets_lo; 6137 6138 sc->stat_IfHCInUcastPkts = 6139 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + 6140 (u64) stats->stat_IfHCInUcastPkts_lo; 6141 6142 sc->stat_IfHCInMulticastPkts = 6143 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + 6144 (u64) stats->stat_IfHCInMulticastPkts_lo; 6145 6146 sc->stat_IfHCInBroadcastPkts = 6147 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + 6148 (u64) stats->stat_IfHCInBroadcastPkts_lo; 6149 6150 sc->stat_IfHCOutUcastPkts = 6151 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + 6152 (u64) 
stats->stat_IfHCOutUcastPkts_lo; 6153 6154 sc->stat_IfHCOutMulticastPkts = 6155 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) + 6156 (u64) stats->stat_IfHCOutMulticastPkts_lo; 6157 6158 sc->stat_IfHCOutBroadcastPkts = 6159 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 6160 (u64) stats->stat_IfHCOutBroadcastPkts_lo; 6161 6162 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 6163 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 6164 6165 sc->stat_Dot3StatsCarrierSenseErrors = 6166 stats->stat_Dot3StatsCarrierSenseErrors; 6167 6168 sc->stat_Dot3StatsFCSErrors = 6169 stats->stat_Dot3StatsFCSErrors; 6170 6171 sc->stat_Dot3StatsAlignmentErrors = 6172 stats->stat_Dot3StatsAlignmentErrors; 6173 6174 sc->stat_Dot3StatsSingleCollisionFrames = 6175 stats->stat_Dot3StatsSingleCollisionFrames; 6176 6177 sc->stat_Dot3StatsMultipleCollisionFrames = 6178 stats->stat_Dot3StatsMultipleCollisionFrames; 6179 6180 sc->stat_Dot3StatsDeferredTransmissions = 6181 stats->stat_Dot3StatsDeferredTransmissions; 6182 6183 sc->stat_Dot3StatsExcessiveCollisions = 6184 stats->stat_Dot3StatsExcessiveCollisions; 6185 6186 sc->stat_Dot3StatsLateCollisions = 6187 stats->stat_Dot3StatsLateCollisions; 6188 6189 sc->stat_EtherStatsCollisions = 6190 stats->stat_EtherStatsCollisions; 6191 6192 sc->stat_EtherStatsFragments = 6193 stats->stat_EtherStatsFragments; 6194 6195 sc->stat_EtherStatsJabbers = 6196 stats->stat_EtherStatsJabbers; 6197 6198 sc->stat_EtherStatsUndersizePkts = 6199 stats->stat_EtherStatsUndersizePkts; 6200 6201 sc->stat_EtherStatsOverrsizePkts = 6202 stats->stat_EtherStatsOverrsizePkts; 6203 6204 sc->stat_EtherStatsPktsRx64Octets = 6205 stats->stat_EtherStatsPktsRx64Octets; 6206 6207 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 6208 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 6209 6210 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 6211 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 6212 6213 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 6214 
stats->stat_EtherStatsPktsRx256Octetsto511Octets; 6215 6216 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 6217 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 6218 6219 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 6220 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 6221 6222 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 6223 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 6224 6225 sc->stat_EtherStatsPktsTx64Octets = 6226 stats->stat_EtherStatsPktsTx64Octets; 6227 6228 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 6229 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 6230 6231 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 6232 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 6233 6234 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 6235 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 6236 6237 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 6238 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 6239 6240 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 6241 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 6242 6243 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 6244 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 6245 6246 sc->stat_XonPauseFramesReceived = 6247 stats->stat_XonPauseFramesReceived; 6248 6249 sc->stat_XoffPauseFramesReceived = 6250 stats->stat_XoffPauseFramesReceived; 6251 6252 sc->stat_OutXonSent = 6253 stats->stat_OutXonSent; 6254 6255 sc->stat_OutXoffSent = 6256 stats->stat_OutXoffSent; 6257 6258 sc->stat_FlowControlDone = 6259 stats->stat_FlowControlDone; 6260 6261 sc->stat_MacControlFramesReceived = 6262 stats->stat_MacControlFramesReceived; 6263 6264 sc->stat_XoffStateEntered = 6265 stats->stat_XoffStateEntered; 6266 6267 sc->stat_IfInFramesL2FilterDiscards = 6268 stats->stat_IfInFramesL2FilterDiscards; 6269 6270 sc->stat_IfInRuleCheckerDiscards = 6271 stats->stat_IfInRuleCheckerDiscards; 6272 6273 sc->stat_IfInFTQDiscards = 6274 stats->stat_IfInFTQDiscards; 6275 6276 sc->stat_IfInMBUFDiscards = 6277 
stats->stat_IfInMBUFDiscards; 6278 6279 sc->stat_IfInRuleCheckerP4Hit = 6280 stats->stat_IfInRuleCheckerP4Hit; 6281 6282 sc->stat_CatchupInRuleCheckerDiscards = 6283 stats->stat_CatchupInRuleCheckerDiscards; 6284 6285 sc->stat_CatchupInFTQDiscards = 6286 stats->stat_CatchupInFTQDiscards; 6287 6288 sc->stat_CatchupInMBUFDiscards = 6289 stats->stat_CatchupInMBUFDiscards; 6290 6291 sc->stat_CatchupInRuleCheckerP4Hit = 6292 stats->stat_CatchupInRuleCheckerP4Hit; 6293 6294 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 6295 6296 DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__); 6297} 6298 6299 6300/****************************************************************************/ 6301/* Periodic function to notify the bootcode that the driver is still */ 6302/* present. */ 6303/* */ 6304/* Returns: */ 6305/* Nothing. */ 6306/****************************************************************************/ 6307static void 6308bce_pulse(void *xsc) 6309{ 6310 struct bce_softc *sc = xsc; 6311 u32 msg; 6312 6313 DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n"); 6314 6315 BCE_LOCK_ASSERT(sc); 6316 6317 /* Tell the firmware that the driver is still running. */ 6318 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq; 6319 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg); 6320 6321 /* Schedule the next pulse. */ 6322 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc); 6323 6324 return; 6325} 6326 6327 6328/****************************************************************************/ 6329/* Periodic function to perform maintenance tasks. */ 6330/* */ 6331/* Returns: */ 6332/* Nothing. */ 6333/****************************************************************************/ 6334static void 6335bce_tick(void *xsc) 6336{ 6337 struct bce_softc *sc = xsc; 6338 struct mii_data *mii; 6339 struct ifnet *ifp; 6340 6341 ifp = sc->bce_ifp; 6342 6343 BCE_LOCK_ASSERT(sc); 6344 6345 /* Schedule the next tick. 
*/ 6346 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); 6347 6348 /* Update the statistics from the hardware statistics block. */ 6349 bce_stats_update(sc); 6350 6351 /* Top off the receive and page chains. */ 6352#ifdef BCE_USE_SPLIT_HEADER 6353 bce_fill_pg_chain(sc); 6354#endif 6355 bce_fill_rx_chain(sc); 6356 6357 /* Check that chip hasn't hung. */ 6358 bce_watchdog(sc); 6359 6360 /* If link is up already up then we're done. */ 6361 if (sc->bce_link) 6362 goto bce_tick_locked_exit; 6363 6364 mii = device_get_softc(sc->bce_miibus); 6365 mii_tick(mii); 6366 6367 /* Check if the link has come up. */ 6368 if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE && 6369 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 6370 sc->bce_link++; 6371 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 6372 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) && 6373 bootverbose) 6374 BCE_PRINTF("Gigabit link up\n"); 6375 6376 /* Now that link is up, handle any outstanding TX traffic. */ 6377 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 6378 bce_start_locked(ifp); 6379 } 6380 6381bce_tick_locked_exit: 6382 return; 6383} 6384 6385 6386#ifdef BCE_DEBUG 6387/****************************************************************************/ 6388/* Allows the driver state to be dumped through the sysctl interface. */ 6389/* */ 6390/* Returns: */ 6391/* 0 for success, positive value for failure. 
*/ 6392/****************************************************************************/ 6393static int 6394bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 6395{ 6396 int error; 6397 int result; 6398 struct bce_softc *sc; 6399 6400 result = -1; 6401 error = sysctl_handle_int(oidp, &result, 0, req); 6402 6403 if (error || !req->newptr) 6404 return (error); 6405 6406 if (result == 1) { 6407 sc = (struct bce_softc *)arg1; 6408 bce_dump_driver_state(sc); 6409 } 6410 6411 return error; 6412} 6413 6414 6415/****************************************************************************/ 6416/* Allows the hardware state to be dumped through the sysctl interface. */ 6417/* */ 6418/* Returns: */ 6419/* 0 for success, positive value for failure. */ 6420/****************************************************************************/ 6421static int 6422bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 6423{ 6424 int error; 6425 int result; 6426 struct bce_softc *sc; 6427 6428 result = -1; 6429 error = sysctl_handle_int(oidp, &result, 0, req); 6430 6431 if (error || !req->newptr) 6432 return (error); 6433 6434 if (result == 1) { 6435 sc = (struct bce_softc *)arg1; 6436 bce_dump_hw_state(sc); 6437 } 6438 6439 return error; 6440} 6441 6442 6443/****************************************************************************/ 6444/* Allows the bootcode state to be dumped through the sysctl interface. */ 6445/* */ 6446/* Returns: */ 6447/* 0 for success, positive value for failure. 
*/ 6448/****************************************************************************/ 6449static int 6450bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS) 6451{ 6452 int error; 6453 int result; 6454 struct bce_softc *sc; 6455 6456 result = -1; 6457 error = sysctl_handle_int(oidp, &result, 0, req); 6458 6459 if (error || !req->newptr) 6460 return (error); 6461 6462 if (result == 1) { 6463 sc = (struct bce_softc *)arg1; 6464 bce_dump_bc_state(sc); 6465 } 6466 6467 return error; 6468} 6469 6470 6471/****************************************************************************/ 6472/* Provides a sysctl interface to allow dumping the RX chain. */ 6473/* */ 6474/* Returns: */ 6475/* 0 for success, positive value for failure. */ 6476/****************************************************************************/ 6477static int 6478bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 6479{ 6480 int error; 6481 int result; 6482 struct bce_softc *sc; 6483 6484 result = -1; 6485 error = sysctl_handle_int(oidp, &result, 0, req); 6486 6487 if (error || !req->newptr) 6488 return (error); 6489 6490 if (result == 1) { 6491 sc = (struct bce_softc *)arg1; 6492 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD); 6493 } 6494 6495 return error; 6496} 6497 6498 6499/****************************************************************************/ 6500/* Provides a sysctl interface to allow dumping the TX chain. */ 6501/* */ 6502/* Returns: */ 6503/* 0 for success, positive value for failure. 
*/ 6504/****************************************************************************/ 6505static int 6506bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 6507{ 6508 int error; 6509 int result; 6510 struct bce_softc *sc; 6511 6512 result = -1; 6513 error = sysctl_handle_int(oidp, &result, 0, req); 6514 6515 if (error || !req->newptr) 6516 return (error); 6517 6518 if (result == 1) { 6519 sc = (struct bce_softc *)arg1; 6520 bce_dump_tx_chain(sc, 0, USABLE_TX_BD); 6521 } 6522 6523 return error; 6524} 6525 6526 6527#ifdef BCE_USE_SPLIT_HEADER 6528/****************************************************************************/ 6529/* Provides a sysctl interface to allow dumping the page chain. */ 6530/* */ 6531/* Returns: */ 6532/* 0 for success, positive value for failure. */ 6533/****************************************************************************/ 6534static int 6535bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS) 6536{ 6537 int error; 6538 int result; 6539 struct bce_softc *sc; 6540 6541 result = -1; 6542 error = sysctl_handle_int(oidp, &result, 0, req); 6543 6544 if (error || !req->newptr) 6545 return (error); 6546 6547 if (result == 1) { 6548 sc = (struct bce_softc *)arg1; 6549 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD); 6550 } 6551 6552 return error; 6553} 6554#endif 6555 6556 6557/****************************************************************************/ 6558/* Provides a sysctl interface to allow reading arbitrary registers in the */ 6559/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6560/* */ 6561/* Returns: */ 6562/* 0 for success, positive value for failure. 
*/ 6563/****************************************************************************/ 6564static int 6565bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 6566{ 6567 struct bce_softc *sc; 6568 int error; 6569 u32 val, result; 6570 6571 result = -1; 6572 error = sysctl_handle_int(oidp, &result, 0, req); 6573 if (error || (req->newptr == NULL)) 6574 return (error); 6575 6576 /* Make sure the register is accessible. */ 6577 if (result < 0x8000) { 6578 sc = (struct bce_softc *)arg1; 6579 val = REG_RD(sc, result); 6580 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 6581 } else if (result < 0x0280000) { 6582 sc = (struct bce_softc *)arg1; 6583 val = REG_RD_IND(sc, result); 6584 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 6585 } 6586 6587 return (error); 6588} 6589 6590 6591/****************************************************************************/ 6592/* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 6593/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6594/* */ 6595/* Returns: */ 6596/* 0 for success, positive value for failure. */ 6597/****************************************************************************/ 6598static int 6599bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 6600{ 6601 struct bce_softc *sc; 6602 device_t dev; 6603 int error, result; 6604 u16 val; 6605 6606 result = -1; 6607 error = sysctl_handle_int(oidp, &result, 0, req); 6608 if (error || (req->newptr == NULL)) 6609 return (error); 6610 6611 /* Make sure the register is accessible. */ 6612 if (result < 0x20) { 6613 sc = (struct bce_softc *)arg1; 6614 dev = sc->bce_dev; 6615 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 6616 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val); 6617 } 6618 return (error); 6619} 6620 6621 6622/****************************************************************************/ 6623/* Provides a sysctl interface to forcing the driver to dump state and */ 6624/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! 
*/ 6625/* */ 6626/* Returns: */ 6627/* 0 for success, positive value for failure. */ 6628/****************************************************************************/ 6629static int 6630bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 6631{ 6632 int error; 6633 int result; 6634 struct bce_softc *sc; 6635 6636 result = -1; 6637 error = sysctl_handle_int(oidp, &result, 0, req); 6638 6639 if (error || !req->newptr) 6640 return (error); 6641 6642 if (result == 1) { 6643 sc = (struct bce_softc *)arg1; 6644 bce_breakpoint(sc); 6645 } 6646 6647 return error; 6648} 6649#endif 6650 6651 6652/****************************************************************************/ 6653/* Adds any sysctl parameters for tuning or debugging purposes. */ 6654/* */ 6655/* Returns: */ 6656/* 0 for success, positive value for failure. */ 6657/****************************************************************************/ 6658static void 6659bce_add_sysctls(struct bce_softc *sc) 6660{ 6661 struct sysctl_ctx_list *ctx; 6662 struct sysctl_oid_list *children; 6663 6664 ctx = device_get_sysctl_ctx(sc->bce_dev); 6665 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); 6666 6667#ifdef BCE_DEBUG 6668 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6669 "rx_low_watermark", 6670 CTLFLAG_RD, &sc->rx_low_watermark, 6671 0, "Lowest level of free rx_bd's"); 6672 6673 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6674 "rx_empty_count", 6675 CTLFLAG_RD, &sc->rx_empty_count, 6676 0, "Number of times the RX chain was empty"); 6677 6678 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6679 "tx_hi_watermark", 6680 CTLFLAG_RD, &sc->tx_hi_watermark, 6681 0, "Highest level of used tx_bd's"); 6682 6683 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6684 "tx_full_count", 6685 CTLFLAG_RD, &sc->tx_full_count, 6686 0, "Number of times the TX chain was full"); 6687 6688 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6689 "l2fhdr_status_errors", 6690 CTLFLAG_RD, &sc->l2fhdr_status_errors, 6691 0, "l2_fhdr status errors"); 6692 6693 SYSCTL_ADD_INT(ctx, 
children, OID_AUTO, 6694 "unexpected_attentions", 6695 CTLFLAG_RD, &sc->unexpected_attentions, 6696 0, "Unexpected attentions"); 6697 6698 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6699 "lost_status_block_updates", 6700 CTLFLAG_RD, &sc->lost_status_block_updates, 6701 0, "Lost status block updates"); 6702 6703 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6704 "debug_mbuf_sim_alloc_failed", 6705 CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed, 6706 0, "Simulated mbuf cluster allocation failures"); 6707 6708 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6709 "requested_tso_frames", 6710 CTLFLAG_RD, &sc->requested_tso_frames, 6711 0, "Number of TSO frames received"); 6712 6713 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6714 "rx_interrupts", 6715 CTLFLAG_RD, &sc->rx_interrupts, 6716 0, "Number of RX interrupts"); 6717 6718 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6719 "tx_interrupts", 6720 CTLFLAG_RD, &sc->tx_interrupts, 6721 0, "Number of TX interrupts"); 6722 6723 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6724 "rx_intr_time", 6725 CTLFLAG_RD, &sc->rx_intr_time, 6726 "RX interrupt time"); 6727 6728 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6729 "tx_intr_time", 6730 CTLFLAG_RD, &sc->tx_intr_time, 6731 "TX interrupt time"); 6732 6733#endif 6734 6735 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6736 "mbuf_alloc_failed", 6737 CTLFLAG_RD, &sc->mbuf_alloc_failed, 6738 0, "mbuf cluster allocation failures"); 6739 6740 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6741 "tx_dma_map_failures", 6742 CTLFLAG_RD, &sc->tx_dma_map_failures, 6743 0, "tx dma mapping failures"); 6744 6745 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6746 "stat_IfHcInOctets", 6747 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6748 "Bytes received"); 6749 6750 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6751 "stat_IfHCInBadOctets", 6752 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6753 "Bad bytes received"); 6754 6755 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6756 "stat_IfHCOutOctets", 6757 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6758 "Bytes sent"); 6759 
6760 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6761 "stat_IfHCOutBadOctets", 6762 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6763 "Bad bytes sent"); 6764 6765 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6766 "stat_IfHCInUcastPkts", 6767 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6768 "Unicast packets received"); 6769 6770 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6771 "stat_IfHCInMulticastPkts", 6772 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6773 "Multicast packets received"); 6774 6775 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6776 "stat_IfHCInBroadcastPkts", 6777 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6778 "Broadcast packets received"); 6779 6780 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6781 "stat_IfHCOutUcastPkts", 6782 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6783 "Unicast packets sent"); 6784 6785 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6786 "stat_IfHCOutMulticastPkts", 6787 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6788 "Multicast packets sent"); 6789 6790 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6791 "stat_IfHCOutBroadcastPkts", 6792 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6793 "Broadcast packets sent"); 6794 6795 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6796 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6797 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6798 0, "Internal MAC transmit errors"); 6799 6800 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6801 "stat_Dot3StatsCarrierSenseErrors", 6802 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6803 0, "Carrier sense errors"); 6804 6805 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6806 "stat_Dot3StatsFCSErrors", 6807 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6808 0, "Frame check sequence errors"); 6809 6810 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6811 "stat_Dot3StatsAlignmentErrors", 6812 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6813 0, "Alignment errors"); 6814 6815 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6816 "stat_Dot3StatsSingleCollisionFrames", 6817 CTLFLAG_RD, 
&sc->stat_Dot3StatsSingleCollisionFrames, 6818 0, "Single Collision Frames"); 6819 6820 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6821 "stat_Dot3StatsMultipleCollisionFrames", 6822 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6823 0, "Multiple Collision Frames"); 6824 6825 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6826 "stat_Dot3StatsDeferredTransmissions", 6827 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6828 0, "Deferred Transmissions"); 6829 6830 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6831 "stat_Dot3StatsExcessiveCollisions", 6832 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6833 0, "Excessive Collisions"); 6834 6835 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6836 "stat_Dot3StatsLateCollisions", 6837 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6838 0, "Late Collisions"); 6839 6840 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6841 "stat_EtherStatsCollisions", 6842 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6843 0, "Collisions"); 6844 6845 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6846 "stat_EtherStatsFragments", 6847 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6848 0, "Fragments"); 6849 6850 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6851 "stat_EtherStatsJabbers", 6852 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6853 0, "Jabbers"); 6854 6855 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6856 "stat_EtherStatsUndersizePkts", 6857 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6858 0, "Undersize packets"); 6859 6860 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6861 "stat_EtherStatsOverrsizePkts", 6862 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6863 0, "stat_EtherStatsOverrsizePkts"); 6864 6865 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6866 "stat_EtherStatsPktsRx64Octets", 6867 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6868 0, "Bytes received in 64 byte packets"); 6869 6870 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6871 "stat_EtherStatsPktsRx65Octetsto127Octets", 6872 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6873 0, "Bytes received 
in 65 to 127 byte packets"); 6874 6875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6876 "stat_EtherStatsPktsRx128Octetsto255Octets", 6877 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6878 0, "Bytes received in 128 to 255 byte packets"); 6879 6880 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6881 "stat_EtherStatsPktsRx256Octetsto511Octets", 6882 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6883 0, "Bytes received in 256 to 511 byte packets"); 6884 6885 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6886 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6887 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6888 0, "Bytes received in 512 to 1023 byte packets"); 6889 6890 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6891 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6892 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6893 0, "Bytes received in 1024 t0 1522 byte packets"); 6894 6895 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6896 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6897 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6898 0, "Bytes received in 1523 to 9022 byte packets"); 6899 6900 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6901 "stat_EtherStatsPktsTx64Octets", 6902 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6903 0, "Bytes sent in 64 byte packets"); 6904 6905 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6906 "stat_EtherStatsPktsTx65Octetsto127Octets", 6907 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6908 0, "Bytes sent in 65 to 127 byte packets"); 6909 6910 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6911 "stat_EtherStatsPktsTx128Octetsto255Octets", 6912 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6913 0, "Bytes sent in 128 to 255 byte packets"); 6914 6915 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6916 "stat_EtherStatsPktsTx256Octetsto511Octets", 6917 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6918 0, "Bytes sent in 256 to 511 byte packets"); 6919 6920 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6921 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6922 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6923 0, "Bytes sent in 512 to 1023 byte packets"); 6924 6925 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6926 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6927 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6928 0, "Bytes sent in 1024 to 1522 byte packets"); 6929 6930 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6931 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6932 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6933 0, "Bytes sent in 1523 to 9022 byte packets"); 6934 6935 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6936 "stat_XonPauseFramesReceived", 6937 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6938 0, "XON pause frames receved"); 6939 6940 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6941 "stat_XoffPauseFramesReceived", 6942 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6943 0, "XOFF pause frames received"); 6944 6945 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6946 "stat_OutXonSent", 6947 CTLFLAG_RD, &sc->stat_OutXonSent, 6948 0, "XON pause frames sent"); 6949 6950 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6951 "stat_OutXoffSent", 6952 CTLFLAG_RD, &sc->stat_OutXoffSent, 6953 0, "XOFF pause frames sent"); 6954 6955 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6956 "stat_FlowControlDone", 6957 CTLFLAG_RD, &sc->stat_FlowControlDone, 6958 0, "Flow control done"); 6959 6960 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6961 "stat_MacControlFramesReceived", 6962 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6963 0, "MAC control frames received"); 6964 6965 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6966 "stat_XoffStateEntered", 6967 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6968 0, "XOFF state entered"); 6969 6970 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6971 "stat_IfInFramesL2FilterDiscards", 6972 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6973 0, "Received L2 packets discarded"); 6974 6975 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6976 "stat_IfInRuleCheckerDiscards", 6977 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6978 0, "Received packets discarded by rule"); 6979 6980 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6981 "stat_IfInFTQDiscards", 6982 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6983 0, "Received packet FTQ discards"); 6984 6985 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6986 "stat_IfInMBUFDiscards", 6987 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6988 0, "Received packets discarded due to lack of controller buffer memory"); 6989 6990 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6991 "stat_IfInRuleCheckerP4Hit", 6992 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6993 0, "Received packets rule checker hits"); 6994 6995 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6996 "stat_CatchupInRuleCheckerDiscards", 6997 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6998 0, "Received packets discarded in Catchup path"); 6999 7000 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 7001 "stat_CatchupInFTQDiscards", 7002 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 7003 0, "Received packets discarded in FTQ in Catchup path"); 7004 7005 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 7006 "stat_CatchupInMBUFDiscards", 7007 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 7008 0, "Received packets discarded in controller buffer memory in Catchup path"); 7009 7010 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 7011 "stat_CatchupInRuleCheckerP4Hit", 7012 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 7013 0, "Received packets rule checker hits in Catchup path"); 7014 7015 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 7016 "com_no_buffers", 7017 CTLFLAG_RD, &sc->com_no_buffers, 7018 0, "Valid packets received but no RX buffers available"); 7019 7020#ifdef BCE_DEBUG 7021 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7022 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 7023 (void *)sc, 0, 7024 bce_sysctl_driver_state, "I", "Drive state information"); 7025 7026 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7027 "hw_state", 
CTLTYPE_INT | CTLFLAG_RW, 7028 (void *)sc, 0, 7029 bce_sysctl_hw_state, "I", "Hardware state information"); 7030 7031 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7032 "bc_state", CTLTYPE_INT | CTLFLAG_RW, 7033 (void *)sc, 0, 7034 bce_sysctl_bc_state, "I", "Bootcode state information"); 7035 7036 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7037 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW, 7038 (void *)sc, 0, 7039 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain"); 7040 7041 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7042 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 7043 (void *)sc, 0, 7044 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 7045 7046#ifdef BCE_USE_SPLIT_HEADER 7047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7048 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW, 7049 (void *)sc, 0, 7050 bce_sysctl_dump_pg_chain, "I", "Dump page chain"); 7051#endif 7052 7053 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7054 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 7055 (void *)sc, 0, 7056 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 7057 7058 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7059 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 7060 (void *)sc, 0, 7061 bce_sysctl_reg_read, "I", "Register read"); 7062 7063 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 7064 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 7065 (void *)sc, 0, 7066 bce_sysctl_phy_read, "I", "PHY register read"); 7067 7068#endif 7069 7070} 7071 7072 7073/****************************************************************************/ 7074/* BCE Debug Routines */ 7075/****************************************************************************/ 7076#ifdef BCE_DEBUG 7077 7078/****************************************************************************/ 7079/* Freezes the controller to allow for a cohesive state dump. */ 7080/* */ 7081/* Returns: */ 7082/* Nothing. 
*/ 7083/****************************************************************************/ 7084static void 7085bce_freeze_controller(struct bce_softc *sc) 7086{ 7087 u32 val; 7088 val = REG_RD(sc, BCE_MISC_COMMAND); 7089 val |= BCE_MISC_COMMAND_DISABLE_ALL; 7090 REG_WR(sc, BCE_MISC_COMMAND, val); 7091 7092} 7093 7094 7095/****************************************************************************/ 7096/* Unfreezes the controller after a freeze operation. This may not always */ 7097/* work and the controller will require a reset! */ 7098/* */ 7099/* Returns: */ 7100/* Nothing. */ 7101/****************************************************************************/ 7102static void 7103bce_unfreeze_controller(struct bce_softc *sc) 7104{ 7105 u32 val; 7106 val = REG_RD(sc, BCE_MISC_COMMAND); 7107 val |= BCE_MISC_COMMAND_ENABLE_ALL; 7108 REG_WR(sc, BCE_MISC_COMMAND, val); 7109 7110} 7111 7112/****************************************************************************/ 7113/* Prints out information about an mbuf. */ 7114/* */ 7115/* Returns: */ 7116/* Nothing. 
*/ 7117/****************************************************************************/ 7118static void 7119bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 7120{ 7121 struct mbuf *mp = m; 7122 7123 if (m == NULL) { 7124 BCE_PRINTF("mbuf: null pointer\n"); 7125 return; 7126 } 7127 7128 while (mp) { 7129 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n", 7130 mp, mp->m_len, mp->m_flags, 7131 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", 7132 mp->m_data); 7133 7134 if (mp->m_flags & M_PKTHDR) { 7135 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n", 7136 mp->m_pkthdr.len, mp->m_flags, 7137 "\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG" 7138 "\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE", 7139 mp->m_pkthdr.csum_flags, 7140 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 7141 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 7142 "\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR"); 7143 } 7144 7145 if (mp->m_flags & M_EXT) { 7146 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", 7147 mp->m_ext.ext_buf, mp->m_ext.ext_size); 7148 switch (mp->m_ext.ext_type) { 7149 case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break; 7150 case EXT_SFBUF: printf("EXT_SFBUF\n"); break; 7151 case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break; 7152 case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break; 7153 case EXT_PACKET: printf("EXT_PACKET\n"); break; 7154 case EXT_MBUF: printf("EXT_MBUF\n"); break; 7155 case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break; 7156 case EXT_MOD_TYPE: printf("EXT_MDD_TYPE\n"); break; 7157 case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break; 7158 case EXT_EXTREF: printf("EXT_EXTREF\n"); break; 7159 default: printf("UNKNOWN\n"); 7160 } 7161 } 7162 7163 mp = mp->m_next; 7164 } 7165} 7166 7167 7168/****************************************************************************/ 7169/* Prints out the mbufs in the TX mbuf chain. */ 7170/* */ 7171/* Returns: */ 7172/* Nothing. 
*/ 7173/****************************************************************************/ 7174static void 7175bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 7176{ 7177 struct mbuf *m; 7178 7179 BCE_PRINTF( 7180 "----------------------------" 7181 " tx mbuf data " 7182 "----------------------------\n"); 7183 7184 for (int i = 0; i < count; i++) { 7185 m = sc->tx_mbuf_ptr[chain_prod]; 7186 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); 7187 bce_dump_mbuf(sc, m); 7188 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 7189 } 7190 7191 BCE_PRINTF( 7192 "----------------------------" 7193 "----------------" 7194 "----------------------------\n"); 7195} 7196 7197 7198/****************************************************************************/ 7199/* Prints out the mbufs in the RX mbuf chain. */ 7200/* */ 7201/* Returns: */ 7202/* Nothing. */ 7203/****************************************************************************/ 7204static void 7205bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 7206{ 7207 struct mbuf *m; 7208 7209 BCE_PRINTF( 7210 "----------------------------" 7211 " rx mbuf data " 7212 "----------------------------\n"); 7213 7214 for (int i = 0; i < count; i++) { 7215 m = sc->rx_mbuf_ptr[chain_prod]; 7216 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); 7217 bce_dump_mbuf(sc, m); 7218 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 7219 } 7220 7221 7222 BCE_PRINTF( 7223 "----------------------------" 7224 "----------------" 7225 "----------------------------\n"); 7226} 7227 7228 7229#ifdef BCE_USE_SPLIT_HEADER 7230/****************************************************************************/ 7231/* Prints out the mbufs in the mbuf page chain. */ 7232/* */ 7233/* Returns: */ 7234/* Nothing. 
*/ 7235/****************************************************************************/ 7236static void 7237bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 7238{ 7239 struct mbuf *m; 7240 7241 BCE_PRINTF( 7242 "----------------------------" 7243 " pg mbuf data " 7244 "----------------------------\n"); 7245 7246 for (int i = 0; i < count; i++) { 7247 m = sc->pg_mbuf_ptr[chain_prod]; 7248 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); 7249 bce_dump_mbuf(sc, m); 7250 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); 7251 } 7252 7253 7254 BCE_PRINTF( 7255 "----------------------------" 7256 "----------------" 7257 "----------------------------\n"); 7258} 7259#endif 7260 7261 7262/****************************************************************************/ 7263/* Prints out a tx_bd structure. */ 7264/* */ 7265/* Returns: */ 7266/* Nothing. */ 7267/****************************************************************************/ 7268static void 7269bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 7270{ 7271 if (idx > MAX_TX_BD) 7272 /* Index out of range. */ 7273 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 7274 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 7275 /* TX Chain page pointer. */ 7276 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 7277 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo); 7278 else { 7279 /* Normal tx_bd entry. 
*/ 7280 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 7281 "vlan tag= 0x%04X, flags = 0x%04X (", idx, 7282 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 7283 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, 7284 txbd->tx_bd_flags); 7285 7286 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) 7287 printf(" CONN_FAULT"); 7288 7289 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) 7290 printf(" TCP_UDP_CKSUM"); 7291 7292 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) 7293 printf(" IP_CKSUM"); 7294 7295 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) 7296 printf(" VLAN"); 7297 7298 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) 7299 printf(" COAL_NOW"); 7300 7301 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) 7302 printf(" DONT_GEN_CRC"); 7303 7304 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) 7305 printf(" START"); 7306 7307 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) 7308 printf(" END"); 7309 7310 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) 7311 printf(" LSO"); 7312 7313 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) 7314 printf(" OPTION_WORD"); 7315 7316 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) 7317 printf(" FLAGS"); 7318 7319 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) 7320 printf(" SNAP"); 7321 7322 printf(" )\n"); 7323 } 7324 7325} 7326 7327 7328/****************************************************************************/ 7329/* Prints out a rx_bd structure. */ 7330/* */ 7331/* Returns: */ 7332/* Nothing. */ 7333/****************************************************************************/ 7334static void 7335bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 7336{ 7337 if (idx > MAX_RX_BD) 7338 /* Index out of range. */ 7339 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 7340 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 7341 /* RX Chain page pointer. 
*/ 7342 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 7343 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo); 7344 else 7345 /* Normal rx_bd entry. */ 7346 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 7347 "flags = 0x%08X\n", idx, 7348 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 7349 rxbd->rx_bd_len, rxbd->rx_bd_flags); 7350} 7351 7352 7353#ifdef BCE_USE_SPLIT_HEADER 7354/****************************************************************************/ 7355/* Prints out a rx_bd structure in the page chain. */ 7356/* */ 7357/* Returns: */ 7358/* Nothing. */ 7359/****************************************************************************/ 7360static void 7361bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) 7362{ 7363 if (idx > MAX_PG_BD) 7364 /* Index out of range. */ 7365 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); 7366 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) 7367 /* Page Chain page pointer. */ 7368 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 7369 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); 7370 else 7371 /* Normal rx_bd entry. */ 7372 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 7373 "flags = 0x%08X\n", idx, 7374 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, 7375 pgbd->rx_bd_len, pgbd->rx_bd_flags); 7376} 7377#endif 7378 7379 7380/****************************************************************************/ 7381/* Prints out a l2_fhdr structure. */ 7382/* */ 7383/* Returns: */ 7384/* Nothing. 
*/ 7385/****************************************************************************/ 7386static void 7387bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 7388{ 7389 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " 7390 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " 7391 "tcp_udp_xsum = 0x%04X\n", idx, 7392 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, 7393 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 7394 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 7395} 7396 7397 7398/****************************************************************************/ 7399/* Prints out the L2 context memory. (Only useful for CID 0 to 15. ) */ 7400/* */ 7401/* Returns: */ 7402/* Nothing. */ 7403/****************************************************************************/ 7404static void 7405bce_dump_ctx(struct bce_softc *sc, u16 cid) 7406{ 7407 if (cid < TX_CID) { 7408 BCE_PRINTF( 7409 "----------------------------" 7410 " CTX Data " 7411 "----------------------------\n"); 7412 7413 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); 7414 BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BDIDX) host rx producer index\n", 7415 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BDIDX)); 7416 BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BSEQ) host byte sequence\n", 7417 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BSEQ)); 7418 BCE_PRINTF(" 0x%08X - (L2CTX_NX_BSEQ) h/w byte sequence\n", 7419 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BSEQ)); 7420 BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_HI) h/w buffer descriptor address\n", 7421 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_HI)); 7422 BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_LO) h/w buffer descriptor address\n", 7423 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_LO)); 7424 BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDIDX) h/w rx consumer index\n", 7425 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDIDX)); 7426 BCE_PRINTF(" 0x%08X - (L2CTX_HOST_PG_BDIDX) host page producer index\n", 7427 CTX_RD(sc, GET_CID_ADDR(cid), 
BCE_L2CTX_HOST_PG_BDIDX)); 7428 BCE_PRINTF(" 0x%08X - (L2CTX_PG_BUF_SIZE) host rx_bd/page buffer size\n", 7429 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_PG_BUF_SIZE)); 7430 BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_HI) h/w page chain address\n", 7431 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_HI)); 7432 BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_LO) h/w page chain address\n", 7433 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_LO)); 7434 BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDIDX) h/w page consumer index\n", 7435 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDIDX)); 7436 7437 BCE_PRINTF( 7438 "----------------------------" 7439 "----------------" 7440 "----------------------------\n"); 7441 } 7442} 7443 7444 7445/****************************************************************************/ 7446/* Prints out the FTQ data. */ 7447/* */ 7448/* Returns: */ 7449/* Nothing. */ 7450/****************************************************************************/ 7451static void 7452bce_dump_ftqs(struct bce_softc *sc) 7453{ 7454 u32 cmd, ctl, cur_depth, max_depth, valid_cnt; 7455 7456 BCE_PRINTF( 7457 "----------------------------" 7458 " FTQ Data " 7459 "----------------------------\n"); 7460 7461 BCE_PRINTF(" FTQ Command Control Depth_Now Max_Depth Valid_Cnt\n"); 7462 BCE_PRINTF(" ----- ---------- ---------- ---------- ---------- ----------\n"); 7463 7464 /* Setup the generic statistic counters for the FTQ valid count. 
*/ 7465 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, 7466 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | 7467 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | 7468 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | 7469 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT))); 7470 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, 7471 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | 7472 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | 7473 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | 7474 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT))); 7475 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, 7476 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | 7477 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | 7478 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | 7479 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT))); 7480 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, 7481 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | 7482 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | 7483 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | 7484 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT))); 7485 7486 7487 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); 7488 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); 7489 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; 7490 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; 7491 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 7492 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7493 cmd, ctl, cur_depth, max_depth, valid_cnt); 7494 7495 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); 7496 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); 7497 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; 7498 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; 7499 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 7500 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7501 cmd, ctl, cur_depth, max_depth, valid_cnt); 7502 7503 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); 7504 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); 7505 cur_depth 
= (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; 7506 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; 7507 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 7508 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7509 cmd, ctl, cur_depth, max_depth, valid_cnt); 7510 7511 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); 7512 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); 7513 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; 7514 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; 7515 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 7516 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7517 cmd, ctl, cur_depth, max_depth, valid_cnt); 7518 7519 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); 7520 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); 7521 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; 7522 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; 7523 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); 7524 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7525 cmd, ctl, cur_depth, max_depth, valid_cnt); 7526 7527 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); 7528 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); 7529 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; 7530 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; 7531 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); 7532 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7533 cmd, ctl, cur_depth, max_depth, valid_cnt); 7534 7535 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); 7536 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); 7537 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22; 7538 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; 7539 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); 7540 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7541 cmd, ctl, cur_depth, max_depth, valid_cnt); 7542 7543 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); 7544 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); 7545 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; 7546 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; 7547 valid_cnt = REG_RD(sc, 
BCE_HC_STAT_GEN_STAT7); 7548 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7549 cmd, ctl, cur_depth, max_depth, valid_cnt); 7550 7551 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); 7552 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); 7553 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; 7554 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; 7555 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); 7556 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7557 cmd, ctl, cur_depth, max_depth, valid_cnt); 7558 7559 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); 7560 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); 7561 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; 7562 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; 7563 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); 7564 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7565 cmd, ctl, cur_depth, max_depth, valid_cnt); 7566 7567 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); 7568 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); 7569 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; 7570 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; 7571 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); 7572 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7573 cmd, ctl, cur_depth, max_depth, valid_cnt); 7574 7575 7576 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); 7577 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); 7578 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; 7579 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; 7580 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); 7581 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7582 cmd, ctl, cur_depth, max_depth, valid_cnt); 7583 7584 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); 7585 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); 7586 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; 7587 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; 7588 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); 7589 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7590 cmd, ctl, cur_depth, max_depth, valid_cnt); 
7591 7592 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); 7593 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); 7594 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; 7595 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; 7596 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); 7597 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7598 cmd, ctl, cur_depth, max_depth, valid_cnt); 7599 7600 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); 7601 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); 7602 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; 7603 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; 7604 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); 7605 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7606 cmd, ctl, cur_depth, max_depth, valid_cnt); 7607 7608 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); 7609 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); 7610 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; 7611 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; 7612 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); 7613 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7614 cmd, ctl, cur_depth, max_depth, valid_cnt); 7615 7616 /* Setup the generic statistic counters for the FTQ valid count. 
*/ 7617 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, 7618 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | 7619 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | 7620 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT))); 7621 7622 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); 7623 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); 7624 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; 7625 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; 7626 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 7627 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7628 cmd, ctl, cur_depth, max_depth, valid_cnt); 7629 7630 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); 7631 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); 7632 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; 7633 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; 7634 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 7635 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7636 cmd, ctl, cur_depth, max_depth, valid_cnt); 7637 7638 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); 7639 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); 7640 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; 7641 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; 7642 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 7643 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 7644 cmd, ctl, cur_depth, max_depth, valid_cnt); 7645 7646 BCE_PRINTF( 7647 "----------------------------" 7648 "----------------" 7649 "----------------------------\n"); 7650} 7651 7652 7653/****************************************************************************/ 7654/* Prints out the TX chain. */ 7655/* */ 7656/* Returns: */ 7657/* Nothing. */ 7658/****************************************************************************/ 7659static void 7660bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) 7661{ 7662 struct tx_bd *txbd; 7663 7664 /* First some info about the tx_bd chain structure. 
*/ 7665 BCE_PRINTF( 7666 "----------------------------" 7667 " tx_bd chain " 7668 "----------------------------\n"); 7669 7670 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", 7671 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES); 7672 7673 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 7674 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); 7675 7676 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD); 7677 7678 BCE_PRINTF( 7679 "----------------------------" 7680 " tx_bd data " 7681 "----------------------------\n"); 7682 7683 /* Now print out the tx_bd's themselves. */ 7684 for (int i = 0; i < count; i++) { 7685 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 7686 bce_dump_txbd(sc, tx_prod, txbd); 7687 tx_prod = NEXT_TX_BD(tx_prod); 7688 } 7689 7690 BCE_PRINTF( 7691 "----------------------------" 7692 "----------------" 7693 "----------------------------\n"); 7694} 7695 7696 7697/****************************************************************************/ 7698/* Prints out the RX chain. */ 7699/* */ 7700/* Returns: */ 7701/* Nothing. */ 7702/****************************************************************************/ 7703static void 7704bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count) 7705{ 7706 struct rx_bd *rxbd; 7707 7708 /* First some info about the rx_bd chain structure. */ 7709 BCE_PRINTF( 7710 "----------------------------" 7711 " rx_bd chain " 7712 "----------------------------\n"); 7713 7714 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", 7715 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES); 7716 7717 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 7718 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); 7719 7720 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD); 7721 7722 BCE_PRINTF( 7723 "----------------------------" 7724 " rx_bd data " 7725 "----------------------------\n"); 7726 7727 /* Now print out the rx_bd's themselves. 
*/ 7728 for (int i = 0; i < count; i++) { 7729 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 7730 bce_dump_rxbd(sc, rx_prod, rxbd); 7731 rx_prod = RX_CHAIN_IDX(rx_prod + 1); 7732 } 7733 7734 BCE_PRINTF( 7735 "----------------------------" 7736 "----------------" 7737 "----------------------------\n"); 7738} 7739 7740 7741#ifdef BCE_USE_SPLIT_HEADER 7742/****************************************************************************/ 7743/* Prints out the page chain. */ 7744/* */ 7745/* Returns: */ 7746/* Nothing. */ 7747/****************************************************************************/ 7748static void 7749bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) 7750{ 7751 struct rx_bd *pgbd; 7752 7753 /* First some info about the page chain structure. */ 7754 BCE_PRINTF( 7755 "----------------------------" 7756 " page chain " 7757 "----------------------------\n"); 7758 7759 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", 7760 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES); 7761 7762 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 7763 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); 7764 7765 BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n", 7766 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD); 7767 7768 BCE_PRINTF( 7769 "----------------------------" 7770 " page data " 7771 "----------------------------\n"); 7772 7773 /* Now print out the rx_bd's themselves. */ 7774 for (int i = 0; i < count; i++) { 7775 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; 7776 bce_dump_pgbd(sc, pg_prod, pgbd); 7777 pg_prod = PG_CHAIN_IDX(pg_prod + 1); 7778 } 7779 7780 BCE_PRINTF( 7781 "----------------------------" 7782 "----------------" 7783 "----------------------------\n"); 7784} 7785#endif 7786 7787 7788/****************************************************************************/ 7789/* Prints out the status block from host memory. */ 7790/* */ 7791/* Returns: */ 7792/* Nothing. 
*/ 7793/****************************************************************************/ 7794static void 7795bce_dump_status_block(struct bce_softc *sc) 7796{ 7797 struct status_block *sblk; 7798 7799 sblk = sc->status_block; 7800 7801 BCE_PRINTF( 7802 "----------------------------" 7803 " Status Block " 7804 "----------------------------\n"); 7805 7806 BCE_PRINTF(" 0x%08X - attn_bits\n", 7807 sblk->status_attn_bits); 7808 7809 BCE_PRINTF(" 0x%08X - attn_bits_ack\n", 7810 sblk->status_attn_bits_ack); 7811 7812 BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n", 7813 sblk->status_rx_quick_consumer_index0, 7814 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0)); 7815 7816 BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n", 7817 sblk->status_tx_quick_consumer_index0, 7818 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0)); 7819 7820 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); 7821 7822 /* Theses indices are not used for normal L2 drivers. */ 7823 if (sblk->status_rx_quick_consumer_index1) 7824 BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n", 7825 sblk->status_rx_quick_consumer_index1, 7826 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1)); 7827 7828 if (sblk->status_tx_quick_consumer_index1) 7829 BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n", 7830 sblk->status_tx_quick_consumer_index1, 7831 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1)); 7832 7833 if (sblk->status_rx_quick_consumer_index2) 7834 BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n", 7835 sblk->status_rx_quick_consumer_index2, 7836 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2)); 7837 7838 if (sblk->status_tx_quick_consumer_index2) 7839 BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n", 7840 sblk->status_tx_quick_consumer_index2, 7841 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2)); 7842 7843 if (sblk->status_rx_quick_consumer_index3) 7844 BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n", 7845 sblk->status_rx_quick_consumer_index3, 7846 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3)); 
7847 7848 if (sblk->status_tx_quick_consumer_index3) 7849 BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n", 7850 sblk->status_tx_quick_consumer_index3, 7851 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3)); 7852 7853 if (sblk->status_rx_quick_consumer_index4 || 7854 sblk->status_rx_quick_consumer_index5) 7855 BCE_PRINTF("rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 7856 sblk->status_rx_quick_consumer_index4, 7857 sblk->status_rx_quick_consumer_index5); 7858 7859 if (sblk->status_rx_quick_consumer_index6 || 7860 sblk->status_rx_quick_consumer_index7) 7861 BCE_PRINTF("rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 7862 sblk->status_rx_quick_consumer_index6, 7863 sblk->status_rx_quick_consumer_index7); 7864 7865 if (sblk->status_rx_quick_consumer_index8 || 7866 sblk->status_rx_quick_consumer_index9) 7867 BCE_PRINTF("rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 7868 sblk->status_rx_quick_consumer_index8, 7869 sblk->status_rx_quick_consumer_index9); 7870 7871 if (sblk->status_rx_quick_consumer_index10 || 7872 sblk->status_rx_quick_consumer_index11) 7873 BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 7874 sblk->status_rx_quick_consumer_index10, 7875 sblk->status_rx_quick_consumer_index11); 7876 7877 if (sblk->status_rx_quick_consumer_index12 || 7878 sblk->status_rx_quick_consumer_index13) 7879 BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 7880 sblk->status_rx_quick_consumer_index12, 7881 sblk->status_rx_quick_consumer_index13); 7882 7883 if (sblk->status_rx_quick_consumer_index14 || 7884 sblk->status_rx_quick_consumer_index15) 7885 BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 7886 sblk->status_rx_quick_consumer_index14, 7887 sblk->status_rx_quick_consumer_index15); 7888 7889 if (sblk->status_completion_producer_index || 7890 sblk->status_cmd_consumer_index) 7891 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", 7892 sblk->status_completion_producer_index, 7893 sblk->status_cmd_consumer_index); 7894 7895 BCE_PRINTF( 7896 "----------------------------" 
7897 "----------------" 7898 "----------------------------\n"); 7899} 7900 7901 7902/****************************************************************************/ 7903/* Prints out the statistics block from host memory. */ 7904/* */ 7905/* Returns: */ 7906/* Nothing. */ 7907/****************************************************************************/ 7908static void 7909bce_dump_stats_block(struct bce_softc *sc) 7910{ 7911 struct statistics_block *sblk; 7912 7913 sblk = sc->stats_block; 7914 7915 BCE_PRINTF( 7916 "---------------" 7917 " Stats Block (All Stats Not Shown Are 0) " 7918 "---------------\n"); 7919 7920 if (sblk->stat_IfHCInOctets_hi 7921 || sblk->stat_IfHCInOctets_lo) 7922 BCE_PRINTF("0x%08X:%08X : " 7923 "IfHcInOctets\n", 7924 sblk->stat_IfHCInOctets_hi, 7925 sblk->stat_IfHCInOctets_lo); 7926 7927 if (sblk->stat_IfHCInBadOctets_hi 7928 || sblk->stat_IfHCInBadOctets_lo) 7929 BCE_PRINTF("0x%08X:%08X : " 7930 "IfHcInBadOctets\n", 7931 sblk->stat_IfHCInBadOctets_hi, 7932 sblk->stat_IfHCInBadOctets_lo); 7933 7934 if (sblk->stat_IfHCOutOctets_hi 7935 || sblk->stat_IfHCOutOctets_lo) 7936 BCE_PRINTF("0x%08X:%08X : " 7937 "IfHcOutOctets\n", 7938 sblk->stat_IfHCOutOctets_hi, 7939 sblk->stat_IfHCOutOctets_lo); 7940 7941 if (sblk->stat_IfHCOutBadOctets_hi 7942 || sblk->stat_IfHCOutBadOctets_lo) 7943 BCE_PRINTF("0x%08X:%08X : " 7944 "IfHcOutBadOctets\n", 7945 sblk->stat_IfHCOutBadOctets_hi, 7946 sblk->stat_IfHCOutBadOctets_lo); 7947 7948 if (sblk->stat_IfHCInUcastPkts_hi 7949 || sblk->stat_IfHCInUcastPkts_lo) 7950 BCE_PRINTF("0x%08X:%08X : " 7951 "IfHcInUcastPkts\n", 7952 sblk->stat_IfHCInUcastPkts_hi, 7953 sblk->stat_IfHCInUcastPkts_lo); 7954 7955 if (sblk->stat_IfHCInBroadcastPkts_hi 7956 || sblk->stat_IfHCInBroadcastPkts_lo) 7957 BCE_PRINTF("0x%08X:%08X : " 7958 "IfHcInBroadcastPkts\n", 7959 sblk->stat_IfHCInBroadcastPkts_hi, 7960 sblk->stat_IfHCInBroadcastPkts_lo); 7961 7962 if (sblk->stat_IfHCInMulticastPkts_hi 7963 || sblk->stat_IfHCInMulticastPkts_lo) 7964 
BCE_PRINTF("0x%08X:%08X : " 7965 "IfHcInMulticastPkts\n", 7966 sblk->stat_IfHCInMulticastPkts_hi, 7967 sblk->stat_IfHCInMulticastPkts_lo); 7968 7969 if (sblk->stat_IfHCOutUcastPkts_hi 7970 || sblk->stat_IfHCOutUcastPkts_lo) 7971 BCE_PRINTF("0x%08X:%08X : " 7972 "IfHcOutUcastPkts\n", 7973 sblk->stat_IfHCOutUcastPkts_hi, 7974 sblk->stat_IfHCOutUcastPkts_lo); 7975 7976 if (sblk->stat_IfHCOutBroadcastPkts_hi 7977 || sblk->stat_IfHCOutBroadcastPkts_lo) 7978 BCE_PRINTF("0x%08X:%08X : " 7979 "IfHcOutBroadcastPkts\n", 7980 sblk->stat_IfHCOutBroadcastPkts_hi, 7981 sblk->stat_IfHCOutBroadcastPkts_lo); 7982 7983 if (sblk->stat_IfHCOutMulticastPkts_hi 7984 || sblk->stat_IfHCOutMulticastPkts_lo) 7985 BCE_PRINTF("0x%08X:%08X : " 7986 "IfHcOutMulticastPkts\n", 7987 sblk->stat_IfHCOutMulticastPkts_hi, 7988 sblk->stat_IfHCOutMulticastPkts_lo); 7989 7990 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 7991 BCE_PRINTF(" 0x%08X : " 7992 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 7993 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 7994 7995 if (sblk->stat_Dot3StatsCarrierSenseErrors) 7996 BCE_PRINTF(" 0x%08X : Dot3StatsCarrierSenseErrors\n", 7997 sblk->stat_Dot3StatsCarrierSenseErrors); 7998 7999 if (sblk->stat_Dot3StatsFCSErrors) 8000 BCE_PRINTF(" 0x%08X : Dot3StatsFCSErrors\n", 8001 sblk->stat_Dot3StatsFCSErrors); 8002 8003 if (sblk->stat_Dot3StatsAlignmentErrors) 8004 BCE_PRINTF(" 0x%08X : Dot3StatsAlignmentErrors\n", 8005 sblk->stat_Dot3StatsAlignmentErrors); 8006 8007 if (sblk->stat_Dot3StatsSingleCollisionFrames) 8008 BCE_PRINTF(" 0x%08X : Dot3StatsSingleCollisionFrames\n", 8009 sblk->stat_Dot3StatsSingleCollisionFrames); 8010 8011 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 8012 BCE_PRINTF(" 0x%08X : Dot3StatsMultipleCollisionFrames\n", 8013 sblk->stat_Dot3StatsMultipleCollisionFrames); 8014 8015 if (sblk->stat_Dot3StatsDeferredTransmissions) 8016 BCE_PRINTF(" 0x%08X : Dot3StatsDeferredTransmissions\n", 8017 
sblk->stat_Dot3StatsDeferredTransmissions); 8018 8019 if (sblk->stat_Dot3StatsExcessiveCollisions) 8020 BCE_PRINTF(" 0x%08X : Dot3StatsExcessiveCollisions\n", 8021 sblk->stat_Dot3StatsExcessiveCollisions); 8022 8023 if (sblk->stat_Dot3StatsLateCollisions) 8024 BCE_PRINTF(" 0x%08X : Dot3StatsLateCollisions\n", 8025 sblk->stat_Dot3StatsLateCollisions); 8026 8027 if (sblk->stat_EtherStatsCollisions) 8028 BCE_PRINTF(" 0x%08X : EtherStatsCollisions\n", 8029 sblk->stat_EtherStatsCollisions); 8030 8031 if (sblk->stat_EtherStatsFragments) 8032 BCE_PRINTF(" 0x%08X : EtherStatsFragments\n", 8033 sblk->stat_EtherStatsFragments); 8034 8035 if (sblk->stat_EtherStatsJabbers) 8036 BCE_PRINTF(" 0x%08X : EtherStatsJabbers\n", 8037 sblk->stat_EtherStatsJabbers); 8038 8039 if (sblk->stat_EtherStatsUndersizePkts) 8040 BCE_PRINTF(" 0x%08X : EtherStatsUndersizePkts\n", 8041 sblk->stat_EtherStatsUndersizePkts); 8042 8043 if (sblk->stat_EtherStatsOverrsizePkts) 8044 BCE_PRINTF(" 0x%08X : EtherStatsOverrsizePkts\n", 8045 sblk->stat_EtherStatsOverrsizePkts); 8046 8047 if (sblk->stat_EtherStatsPktsRx64Octets) 8048 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx64Octets\n", 8049 sblk->stat_EtherStatsPktsRx64Octets); 8050 8051 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 8052 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 8053 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 8054 8055 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 8056 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx128Octetsto255Octets\n", 8057 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 8058 8059 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 8060 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx256Octetsto511Octets\n", 8061 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 8062 8063 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 8064 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n", 8065 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 8066 8067 if 
(sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 8068 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n", 8069 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 8070 8071 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 8072 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n", 8073 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 8074 8075 if (sblk->stat_EtherStatsPktsTx64Octets) 8076 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx64Octets\n", 8077 sblk->stat_EtherStatsPktsTx64Octets); 8078 8079 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 8080 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx65Octetsto127Octets\n", 8081 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 8082 8083 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 8084 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx128Octetsto255Octets\n", 8085 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 8086 8087 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 8088 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx256Octetsto511Octets\n", 8089 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 8090 8091 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 8092 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n", 8093 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 8094 8095 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 8096 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n", 8097 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 8098 8099 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 8100 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n", 8101 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 8102 8103 if (sblk->stat_XonPauseFramesReceived) 8104 BCE_PRINTF(" 0x%08X : XonPauseFramesReceived\n", 8105 sblk->stat_XonPauseFramesReceived); 8106 8107 if (sblk->stat_XoffPauseFramesReceived) 8108 BCE_PRINTF(" 0x%08X : XoffPauseFramesReceived\n", 8109 sblk->stat_XoffPauseFramesReceived); 8110 8111 if (sblk->stat_OutXonSent) 8112 BCE_PRINTF(" 
0x%08X : OutXonSent\n", 8113 sblk->stat_OutXonSent); 8114 8115 if (sblk->stat_OutXoffSent) 8116 BCE_PRINTF(" 0x%08X : OutXoffSent\n", 8117 sblk->stat_OutXoffSent); 8118 8119 if (sblk->stat_FlowControlDone) 8120 BCE_PRINTF(" 0x%08X : FlowControlDone\n", 8121 sblk->stat_FlowControlDone); 8122 8123 if (sblk->stat_MacControlFramesReceived) 8124 BCE_PRINTF(" 0x%08X : MacControlFramesReceived\n", 8125 sblk->stat_MacControlFramesReceived); 8126 8127 if (sblk->stat_XoffStateEntered) 8128 BCE_PRINTF(" 0x%08X : XoffStateEntered\n", 8129 sblk->stat_XoffStateEntered); 8130 8131 if (sblk->stat_IfInFramesL2FilterDiscards) 8132 BCE_PRINTF(" 0x%08X : IfInFramesL2FilterDiscards\n", 8133 sblk->stat_IfInFramesL2FilterDiscards); 8134 8135 if (sblk->stat_IfInRuleCheckerDiscards) 8136 BCE_PRINTF(" 0x%08X : IfInRuleCheckerDiscards\n", 8137 sblk->stat_IfInRuleCheckerDiscards); 8138 8139 if (sblk->stat_IfInFTQDiscards) 8140 BCE_PRINTF(" 0x%08X : IfInFTQDiscards\n", 8141 sblk->stat_IfInFTQDiscards); 8142 8143 if (sblk->stat_IfInMBUFDiscards) 8144 BCE_PRINTF(" 0x%08X : IfInMBUFDiscards\n", 8145 sblk->stat_IfInMBUFDiscards); 8146 8147 if (sblk->stat_IfInRuleCheckerP4Hit) 8148 BCE_PRINTF(" 0x%08X : IfInRuleCheckerP4Hit\n", 8149 sblk->stat_IfInRuleCheckerP4Hit); 8150 8151 if (sblk->stat_CatchupInRuleCheckerDiscards) 8152 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerDiscards\n", 8153 sblk->stat_CatchupInRuleCheckerDiscards); 8154 8155 if (sblk->stat_CatchupInFTQDiscards) 8156 BCE_PRINTF(" 0x%08X : CatchupInFTQDiscards\n", 8157 sblk->stat_CatchupInFTQDiscards); 8158 8159 if (sblk->stat_CatchupInMBUFDiscards) 8160 BCE_PRINTF(" 0x%08X : CatchupInMBUFDiscards\n", 8161 sblk->stat_CatchupInMBUFDiscards); 8162 8163 if (sblk->stat_CatchupInRuleCheckerP4Hit) 8164 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerP4Hit\n", 8165 sblk->stat_CatchupInRuleCheckerP4Hit); 8166 8167 BCE_PRINTF( 8168 "----------------------------" 8169 "----------------" 8170 "----------------------------\n"); 8171} 8172 8173 
8174/****************************************************************************/ 8175/* Prints out a summary of the driver state. */ 8176/* */ 8177/* Returns: */ 8178/* Nothing. */ 8179/****************************************************************************/ 8180static void 8181bce_dump_driver_state(struct bce_softc *sc) 8182{ 8183 u32 val_hi, val_lo; 8184 8185 BCE_PRINTF( 8186 "-----------------------------" 8187 " Driver State " 8188 "-----------------------------\n"); 8189 8190 val_hi = BCE_ADDR_HI(sc); 8191 val_lo = BCE_ADDR_LO(sc); 8192 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n", 8193 val_hi, val_lo); 8194 8195 val_hi = BCE_ADDR_HI(sc->bce_vhandle); 8196 val_lo = BCE_ADDR_LO(sc->bce_vhandle); 8197 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n", 8198 val_hi, val_lo); 8199 8200 val_hi = BCE_ADDR_HI(sc->status_block); 8201 val_lo = BCE_ADDR_LO(sc->status_block); 8202 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n", 8203 val_hi, val_lo); 8204 8205 val_hi = BCE_ADDR_HI(sc->stats_block); 8206 val_lo = BCE_ADDR_LO(sc->stats_block); 8207 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n", 8208 val_hi, val_lo); 8209 8210 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 8211 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 8212 BCE_PRINTF( 8213 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n", 8214 val_hi, val_lo); 8215 8216 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 8217 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 8218 BCE_PRINTF( 8219 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n", 8220 val_hi, val_lo); 8221 8222#ifdef BCE_USE_SPLIT_HEADER 8223 val_hi = BCE_ADDR_HI(sc->pg_bd_chain); 8224 val_lo = BCE_ADDR_LO(sc->pg_bd_chain); 8225 BCE_PRINTF( 8226 "0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n", 8227 val_hi, val_lo); 8228#endif 8229 8230 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 8231 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 8232 
BCE_PRINTF( 8233 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 8234 val_hi, val_lo); 8235 8236 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 8237 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 8238 BCE_PRINTF( 8239 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 8240 val_hi, val_lo); 8241 8242#ifdef BCE_USE_SPLIT_HEADER 8243 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); 8244 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); 8245 BCE_PRINTF( 8246 "0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n", 8247 val_hi, val_lo); 8248#endif 8249 8250 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) h/w intrs\n", 8251 sc->interrupts_generated); 8252 8253 BCE_PRINTF(" 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 8254 sc->rx_interrupts); 8255 8256 BCE_PRINTF(" 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 8257 sc->tx_interrupts); 8258 8259 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) status block index\n", 8260 sc->last_status_idx); 8261 8262 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n", 8263 sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); 8264 8265 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n", 8266 sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); 8267 8268 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 8269 sc->tx_prod_bseq); 8270 8271 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n", 8272 sc->debug_tx_mbuf_alloc); 8273 8274 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 8275 sc->used_tx_bd); 8276 8277 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 8278 sc->tx_hi_watermark, sc->max_tx_bd); 8279 8280 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n", 8281 sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); 8282 8283 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n", 8284 sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); 8285 8286 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n", 8287 
sc->rx_prod_bseq); 8288 8289 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n", 8290 sc->debug_rx_mbuf_alloc); 8291 8292 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 8293 sc->free_rx_bd); 8294 8295#ifdef BCE_USE_SPLIT_HEADER 8296 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer index\n", 8297 sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); 8298 8299 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n", 8300 sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); 8301 8302 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n", 8303 sc->debug_pg_mbuf_alloc); 8304 8305 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page rx_bd's\n", 8306 sc->free_pg_bd); 8307 8308 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n", 8309 sc->pg_low_watermark, sc->max_pg_bd); 8310#endif 8311 8312 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed) " 8313 "mbuf alloc failures\n", 8314 sc->mbuf_alloc_failed); 8315 8316 BCE_PRINTF(" 0x%08X - (sc->debug_mbuf_sim_alloc_failed) " 8317 "simulated mbuf alloc failures\n", 8318 sc->debug_mbuf_sim_alloc_failed); 8319 8320 BCE_PRINTF( 8321 "----------------------------" 8322 "----------------" 8323 "----------------------------\n"); 8324} 8325 8326 8327/****************************************************************************/ 8328/* Prints out the hardware state through a summary of important register, */ 8329/* followed by a complete register dump. */ 8330/* */ 8331/* Returns: */ 8332/* Nothing. 
*/ 8333/****************************************************************************/ 8334static void 8335bce_dump_hw_state(struct bce_softc *sc) 8336{ 8337 u32 val; 8338 8339 BCE_PRINTF( 8340 "----------------------------" 8341 " Hardware State " 8342 "----------------------------\n"); 8343 8344 BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver); 8345 8346 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 8347 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n", 8348 val, BCE_MISC_ENABLE_STATUS_BITS); 8349 8350 val = REG_RD(sc, BCE_DMA_STATUS); 8351 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS); 8352 8353 val = REG_RD(sc, BCE_CTX_STATUS); 8354 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS); 8355 8356 val = REG_RD(sc, BCE_EMAC_STATUS); 8357 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS); 8358 8359 val = REG_RD(sc, BCE_RPM_STATUS); 8360 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS); 8361 8362 val = REG_RD(sc, 0x2004); 8363 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004); 8364 8365 val = REG_RD(sc, BCE_RV2P_STATUS); 8366 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS); 8367 8368 val = REG_RD(sc, 0x2c04); 8369 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04); 8370 8371 val = REG_RD(sc, BCE_TBDR_STATUS); 8372 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS); 8373 8374 val = REG_RD(sc, BCE_TDMA_STATUS); 8375 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS); 8376 8377 val = REG_RD(sc, BCE_HC_STATUS); 8378 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS); 8379 8380 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 8381 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE); 8382 8383 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 8384 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE); 8385 8386 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 8387 BCE_PRINTF("0x%08X - 
(0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE); 8388 8389 val = REG_RD_IND(sc, BCE_COM_CPU_STATE); 8390 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE); 8391 8392 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 8393 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE); 8394 8395 val = REG_RD_IND(sc, BCE_CP_CPU_STATE); 8396 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE); 8397 8398 BCE_PRINTF( 8399 "----------------------------" 8400 "----------------" 8401 "----------------------------\n"); 8402 8403 BCE_PRINTF( 8404 "----------------------------" 8405 " Register Dump " 8406 "----------------------------\n"); 8407 8408 for (int i = 0x400; i < 0x8000; i += 0x10) { 8409 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 8410 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 8411 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 8412 } 8413 8414 BCE_PRINTF( 8415 "----------------------------" 8416 "----------------" 8417 "----------------------------\n"); 8418} 8419 8420 8421/****************************************************************************/ 8422/* Prints out the bootcode state. */ 8423/* */ 8424/* Returns: */ 8425/* Nothing. 
*/
/****************************************************************************/
static void
bce_dump_bc_state(struct bce_softc *sc)
{

	BCE_PRINTF(
	    "----------------------------"
	    " Bootcode State "
	    "----------------------------\n");

	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);

	/* The bootcode mailbox words live in shared memory and are read
	 * indirectly, relative to sc->bce_shmem_base. */
	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
	    REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE),
	    BCE_BC_RESET_TYPE);

	BCE_PRINTF("0x%08X - (0x%06X) state\n",
	    REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE),
	    BCE_BC_STATE);

	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
	    REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION),
	    BCE_BC_CONDITION);

	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
	    REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD),
	    BCE_BC_STATE_DEBUG_CMD);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TXP state.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_dump_txp_state(struct bce_softc *sc)
{
	u32 val1;

	BCE_PRINTF(
	    "----------------------------"
	    " TXP State "
	    "----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1,
	    BCE_TXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1,
	    BCE_TXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1,
	    BCE_TXP_CPU_EVENT_MASK);

	BCE_PRINTF(
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
		/*
		 * Skip the big blank space between the end of the TXP
		 * register bank and the scratchpad at 0x60000.  The
		 * original test was "i < 0x454000 && i > 0x5ffff": the
		 * constant carried an extra zero and the operator should
		 * be "||" (print everything OUTSIDE the blank range,
		 * matching the RXP/TPAT dump routines); as written it
		 * also suppressed the TXP registers themselves.
		 */
		if ((i < 0x45400) || (i > 0x5ffff))
			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			    i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP state.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_dump_rxp_state(struct bce_softc *sc)
{
	u32 val1;

	BCE_PRINTF(
	    "----------------------------"
	    " RXP State "
	    "----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1,
	    BCE_RXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1,
	    BCE_RXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1,
	    BCE_RXP_CPU_EVENT_MASK);

	BCE_PRINTF(
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
		/*
		 * Skip the big blank space between the end of the RXP
		 * register bank (0xc5400) and 0xe0000.  The original test
		 * used "&&", making "i < 0xc5400 && i > 0xdffff" impossible
		 * to satisfy, so the loop never printed anything; "||"
		 * (print everything outside the blank range) is what the
		 * comment describes.
		 */
		if ((i < 0xc5400) || (i > 0xdffff))
			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			    i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TPAT state.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 8557/****************************************************************************/ 8558static void 8559bce_dump_tpat_state(struct bce_softc *sc) 8560{ 8561 u32 val1; 8562 8563 BCE_PRINTF( 8564 "----------------------------" 8565 " TPAT State " 8566 "----------------------------\n"); 8567 8568 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 8569 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE); 8570 8571 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 8572 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE); 8573 8574 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 8575 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK); 8576 8577 BCE_PRINTF( 8578 "----------------------------" 8579 " Register Dump " 8580 "----------------------------\n"); 8581 8582 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 8583 /* Skip the big blank spaces */ 8584 if (i < 0x854000 && i > 0x9ffff) 8585 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 8586 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), 8587 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); 8588 } 8589 8590 BCE_PRINTF( 8591 "----------------------------" 8592 "----------------" 8593 "----------------------------\n"); 8594} 8595 8596 8597/* ToDo: Add CP and COM proccessor state dumps. */ 8598 8599 8600/****************************************************************************/ 8601/* Prints out the driver state and then enters the debugger. */ 8602/* */ 8603/* Returns: */ 8604/* Nothing. */ 8605/****************************************************************************/ 8606static void 8607bce_breakpoint(struct bce_softc *sc) 8608{ 8609 8610 /* 8611 * Unreachable code to silence compiler warnings 8612 * about unused functions. 
8613 */ 8614 if (0) { 8615 bce_freeze_controller(sc); 8616 bce_unfreeze_controller(sc); 8617 bce_dump_txbd(sc, 0, NULL); 8618 bce_dump_rxbd(sc, 0, NULL); 8619 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 8620 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD); 8621 bce_dump_l2fhdr(sc, 0, NULL); 8622 bce_dump_ctx(sc, RX_CID); 8623 bce_dump_ftqs(sc); 8624 bce_dump_tx_chain(sc, 0, USABLE_TX_BD); 8625 bce_dump_rx_chain(sc, 0, USABLE_RX_BD); 8626 bce_dump_status_block(sc); 8627 bce_dump_stats_block(sc); 8628 bce_dump_driver_state(sc); 8629 bce_dump_hw_state(sc); 8630 bce_dump_bc_state(sc); 8631 bce_dump_txp_state(sc); 8632 bce_dump_rxp_state(sc); 8633 bce_dump_tpat_state(sc); 8634#ifdef BCE_USE_SPLIT_HEADER 8635 bce_dump_pgbd(sc, 0, NULL); 8636 bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD); 8637 bce_dump_pg_chain(sc, 0, USABLE_PG_BD); 8638#endif 8639 } 8640 8641 bce_dump_status_block(sc); 8642 bce_dump_driver_state(sc); 8643 8644 /* Call the debugger. */ 8645 breakpoint(); 8646 8647 return; 8648} 8649#endif 8650 8651