/* if_bnx.c revision 1.1 */
/*	$OpenBSD: if_bnx.c,v 1.1 2006/06/26 04:57:54 brad Exp $	*/

/*-
 * Copyright (c) 2006 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#if 0
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5708C B1
 *
 * The following controllers are not supported by this driver:
 * (These are not "Production" versions of the controller.)
 *
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1, A2, A3
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0, B1
 */

/* NOTE(review): BNX_DEBUG is unconditionally defined here, so all of the
 * debug globals below and every DBPRINT/DBRUN in this file are compiled in. */
#define BNX_DEBUG

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
char bnx_driver_version[] = "v0.9.6";

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
	u32 bnx_debug = BNX_WARN;

	/* Failure-injection rates for the knobs below:    */
	/*          0 = Never                              */
	/*          1 = 1 in 2,147,483,648                 */
	/*        256 = 1 in     8,388,608                 */
	/*       2048 = 1 in     1,048,576                 */
	/*      65536 = 1 in        32,768                 */
	/*    1048576 = 1 in         2,048                 */
	/*  268435456 = 1 in             8                 */
	/*  536870912 = 1 in             4                 */
	/* 1073741824 = 1 in             2                 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.    */
/****************************************************************************/
const struct pci_matchid bnx_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S }
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/* Entries are matched against the strapping value read from the chip;     */
/* see struct flash_spec in if_bnxreg.h for the field layout.              */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/****************************************************************************/
/* OpenBSD device entry points.                                             */
/****************************************************************************/
int	bnx_probe	(struct device *, void *, void *);
void	bnx_attach	(struct device *, struct device *, void *);
#if 0
void	bnx_detach	(void *);
#endif
void	bnx_shutdown	(void *);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf		(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain	(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain	(struct bnx_softc *, int, int);
void	bnx_dump_txbd		(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd		(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr		(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain	(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain	(struct bnx_softc *, int, int);
void	bnx_dump_status_block	(struct bnx_softc *);
void	bnx_dump_stats_block	(struct bnx_softc *);
void	bnx_dump_driver_state	(struct bnx_softc *);
void	bnx_dump_hw_state	(struct bnx_softc *);
void	bnx_breakpoint		(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u32	bnx_reg_rd_ind		(struct bnx_softc *, u32);
void	bnx_reg_wr_ind		(struct bnx_softc *, u32, u32);
void	bnx_ctx_wr		(struct bnx_softc *, u32, u32, u32);
int	bnx_miibus_read_reg	(struct device *, int, int);
void	bnx_miibus_write_reg	(struct device *, int, int, int);
void	bnx_miibus_statchg	(struct device *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock	(struct bnx_softc *);
int	bnx_release_nvram_lock	(struct bnx_softc *);
void	bnx_enable_nvram_access	(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword	(struct bnx_softc *, u32, u8 *, u32);
int	bnx_init_nvram		(struct bnx_softc *);
int	bnx_nvram_read		(struct bnx_softc *, u32, u8 *, int);
int	bnx_nvram_test		(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write	(struct bnx_softc *);
void	bnx_disable_nvram_write	(struct bnx_softc *);
int	bnx_nvram_erase_page	(struct bnx_softc *, u32);
int	bnx_nvram_write_dword	(struct bnx_softc *, u32, u8 *, u32);
int	bnx_nvram_write		(struct bnx_softc *, u32, u8 *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
int	bnx_dma_alloc		(struct bnx_softc *);
void	bnx_dma_free		(struct bnx_softc *);
void	bnx_release_resources	(struct bnx_softc *);
void	bnx_dma_map_tx_desc	(void *, bus_dmamap_t);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync		(struct bnx_softc *, u32);
void	bnx_load_rv2p_fw	(struct bnx_softc *, u32 *, u32, u32);
void	bnx_load_cpu_fw		(struct bnx_softc *, struct cpu_reg *, struct fw_info *);
void	bnx_init_cpus		(struct bnx_softc *);

void	bnx_stop		(struct bnx_softc *);
int	bnx_reset		(struct bnx_softc *, u32);
int	bnx_chipinit		(struct bnx_softc *);
int	bnx_blockinit		(struct bnx_softc *);
int	bnx_get_buf		(struct bnx_softc *, struct mbuf *, u16 *, u16 *, u32 *);

int	bnx_init_tx_chain	(struct bnx_softc *);
int	bnx_init_rx_chain	(struct bnx_softc *);
void	bnx_free_rx_chain	(struct bnx_softc *);
void	bnx_free_tx_chain	(struct bnx_softc *);

int	bnx_tx_encap		(struct bnx_softc *, struct mbuf *, u16 *, u16 *, u32 *);
void	bnx_start		(struct ifnet *);
int	bnx_ioctl		(struct ifnet *, u_long, caddr_t);
void	bnx_watchdog		(struct ifnet *);
int	bnx_ifmedia_upd		(struct ifnet *);
void	bnx_ifmedia_sts		(struct ifnet *, struct ifmediareq *);
void	bnx_init		(void *);

void	bnx_init_context	(struct bnx_softc *);
void	bnx_get_mac_addr	(struct bnx_softc *);
void	bnx_set_mac_addr	(struct bnx_softc *);
void	bnx_phy_intr		(struct bnx_softc *);
void	bnx_rx_intr		(struct bnx_softc *);
void	bnx_tx_intr		(struct bnx_softc *);
void	bnx_disable_intr	(struct bnx_softc *);
void	bnx_enable_intr		(struct bnx_softc *);

int	bnx_intr		(void *);
void	bnx_set_rx_mode		(struct bnx_softc *);
void	bnx_stats_update	(struct bnx_softc *);
void	bnx_tick		(void *);

/****************************************************************************/
/* OpenBSD device dispatch table.
 */
/****************************************************************************/
struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};

struct cfdriver bnx_cd = {
	0, "bnx", DV_IFNET
};

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
/****************************************************************************/
int
bnx_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
	    sizeof(bnx_devices)/sizeof(bnx_devices[0])));
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc *sc = (struct bnx_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	u32 val;
	pcireg_t memtype;
	bus_size_t size;

	/* Keep a copy of the PCI attach args for the indirect register
	 * accessors (bnx_reg_rd_ind/bnx_reg_wr_ind). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BNX_PCI_BAR0,
		    memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
		    NULL, &size, 0) == 0)
			break;
		/* FALLTHROUGH: mapping failed, treat like unknown memtype. */
	default:
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch(BNX_CHIP_ID(sc)) {
	case BNX_CHIP_ID_5706_A0:
	case BNX_CHIP_ID_5706_A1:
	case BNX_CHIP_ID_5708_A0:
	case BNX_CHIP_ID_5708_B0:
		/* 0x08 is the PCI class/revision config register; the low
		 * byte is the revision ID (e.g. 0x00 -> A0, 0x11 -> B1). */
		printf(": unsupported controller revision (%c%d)!\n",
		    (((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf0) >> 4) + 'A'),
		    (pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf));
		goto bnx_attach_fail;
	}

	/* SerDes (fiber) bond options are rejected early. */
	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		printf(": SerDes controllers are not supported!\n");
		goto bnx_attach_fail;
	}

#if 0
	/*
	 * The embedded PCIe to PCI-X bridge (EPB)
	 * in the 5708 cannot address memory above
	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)
		sc->max_bus_addr = BNX_BUS_SPACE_MAXADDR;
	else
		sc->max_bus_addr = BUS_SPACE_MAXADDR;
#endif

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf(": Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf(": NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

	/* NOTE(review): "BNX_DRBUG" looks like a typo for "BNX_DEBUG"
	 * (defined above), so the debug branch below is never compiled.
	 * Left as-is here since "fixing" it would force per-packet
	 * interrupts whenever BNX_DEBUG is defined -- TODO confirm intent. */
#ifdef BNX_DRBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second (low byte masked off). */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/*
	 * The copper based NetXtreme II controllers
	 * use an integrated PHY at address 1 while
	 * the SerDes controllers use a PHY at
	 * address 2.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
				BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
		}
	}

	/* NOTE(review): unreachable -- SerDes bond options already fail
	 * the attach earlier (SERDES_BIT check above). */
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		printf(": SerDes is not supported by this driver!\n");
		goto bnx_attach_fail;
	}

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);
	ifp->if_hardmtu = BNX_MAX_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
	sc->mbuf_alloc_size = MCLBYTES;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc,
	    sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bnx_attach_fail;
	}

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
 */
/****************************************************************************/
#if 0
/* NOTE(review): dead code carried over from the FreeBSD bce(4) driver and
 * disabled with #if 0.  As written it has several bugs: `sc` is dereferenced
 * to initialize `ifp` before it is assigned, `dev`, device_get_softc(),
 * bus_generic_detach(), device_delete_child() and sc->bnx_ifmedia do not
 * exist in this OpenBSD port, and it returns 0 from a void function.
 * It would need a full rewrite before being enabled. */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		ifmedia_removeall(&sc->bnx_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bnx_mii);
	}

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
#endif

/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
void
bnx_shutdown(void *xsc)
{
	struct bnx_softc *sc = (struct bnx_softc *)xsc;

	/* Quiesce and reset the chip so it is sane across a reboot. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
 */
/****************************************************************************/
u32
bnx_reg_rd_ind(struct bnx_softc *sc, u32 offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	/* Select the target register via the config-space window address. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, offset);
#ifdef BNX_DEBUG
	{
		u32 val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
			__FUNCTION__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, u32 offset, u32 val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
		__FUNCTION__, offset, val);

	/* Select the target register, then write the data through the window. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u32 cid_addr, u32 offset, u32 val)
{

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);

	/* The context address is the connection ID base plus the offset. */
	offset += cid_addr;
	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
	REG_WR(sc, BNX_CTX_DATA, val);
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	u32 val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Temporarily disable hardware auto-polling so it does not race
	 * with our manual MDIO transaction; re-enabled below. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Start the read transaction. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
		BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
		BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll for completion (START_BUSY clears when done). */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore hardware auto-polling if we disabled it above. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);

}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	u32 val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Temporarily disable hardware auto-polling (see read_reg). */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Start the write transaction. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
		BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll for completion (START_BUSY clears when done). */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY)
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore hardware auto-polling if we disabled it above. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct device *dev)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	struct mii_data *mii = &sc->bnx_mii;

	/* Clear the port selection bits before setting the new mode. */
	BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BNX_INFO, "Setting GMII interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BNX_INFO, "Setting MII interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
		BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
	}
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is used by    */
/* the driver (the code below requests/tests arbitration bit 2).           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	u32 val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.
*/ 992/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 993/* for use by the driver. */ 994/* */ 995/* Returns: */ 996/* 0 on success, positive value on failure. */ 997/****************************************************************************/ 998int 999bnx_release_nvram_lock(struct bnx_softc *sc) 1000{ 1001 int j; 1002 u32 val; 1003 1004 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n"); 1005 1006 /* 1007 * Relinquish nvram interface. 1008 */ 1009 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2); 1010 1011 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1012 val = REG_RD(sc, BNX_NVM_SW_ARB); 1013 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2)) 1014 break; 1015 1016 DELAY(5); 1017 } 1018 1019 if (j >= NVRAM_TIMEOUT_COUNT) { 1020 DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n"); 1021 return EBUSY; 1022 } 1023 1024 return 0; 1025} 1026 1027#ifdef BNX_NVRAM_WRITE_SUPPORT 1028/****************************************************************************/ 1029/* Enable NVRAM write access. */ 1030/* */ 1031/* Before writing to NVRAM the caller must enable NVRAM writes. */ 1032/* */ 1033/* Returns: */ 1034/* 0 on success, positive value on failure. 
*/ 1035/****************************************************************************/ 1036int 1037bnx_enable_nvram_write(struct bnx_softc *sc) 1038{ 1039 u32 val; 1040 1041 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n"); 1042 1043 val = REG_RD(sc, BNX_MISC_CFG); 1044 REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI); 1045 1046 if (!sc->bnx_flash_info->buffered) { 1047 int j; 1048 1049 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1050 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT); 1051 1052 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1053 DELAY(5); 1054 1055 val = REG_RD(sc, BNX_NVM_COMMAND); 1056 if (val & BNX_NVM_COMMAND_DONE) 1057 break; 1058 } 1059 1060 if (j >= NVRAM_TIMEOUT_COUNT) { 1061 DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n"); 1062 return EBUSY; 1063 } 1064 } 1065 return 0; 1066} 1067 1068/****************************************************************************/ 1069/* Disable NVRAM write access. */ 1070/* */ 1071/* When the caller is finished writing to NVRAM write access must be */ 1072/* disabled. */ 1073/* */ 1074/* Returns: */ 1075/* Nothing. */ 1076/****************************************************************************/ 1077void 1078bnx_disable_nvram_write(struct bnx_softc *sc) 1079{ 1080 u32 val; 1081 1082 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n"); 1083 1084 val = REG_RD(sc, BNX_MISC_CFG); 1085 REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN); 1086} 1087#endif 1088 1089/****************************************************************************/ 1090/* Enable NVRAM access. */ 1091/* */ 1092/* Before accessing NVRAM for read or write operations the caller must */ 1093/* enabled NVRAM access. */ 1094/* */ 1095/* Returns: */ 1096/* Nothing. 
*/ 1097/****************************************************************************/ 1098void 1099bnx_enable_nvram_access(struct bnx_softc *sc) 1100{ 1101 u32 val; 1102 1103 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n"); 1104 1105 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE); 1106 /* Enable both bits, even on read. */ 1107 REG_WR(sc, BNX_NVM_ACCESS_ENABLE, 1108 val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN); 1109} 1110 1111/****************************************************************************/ 1112/* Disable NVRAM access. */ 1113/* */ 1114/* When the caller is finished accessing NVRAM access must be disabled. */ 1115/* */ 1116/* Returns: */ 1117/* Nothing. */ 1118/****************************************************************************/ 1119void 1120bnx_disable_nvram_access(struct bnx_softc *sc) 1121{ 1122 u32 val; 1123 1124 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n"); 1125 1126 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE); 1127 1128 /* Disable both bits, even after read. */ 1129 REG_WR(sc, BNX_NVM_ACCESS_ENABLE, 1130 val & ~(BNX_NVM_ACCESS_ENABLE_EN | 1131 BNX_NVM_ACCESS_ENABLE_WR_EN)); 1132} 1133 1134#ifdef BNX_NVRAM_WRITE_SUPPORT 1135/****************************************************************************/ 1136/* Erase NVRAM page before writing. */ 1137/* */ 1138/* Non-buffered flash parts require that a page be erased before it is */ 1139/* written. */ 1140/* */ 1141/* Returns: */ 1142/* 0 on success, positive value on failure. */ 1143/****************************************************************************/ 1144int 1145bnx_nvram_erase_page(struct bnx_softc *sc, u32 offset) 1146{ 1147 u32 cmd; 1148 int j; 1149 1150 /* Buffered flash doesn't require an erase. */ 1151 if (sc->bnx_flash_info->buffered) 1152 return 0; 1153 1154 DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n"); 1155 1156 /* Build an erase command. 
*/ 1157 cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR | 1158 BNX_NVM_COMMAND_DOIT; 1159 1160 /* 1161 * Clear the DONE bit separately, set the NVRAM adress to erase, 1162 * and issue the erase command. 1163 */ 1164 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1165 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1166 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1167 1168 /* Wait for completion. */ 1169 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1170 u32 val; 1171 1172 DELAY(5); 1173 1174 val = REG_RD(sc, BNX_NVM_COMMAND); 1175 if (val & BNX_NVM_COMMAND_DONE) 1176 break; 1177 } 1178 1179 if (j >= NVRAM_TIMEOUT_COUNT) { 1180 DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n"); 1181 return EBUSY; 1182 } 1183 1184 return 0; 1185} 1186#endif /* BNX_NVRAM_WRITE_SUPPORT */ 1187 1188/****************************************************************************/ 1189/* Read a dword (32 bits) from NVRAM. */ 1190/* */ 1191/* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 1192/* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 1193/* */ 1194/* Returns: */ 1195/* 0 on success and the 32 bit value read, positive value on failure. */ 1196/****************************************************************************/ 1197int 1198bnx_nvram_read_dword(struct bnx_softc *sc, u32 offset, u8 *ret_val, 1199 u32 cmd_flags) 1200{ 1201 u32 cmd; 1202 int i, rc = 0; 1203 1204 /* Build the command word. */ 1205 cmd = BNX_NVM_COMMAND_DOIT | cmd_flags; 1206 1207 /* Calculate the offset for buffered flash. */ 1208 if (sc->bnx_flash_info->buffered) { 1209 offset = ((offset / sc->bnx_flash_info->page_size) << 1210 sc->bnx_flash_info->page_bits) + 1211 (offset % sc->bnx_flash_info->page_size); 1212 } 1213 1214 /* 1215 * Clear the DONE bit separately, set the address to read, 1216 * and issue the read. 
1217 */ 1218 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1219 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1220 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1221 1222 /* Wait for completion. */ 1223 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 1224 u32 val; 1225 1226 DELAY(5); 1227 1228 val = REG_RD(sc, BNX_NVM_COMMAND); 1229 if (val & BNX_NVM_COMMAND_DONE) { 1230 val = REG_RD(sc, BNX_NVM_READ); 1231 1232 val = bnx_be32toh(val); 1233 memcpy(ret_val, &val, 4); 1234 break; 1235 } 1236 } 1237 1238 /* Check for errors. */ 1239 if (i >= NVRAM_TIMEOUT_COUNT) { 1240 BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n", 1241 __FILE__, __LINE__, offset); 1242 rc = EBUSY; 1243 } 1244 1245 return(rc); 1246} 1247 1248#ifdef BNX_NVRAM_WRITE_SUPPORT 1249/****************************************************************************/ 1250/* Write a dword (32 bits) to NVRAM. */ 1251/* */ 1252/* Write a 32 bit word to NVRAM. The caller is assumed to have already */ 1253/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */ 1254/* enabled NVRAM write access. */ 1255/* */ 1256/* Returns: */ 1257/* 0 on success, positive value on failure. */ 1258/****************************************************************************/ 1259int 1260bnx_nvram_write_dword(struct bnx_softc *sc, u32 offset, u8 *val, 1261 u32 cmd_flags) 1262{ 1263 u32 cmd, val32; 1264 int j; 1265 1266 /* Build the command word. */ 1267 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags; 1268 1269 /* Calculate the offset for buffered flash. 
*/ 1270 if (sc->bnx_flash_info->buffered) { 1271 offset = ((offset / sc->bnx_flash_info->page_size) << 1272 sc->bnx_flash_info->page_bits) + 1273 (offset % sc->bnx_flash_info->page_size); 1274 } 1275 1276 /* 1277 * Clear the DONE bit separately, convert NVRAM data to big-endian, 1278 * set the NVRAM address to write, and issue the write command 1279 */ 1280 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1281 memcpy(&val32, val, 4); 1282 val32 = htobe32(val32); 1283 REG_WR(sc, BNX_NVM_WRITE, val32); 1284 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1285 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1286 1287 /* Wait for completion. */ 1288 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1289 DELAY(5); 1290 1291 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE) 1292 break; 1293 } 1294 if (j >= NVRAM_TIMEOUT_COUNT) { 1295 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n", 1296 __FILE__, __LINE__, offset); 1297 return EBUSY; 1298 } 1299 1300 return 0; 1301} 1302#endif /* BNX_NVRAM_WRITE_SUPPORT */ 1303 1304/****************************************************************************/ 1305/* Initialize NVRAM access. */ 1306/* */ 1307/* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1308/* access that device. */ 1309/* */ 1310/* Returns: */ 1311/* 0 on success, positive value on failure. */ 1312/****************************************************************************/ 1313int 1314bnx_init_nvram(struct bnx_softc *sc) 1315{ 1316 u32 val; 1317 int j, entry_count, rc; 1318 struct flash_spec *flash; 1319 1320 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 1321 1322 /* Determine the selected interface. */ 1323 val = REG_RD(sc, BNX_NVM_CFG1); 1324 1325 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1326 1327 rc = 0; 1328 1329 /* 1330 * Flash reconfiguration is required to support additional 1331 * NVRAM devices not directly supported in hardware. 
1332 * Check if the flash interface was reconfigured 1333 * by the bootcode. 1334 */ 1335 1336 if (val & 0x40000000) { 1337 /* Flash interface reconfigured by bootcode. */ 1338 1339 DBPRINT(sc,BNX_INFO_LOAD, 1340 "bnx_init_nvram(): Flash WAS reconfigured.\n"); 1341 1342 for (j = 0, flash = &flash_table[0]; j < entry_count; 1343 j++, flash++) { 1344 if ((val & FLASH_BACKUP_STRAP_MASK) == 1345 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1346 sc->bnx_flash_info = flash; 1347 break; 1348 } 1349 } 1350 } else { 1351 /* Flash interface not yet reconfigured. */ 1352 u32 mask; 1353 1354 DBPRINT(sc,BNX_INFO_LOAD, 1355 "bnx_init_nvram(): Flash was NOT reconfigured.\n"); 1356 1357 if (val & (1 << 23)) 1358 mask = FLASH_BACKUP_STRAP_MASK; 1359 else 1360 mask = FLASH_STRAP_MASK; 1361 1362 /* Look for the matching NVRAM device configuration data. */ 1363 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { 1364 1365 /* Check if the device matches any of the known devices. */ 1366 if ((val & mask) == (flash->strapping & mask)) { 1367 /* Found a device match. */ 1368 sc->bnx_flash_info = flash; 1369 1370 /* Request access to the flash interface. */ 1371 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1372 return rc; 1373 1374 /* Reconfigure the flash interface. */ 1375 bnx_enable_nvram_access(sc); 1376 REG_WR(sc, BNX_NVM_CFG1, flash->config1); 1377 REG_WR(sc, BNX_NVM_CFG2, flash->config2); 1378 REG_WR(sc, BNX_NVM_CFG3, flash->config3); 1379 REG_WR(sc, BNX_NVM_WRITE1, flash->write1); 1380 bnx_disable_nvram_access(sc); 1381 bnx_release_nvram_lock(sc); 1382 1383 break; 1384 } 1385 } 1386 } 1387 1388 /* Check if a matching device was found. */ 1389 if (j == entry_count) { 1390 sc->bnx_flash_info = NULL; 1391 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 1392 __FILE__, __LINE__); 1393 rc = ENODEV; 1394 } 1395 1396 /* Write the flash config data to the shared memory interface. 
*/ 1397 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2); 1398 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK; 1399 if (val) 1400 sc->bnx_flash_size = val; 1401 else 1402 sc->bnx_flash_size = sc->bnx_flash_info->total_size; 1403 1404 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = 0x%08X\n", 1405 sc->bnx_flash_info->total_size); 1406 1407 DBPRINT(sc,BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 1408 1409 return rc; 1410} 1411 1412/****************************************************************************/ 1413/* Read an arbitrary range of data from NVRAM. */ 1414/* */ 1415/* Prepares the NVRAM interface for access and reads the requested data */ 1416/* into the supplied buffer. */ 1417/* */ 1418/* Returns: */ 1419/* 0 on success and the data read, positive value on failure. */ 1420/****************************************************************************/ 1421int 1422bnx_nvram_read(struct bnx_softc *sc, u32 offset, u8 *ret_buf, 1423 int buf_size) 1424{ 1425 int rc = 0; 1426 u32 cmd_flags, offset32, len32, extra; 1427 1428 if (buf_size == 0) 1429 return 0; 1430 1431 /* Request access to the flash interface. 
*/ 1432 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1433 return rc; 1434 1435 /* Enable access to flash interface */ 1436 bnx_enable_nvram_access(sc); 1437 1438 len32 = buf_size; 1439 offset32 = offset; 1440 extra = 0; 1441 1442 cmd_flags = 0; 1443 1444 if (offset32 & 3) { 1445 u8 buf[4]; 1446 u32 pre_len; 1447 1448 offset32 &= ~3; 1449 pre_len = 4 - (offset & 3); 1450 1451 if (pre_len >= len32) { 1452 pre_len = len32; 1453 cmd_flags = BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1454 } 1455 else { 1456 cmd_flags = BNX_NVM_COMMAND_FIRST; 1457 } 1458 1459 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1460 1461 if (rc) 1462 return rc; 1463 1464 memcpy(ret_buf, buf + (offset & 3), pre_len); 1465 1466 offset32 += 4; 1467 ret_buf += pre_len; 1468 len32 -= pre_len; 1469 } 1470 1471 if (len32 & 3) { 1472 extra = 4 - (len32 & 3); 1473 len32 = (len32 + 4) & ~3; 1474 } 1475 1476 if (len32 == 4) { 1477 u8 buf[4]; 1478 1479 if (cmd_flags) 1480 cmd_flags = BNX_NVM_COMMAND_LAST; 1481 else 1482 cmd_flags = BNX_NVM_COMMAND_FIRST | 1483 BNX_NVM_COMMAND_LAST; 1484 1485 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1486 1487 memcpy(ret_buf, buf, 4 - extra); 1488 } 1489 else if (len32 > 0) { 1490 u8 buf[4]; 1491 1492 /* Read the first word. */ 1493 if (cmd_flags) 1494 cmd_flags = 0; 1495 else 1496 cmd_flags = BNX_NVM_COMMAND_FIRST; 1497 1498 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1499 1500 /* Advance to the next dword. */ 1501 offset32 += 4; 1502 ret_buf += 4; 1503 len32 -= 4; 1504 1505 while (len32 > 4 && rc == 0) { 1506 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0); 1507 1508 /* Advance to the next dword. */ 1509 offset32 += 4; 1510 ret_buf += 4; 1511 len32 -= 4; 1512 } 1513 1514 if (rc) 1515 return rc; 1516 1517 cmd_flags = BNX_NVM_COMMAND_LAST; 1518 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1519 1520 memcpy(ret_buf, buf, 4 - extra); 1521 } 1522 1523 /* Disable access to flash interface and release the lock. 
*/ 1524 bnx_disable_nvram_access(sc); 1525 bnx_release_nvram_lock(sc); 1526 1527 return rc; 1528} 1529 1530#ifdef BNX_NVRAM_WRITE_SUPPORT 1531/****************************************************************************/ 1532/* Write an arbitrary range of data from NVRAM. */ 1533/* */ 1534/* Prepares the NVRAM interface for write access and writes the requested */ 1535/* data from the supplied buffer. The caller is responsible for */ 1536/* calculating any appropriate CRCs. */ 1537/* */ 1538/* Returns: */ 1539/* 0 on success, positive value on failure. */ 1540/****************************************************************************/ 1541int 1542bnx_nvram_write(struct bnx_softc *sc, u32 offset, u8 *data_buf, 1543 int buf_size) 1544{ 1545 u32 written, offset32, len32; 1546 u8 *buf, start[4], end[4]; 1547 int rc = 0; 1548 int align_start, align_end; 1549 1550 buf = data_buf; 1551 offset32 = offset; 1552 len32 = buf_size; 1553 align_start = align_end = 0; 1554 1555 if ((align_start = (offset32 & 3))) { 1556 offset32 &= ~3; 1557 len32 += align_start; 1558 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1559 return rc; 1560 } 1561 1562 if (len32 & 3) { 1563 if ((len32 > 4) || !align_start) { 1564 align_end = 4 - (len32 & 3); 1565 len32 += align_end; 1566 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1567 end, 4))) { 1568 return rc; 1569 } 1570 } 1571 } 1572 1573 if (align_start || align_end) { 1574 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1575 if (buf == 0) 1576 return ENOMEM; 1577 if (align_start) { 1578 memcpy(buf, start, 4); 1579 } 1580 if (align_end) { 1581 memcpy(buf + len32 - 4, end, 4); 1582 } 1583 memcpy(buf + align_start, data_buf, buf_size); 1584 } 1585 1586 written = 0; 1587 while ((written < len32) && (rc == 0)) { 1588 u32 page_start, page_end, data_start, data_end; 1589 u32 addr, cmd_flags; 1590 int i; 1591 u8 flash_buffer[264]; 1592 1593 /* Find the page_start addr */ 1594 page_start = offset32 + written; 1595 page_start -= (page_start % 
sc->bnx_flash_info->page_size); 1596 /* Find the page_end addr */ 1597 page_end = page_start + sc->bnx_flash_info->page_size; 1598 /* Find the data_start addr */ 1599 data_start = (written == 0) ? offset32 : page_start; 1600 /* Find the data_end addr */ 1601 data_end = (page_end > offset32 + len32) ? 1602 (offset32 + len32) : page_end; 1603 1604 /* Request access to the flash interface. */ 1605 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1606 goto nvram_write_end; 1607 1608 /* Enable access to flash interface */ 1609 bnx_enable_nvram_access(sc); 1610 1611 cmd_flags = BNX_NVM_COMMAND_FIRST; 1612 if (sc->bnx_flash_info->buffered == 0) { 1613 int j; 1614 1615 /* Read the whole page into the buffer 1616 * (non-buffer flash only) */ 1617 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) { 1618 if (j == (sc->bnx_flash_info->page_size - 4)) { 1619 cmd_flags |= BNX_NVM_COMMAND_LAST; 1620 } 1621 rc = bnx_nvram_read_dword(sc, 1622 page_start + j, 1623 &flash_buffer[j], 1624 cmd_flags); 1625 1626 if (rc) 1627 goto nvram_write_end; 1628 1629 cmd_flags = 0; 1630 } 1631 } 1632 1633 /* Enable writes to flash interface (unlock write-protect) */ 1634 if ((rc = bnx_enable_nvram_write(sc)) != 0) 1635 goto nvram_write_end; 1636 1637 /* Erase the page */ 1638 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0) 1639 goto nvram_write_end; 1640 1641 /* Re-enable the write again for the actual write */ 1642 bnx_enable_nvram_write(sc); 1643 1644 /* Loop to write back the buffer data from page_start to 1645 * data_start */ 1646 i = 0; 1647 if (sc->bnx_flash_info->buffered == 0) { 1648 for (addr = page_start; addr < data_start; 1649 addr += 4, i += 4) { 1650 1651 rc = bnx_nvram_write_dword(sc, addr, 1652 &flash_buffer[i], cmd_flags); 1653 1654 if (rc != 0) 1655 goto nvram_write_end; 1656 1657 cmd_flags = 0; 1658 } 1659 } 1660 1661 /* Loop to write the new data from data_start to data_end */ 1662 for (addr = data_start; addr < data_end; addr += 4, i++) { 1663 if ((addr == page_end - 4) 
|| 1664 ((sc->bnx_flash_info->buffered) && 1665 (addr == data_end - 4))) { 1666 1667 cmd_flags |= BNX_NVM_COMMAND_LAST; 1668 } 1669 rc = bnx_nvram_write_dword(sc, addr, buf, 1670 cmd_flags); 1671 1672 if (rc != 0) 1673 goto nvram_write_end; 1674 1675 cmd_flags = 0; 1676 buf += 4; 1677 } 1678 1679 /* Loop to write back the buffer data from data_end 1680 * to page_end */ 1681 if (sc->bnx_flash_info->buffered == 0) { 1682 for (addr = data_end; addr < page_end; 1683 addr += 4, i += 4) { 1684 1685 if (addr == page_end-4) { 1686 cmd_flags = BNX_NVM_COMMAND_LAST; 1687 } 1688 rc = bnx_nvram_write_dword(sc, addr, 1689 &flash_buffer[i], cmd_flags); 1690 1691 if (rc != 0) 1692 goto nvram_write_end; 1693 1694 cmd_flags = 0; 1695 } 1696 } 1697 1698 /* Disable writes to flash interface (lock write-protect) */ 1699 bnx_disable_nvram_write(sc); 1700 1701 /* Disable access to flash interface */ 1702 bnx_disable_nvram_access(sc); 1703 bnx_release_nvram_lock(sc); 1704 1705 /* Increment written */ 1706 written += data_end - data_start; 1707 } 1708 1709nvram_write_end: 1710 if (align_start || align_end) 1711 free(buf, M_DEVBUF); 1712 1713 return rc; 1714} 1715#endif /* BNX_NVRAM_WRITE_SUPPORT */ 1716 1717/****************************************************************************/ 1718/* Verifies that NVRAM is accessible and contains valid data. */ 1719/* */ 1720/* Reads the configuration data from NVRAM and verifies that the CRC is */ 1721/* correct. */ 1722/* */ 1723/* Returns: */ 1724/* 0 on success, positive value on failure. */ 1725/****************************************************************************/ 1726int 1727bnx_nvram_test(struct bnx_softc *sc) 1728{ 1729 u32 buf[BNX_NVRAM_SIZE / 4]; 1730 u8 *data = (u8 *) buf; 1731 int rc = 0; 1732 u32 magic, csum; 1733 1734 /* 1735 * Check that the device NVRAM is valid by reading 1736 * the magic value at offset 0. 
1737 */ 1738 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0) 1739 goto bnx_nvram_test_done; 1740 1741 magic = bnx_be32toh(buf[0]); 1742 if (magic != BNX_NVRAM_MAGIC) { 1743 rc = ENODEV; 1744 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, " 1745 "Found: 0x%08X\n", 1746 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic); 1747 goto bnx_nvram_test_done; 1748 } 1749 1750 /* 1751 * Verify that the device NVRAM includes valid 1752 * configuration data. 1753 */ 1754 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0) 1755 goto bnx_nvram_test_done; 1756 1757 csum = ether_crc32_le(data, 0x100); 1758 if (csum != BNX_CRC32_RESIDUAL) { 1759 rc = ENODEV; 1760 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! " 1761 "Expected: 0x%08X, Found: 0x%08X\n", 1762 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 1763 goto bnx_nvram_test_done; 1764 } 1765 1766 csum = ether_crc32_le(data + 0x100, 0x100); 1767 if (csum != BNX_CRC32_RESIDUAL) { 1768 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information " 1769 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 1770 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 1771 rc = ENODEV; 1772 } 1773 1774bnx_nvram_test_done: 1775 return rc; 1776} 1777 1778/****************************************************************************/ 1779/* Free any DMA memory owned by the driver. */ 1780/* */ 1781/* Scans through each data structre that requires DMA memory and frees */ 1782/* the memory if allocated. */ 1783/* */ 1784/* Returns: */ 1785/* Nothing. */ 1786/****************************************************************************/ 1787void 1788bnx_dma_free(struct bnx_softc *sc) 1789{ 1790 int i; 1791 1792 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 1793 1794 /* Destroy the status block. 
*/ 1795 if (sc->status_block != NULL) { 1796 bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block, 1797 BNX_STATUS_BLK_SZ); 1798 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg, 1799 sc->status_rseg); 1800 sc->status_block = NULL; 1801 } 1802 if (sc->status_map != NULL) { 1803 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map); 1804 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map); 1805 } 1806 1807 /* Destroy the statistics block. */ 1808 if (sc->stats_block != NULL) { 1809 bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block, 1810 BNX_STATS_BLK_SZ); 1811 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg, 1812 sc->stats_rseg); 1813 sc->stats_block = NULL; 1814 } 1815 if (sc->stats_map != NULL) { 1816 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map); 1817 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map); 1818 } 1819 1820 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 1821 for (i = 0; i < TX_PAGES; i++ ) { 1822 if (sc->tx_bd_chain[i] != NULL) { 1823 bus_dmamem_unmap(sc->bnx_dmatag, 1824 (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ); 1825 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 1826 sc->tx_bd_chain_rseg[i]); 1827 sc->tx_bd_chain[i] = NULL; 1828 } 1829 1830 if (sc->tx_bd_chain_map[i] != NULL) { 1831 bus_dmamap_unload(sc->bnx_dmatag, sc->tx_bd_chain_map[i]); 1832 bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_bd_chain_map[i]); 1833 } 1834 1835 } 1836 1837 /* Unload and destroy the TX mbuf maps. */ 1838 for (i = 0; i < TOTAL_TX_BD; i++) { 1839 if (sc->tx_mbuf_map[i] != NULL) { 1840 bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]); 1841 bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]); 1842 } 1843 } 1844 1845 /* Free, unmap and destroy all RX buffer descriptor chain pages. 
*/ 1846 for (i = 0; i < RX_PAGES; i++ ) { 1847 if (sc->rx_bd_chain[i] != NULL) { 1848 bus_dmamem_unmap(sc->bnx_dmatag, 1849 (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 1850 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 1851 sc->rx_bd_chain_rseg[i]); 1852 sc->rx_bd_chain[i] = NULL; 1853 } 1854 1855 if (sc->rx_bd_chain_map[i] != NULL) { 1856 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_bd_chain_map[i]); 1857 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_bd_chain_map[i]); 1858 } 1859 1860 } 1861 1862 /* Unload and destroy the RX mbuf maps. */ 1863 for (i = 0; i < TOTAL_RX_BD; i++) { 1864 if (sc->rx_mbuf_map[i] != NULL) { 1865 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 1866 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 1867 } 1868 } 1869 1870 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 1871} 1872 1873/****************************************************************************/ 1874/* Map TX buffers into TX buffer descriptors. */ 1875/* */ 1876/* Given a series of DMA memory containting an outgoing frame, map the */ 1877/* segments into the tx_bd structure used by the hardware. */ 1878/* */ 1879/* Returns: */ 1880/* Nothing. 
*/ 1881/****************************************************************************/ 1882void 1883bnx_dma_map_tx_desc(void *arg, bus_dmamap_t map) 1884{ 1885 struct bnx_dmamap_arg *map_arg; 1886 struct bnx_softc *sc; 1887 struct tx_bd *txbd = NULL; 1888 int i = 0, nseg; 1889 u16 prod, chain_prod; 1890 u32 prod_bseq; 1891#ifdef BNX_DEBUG 1892 u16 debug_prod; 1893#endif 1894 1895 map_arg = arg; 1896 sc = map_arg->sc; 1897 nseg = map->dm_nsegs; 1898 1899 /* Signal error to caller if there's too many segments */ 1900 if (nseg > map_arg->maxsegs) { 1901 DBPRINT(sc, BNX_WARN, 1902 "%s(): Mapped TX descriptors: max segs = %d, " 1903 "actual segs = %d\n", 1904 __FUNCTION__, map_arg->maxsegs, nseg); 1905 1906 map_arg->maxsegs = 0; 1907 return; 1908 } 1909 1910 /* prod points to an empty tx_bd at this point. */ 1911 prod = map_arg->prod; 1912 chain_prod = map_arg->chain_prod; 1913 prod_bseq = map_arg->prod_bseq; 1914 1915#ifdef BNX_DEBUG 1916 debug_prod = chain_prod; 1917#endif 1918 1919 DBPRINT(sc, BNX_INFO_SEND, 1920 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 1921 "prod_bseq = 0x%08X\n", 1922 __FUNCTION__, prod, chain_prod, prod_bseq); 1923 1924 /* 1925 * Cycle through each mbuf segment that makes up 1926 * the outgoing frame, gathering the mapping info 1927 * for that segment and creating a tx_bd to for 1928 * the mbuf. 1929 */ 1930 1931 txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 1932 1933 /* Setup the first tx_bd for the first segment. */ 1934 txbd->tx_bd_haddr_lo = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr)); 1935 txbd->tx_bd_haddr_hi = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr)); 1936 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len); 1937 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags | 1938 TX_BD_FLAGS_START); 1939 prod_bseq += map->dm_segs[i].ds_len; 1940 1941 /* Setup any remaing segments. 
*/ 1942 for (i = 1; i < nseg; i++) { 1943 prod = NEXT_TX_BD(prod); 1944 chain_prod = TX_CHAIN_IDX(prod); 1945 1946 txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 1947 1948 txbd->tx_bd_haddr_lo = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr)); 1949 txbd->tx_bd_haddr_hi = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr)); 1950 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len); 1951 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags); 1952 1953 prod_bseq += map->dm_segs[i].ds_len; 1954 } 1955 1956 /* Set the END flag on the last TX buffer descriptor. */ 1957 txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END); 1958 1959 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, nseg)); 1960 1961 DBPRINT(sc, BNX_INFO_SEND, 1962 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 1963 "prod_bseq = 0x%08X\n", 1964 __FUNCTION__, prod, chain_prod, prod_bseq); 1965 1966 /* prod points to the last tx_bd at this point. */ 1967 map_arg->maxsegs = nseg; 1968 map_arg->prod = prod; 1969 map_arg->chain_prod = chain_prod; 1970 map_arg->prod_bseq = prod_bseq; 1971} 1972 1973/****************************************************************************/ 1974/* Allocate any DMA memory needed by the driver. */ 1975/* */ 1976/* Allocates DMA memory needed for the various global structures needed by */ 1977/* hardware. */ 1978/* */ 1979/* Returns: */ 1980/* 0 for success, positive value for failure. */ 1981/****************************************************************************/ 1982int 1983bnx_dma_alloc(struct bnx_softc *sc) 1984{ 1985 int i, rc = 0; 1986 1987 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 1988 1989 /* 1990 * Allocate DMA memory for the status block, map the memory into DMA 1991 * space, and fetch the physical address of the block. 
1992 */ 1993 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1994 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 1995 &sc->status_rseg, BUS_DMA_NOWAIT)) { 1996 printf(": Could not allocate status block DMA memory!\n"); 1997 rc = ENOMEM; 1998 goto bnx_dma_alloc_exit; 1999 } 2000 2001 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2002 BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) { 2003 printf(": Could not map status block DMA memory!\n"); 2004 rc = ENOMEM; 2005 goto bnx_dma_alloc_exit; 2006 } 2007 2008 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2009 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2010 printf(": Could not create status block DMA map!\n"); 2011 rc = ENOMEM; 2012 goto bnx_dma_alloc_exit; 2013 } 2014 2015 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2016 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2017 printf(": Could not load status block DMA memory!\n"); 2018 rc = ENOMEM; 2019 goto bnx_dma_alloc_exit; 2020 } 2021 2022 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2023 bzero(sc->status_block, BNX_STATUS_BLK_SZ); 2024 2025 /* DRC - Fix for 64 bit addresses. */ 2026 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2027 (u32) sc->status_block_paddr); 2028 2029 /* 2030 * Allocate DMA memory for the statistics block, map the memory into 2031 * DMA space, and fetch the physical address of the block. 
2032 */ 2033 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 2034 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1, 2035 &sc->stats_rseg, BUS_DMA_NOWAIT)) { 2036 printf(": Could not allocate stats block DMA memory!\n"); 2037 rc = ENOMEM; 2038 goto bnx_dma_alloc_exit; 2039 } 2040 2041 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg, 2042 BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) { 2043 printf(": Could not map stats block DMA memory!\n"); 2044 rc = ENOMEM; 2045 goto bnx_dma_alloc_exit; 2046 } 2047 2048 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1, 2049 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) { 2050 printf(": Could not create stats block DMA map!\n"); 2051 rc = ENOMEM; 2052 goto bnx_dma_alloc_exit; 2053 } 2054 2055 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map, 2056 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2057 printf(": Could not load status block DMA memory!\n"); 2058 rc = ENOMEM; 2059 goto bnx_dma_alloc_exit; 2060 } 2061 2062 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr; 2063 bzero(sc->stats_block, BNX_STATS_BLK_SZ); 2064 2065 /* DRC - Fix for 64 bit address. */ 2066 DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n", 2067 (u32) sc->stats_block_paddr); 2068 2069 /* 2070 * Allocate DMA memory for the TX buffer descriptor chain, 2071 * and fetch the physical address of the block. 
2072 */ 2073 for (i = 0; i < TX_PAGES; i++) { 2074 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2075 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2076 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2077 printf(": Could not allocate TX desc %d DMA memory!\n", i); 2078 rc = ENOMEM; 2079 goto bnx_dma_alloc_exit; 2080 } 2081 2082 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2083 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2084 (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2085 printf(": Could not map TX desc %d DMA memory!\n", i); 2086 rc = ENOMEM; 2087 goto bnx_dma_alloc_exit; 2088 } 2089 2090 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2091 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2092 &sc->tx_bd_chain_map[i])) { 2093 printf(": Could not create Tx desc %d DMA map!\n", i); 2094 rc = ENOMEM; 2095 goto bnx_dma_alloc_exit; 2096 } 2097 2098 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2099 (caddr_t)sc->tx_bd_chain[i], BNX_STATS_BLK_SZ, NULL, 2100 BUS_DMA_NOWAIT)) { 2101 printf(": Could not load TX desc %d DMA memory!\n", i); 2102 rc = ENOMEM; 2103 goto bnx_dma_alloc_exit; 2104 } 2105 2106 sc->tx_bd_chain_paddr[i] = sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2107 2108 /* DRC - Fix for 64 bit systems. */ 2109 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2110 i, (u32) sc->tx_bd_chain_paddr[i]); 2111 } 2112 2113 /* 2114 * Create DMA maps for the TX buffer mbufs. 2115 */ 2116 for (i = 0; i < TOTAL_TX_BD; i++) { 2117 if (bus_dmamap_create(sc->bnx_dmatag, MCLBYTES * BNX_MAX_SEGMENTS, 2118 BNX_MAX_SEGMENTS, MCLBYTES, 0, BUS_DMA_NOWAIT, 2119 &sc->tx_mbuf_map[i])) { 2120 printf(": Could not create Tx mbuf %d DMA map!\n", i); 2121 rc = ENOMEM; 2122 goto bnx_dma_alloc_exit; 2123 } 2124 } 2125 2126 /* 2127 * Allocate DMA memory for the Rx buffer descriptor chain, 2128 * and fetch the physical address of the block. 
2129 */ 2130 for (i = 0; i < RX_PAGES; i++) { 2131 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2132 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2133 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2134 printf(": Could not allocate Rx desc %d DMA memory!\n", i); 2135 rc = ENOMEM; 2136 goto bnx_dma_alloc_exit; 2137 } 2138 2139 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2140 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2141 (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2142 printf(": Could not map Rx desc %d DMA memory!\n", i); 2143 rc = ENOMEM; 2144 goto bnx_dma_alloc_exit; 2145 } 2146 2147 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2148 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2149 &sc->rx_bd_chain_map[i])) { 2150 printf(": Could not create Rx desc %d DMA map!\n", i); 2151 rc = ENOMEM; 2152 goto bnx_dma_alloc_exit; 2153 } 2154 2155 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2156 (caddr_t)sc->rx_bd_chain[i], BNX_STATS_BLK_SZ, NULL, 2157 BUS_DMA_NOWAIT)) { 2158 printf(": Could not load Rx desc %d DMA memory!\n", i); 2159 rc = ENOMEM; 2160 goto bnx_dma_alloc_exit; 2161 } 2162 2163 bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 2164 sc->rx_bd_chain_paddr[i] = sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2165 2166 /* DRC - Fix for 64 bit systems. */ 2167 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2168 i, (u32) sc->rx_bd_chain_paddr[i]); 2169 } 2170 2171 /* 2172 * Create DMA maps for the Rx buffer mbufs. 
2173 */ 2174 for (i = 0; i < TOTAL_RX_BD; i++) { 2175 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU, 2176 BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT, 2177 &sc->rx_mbuf_map[i])) { 2178 printf(": Could not create Rx mbuf %d DMA map!\n", i); 2179 rc = ENOMEM; 2180 goto bnx_dma_alloc_exit; 2181 } 2182 } 2183 2184 bnx_dma_alloc_exit: 2185 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 2186 2187 return(rc); 2188} 2189 2190/****************************************************************************/ 2191/* Release all resources used by the driver. */ 2192/* */ 2193/* Releases all resources acquired by the driver including interrupts, */ 2194/* interrupt handler, interfaces, mutexes, and DMA memory. */ 2195/* */ 2196/* Returns: */ 2197/* Nothing. */ 2198/****************************************************************************/ 2199void 2200bnx_release_resources(struct bnx_softc *sc) 2201{ 2202 struct pci_attach_args *pa = &(sc->bnx_pa); 2203 2204 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 2205 2206 bnx_dma_free(sc); 2207 2208 if (sc->bnx_intrhand != NULL) 2209 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand); 2210 2211 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 2212} 2213 2214/****************************************************************************/ 2215/* Firmware synchronization. */ 2216/* */ 2217/* Before performing certain events such as a chip reset, synchronize with */ 2218/* the firmware first. */ 2219/* */ 2220/* Returns: */ 2221/* 0 for success, positive value for failure. */ 2222/****************************************************************************/ 2223int 2224bnx_fw_sync(struct bnx_softc *sc, u32 msg_data) 2225{ 2226 int i, rc = 0; 2227 u32 val; 2228 2229 /* Don't waste any time if we've timed out before. */ 2230 if (sc->bnx_fw_timed_out) { 2231 rc = EBUSY; 2232 goto bnx_fw_sync_exit; 2233 } 2234 2235 /* Increment the message sequence number. 
*/ 2236 sc->bnx_fw_wr_seq++; 2237 msg_data |= sc->bnx_fw_wr_seq; 2238 2239 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", msg_data); 2240 2241 /* Send the message to the bootcode driver mailbox. */ 2242 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2243 2244 /* Wait for the bootcode to acknowledge the message. */ 2245 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2246 /* Check for a response in the bootcode firmware mailbox. */ 2247 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB); 2248 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ)) 2249 break; 2250 DELAY(1000); 2251 } 2252 2253 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2254 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) && 2255 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) { 2256 2257 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! " 2258 "msg_data = 0x%08X\n", 2259 __FILE__, __LINE__, msg_data); 2260 2261 msg_data &= ~BNX_DRV_MSG_CODE; 2262 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2263 2264 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2265 2266 sc->bnx_fw_timed_out = 1; 2267 rc = EBUSY; 2268 } 2269 2270bnx_fw_sync_exit: 2271 return (rc); 2272} 2273 2274/****************************************************************************/ 2275/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2276/* */ 2277/* Returns: */ 2278/* Nothing. 
 */
/****************************************************************************/
void
bnx_load_rv2p_fw(struct bnx_softc *sc, u32 *rv2p_code,
    u32 rv2p_code_len, u32 rv2p_proc)
{
	int i;
	u32 val;

	/*
	 * Each RV2P instruction is two 32 bit words; write the high and low
	 * halves, then latch them into the selected processor's instruction
	 * memory at index i / 8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
	}
}

/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	/*
	 * Each section below is copied word-by-word into the processor's
	 * scratchpad; the target offset translates the firmware's MIPS
	 * view address into a scratchpad offset.
	 */

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(sc, offset, fw->text[j]);
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(sc, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(sc, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(sc, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(sc, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}

/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor.
 */
	bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), RV2P_PROC1);
	bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), RV2P_PROC2);

	/*
	 * Initialize the RX Processor.
	 * Each section below fills in the register map and firmware image
	 * for one on-chip CPU and hands both to bnx_load_cpu_fw().
	 */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx_RXP_b06FwReleaseFix;
	fw.start_addr = bnx_RXP_b06FwStartAddr;

	fw.text_addr = bnx_RXP_b06FwTextAddr;
	fw.text_len = bnx_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_RXP_b06FwText;

	fw.data_addr = bnx_RXP_b06FwDataAddr;
	fw.data_len = bnx_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_RXP_b06FwData;

	fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_RXP_b06FwSbss;

	fw.bss_addr = bnx_RXP_b06FwBssAddr;
	fw.bss_len = bnx_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_RXP_b06FwBss;

	fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_RXP_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx_TXP_b06FwReleaseFix;
	fw.start_addr = bnx_TXP_b06FwStartAddr;

	fw.text_addr = bnx_TXP_b06FwTextAddr;
	fw.text_len = bnx_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_TXP_b06FwText;

	fw.data_addr = bnx_TXP_b06FwDataAddr;
	fw.data_len = bnx_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_TXP_b06FwData;

	fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_TXP_b06FwSbss;

	fw.bss_addr = bnx_TXP_b06FwBssAddr;
	fw.bss_len = bnx_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_TXP_b06FwBss;

	fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_TXP_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx_TPAT_b06FwStartAddr;

	fw.text_addr = bnx_TPAT_b06FwTextAddr;
	fw.text_len = bnx_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_TPAT_b06FwText;

	fw.data_addr = bnx_TPAT_b06FwDataAddr;
	fw.data_len = bnx_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_TPAT_b06FwData;

	fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_TPAT_b06FwSbss;

	fw.bss_addr = bnx_TPAT_b06FwBssAddr;
	fw.bss_len = bnx_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_TPAT_b06FwBss;

	fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_TPAT_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx_COM_b06FwReleaseFix;
	fw.start_addr = bnx_COM_b06FwStartAddr;

	fw.text_addr = bnx_COM_b06FwTextAddr;
	fw.text_len = bnx_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_COM_b06FwText;

	fw.data_addr = bnx_COM_b06FwDataAddr;
	fw.data_len = bnx_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_COM_b06FwData;

	fw.sbss_addr = bnx_COM_b06FwSbssAddr;
	fw.sbss_len = bnx_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_COM_b06FwSbss;

	fw.bss_addr = bnx_COM_b06FwBssAddr;
	fw.bss_len = bnx_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_COM_b06FwBss;

	fw.rodata_addr = bnx_COM_b06FwRodataAddr;
	fw.rodata_len = bnx_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_COM_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}

/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	u32 vcid;

	/* Walk all 96 virtual contexts from the highest CID down. */
	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		vcid_addr = GET_CID_ADDR(vcid);
		pcid_addr = vcid_addr;

		REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(sc, 0x00, offset, 0);
		}

		/* Map the virtual CID to the physical CID. */
		REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
	}
}

/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_get_mac_addr(struct bnx_softc *sc)
{
	u32 mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */
	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_MAC_UPPER);
	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_MAC_LOWER);

	if ((mac_lo == 0) && (mac_hi == 0)) {
		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
		    __FILE__, __LINE__);
	} else {
		/* Unpack: mac_hi holds bytes 0-1, mac_lo holds bytes 2-5. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	/* NOTE(review): "%6D" is a FreeBSD printf extension -- confirm it is supported here. */
	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
}

/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_set_mac_addr(struct bnx_softc *sc)
{
	u32 val;
	u8 *mac_addr = sc->eaddr;

	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");

	/* MATCH0 takes the upper 2 address bytes, MATCH1 the lower 4. */
	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
}

/****************************************************************************/
/* Stop the controller.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	mii = &sc->bnx_mii;

	timeout_del(&sc->bnx_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bnx_detach(), mii is already NULL.
	 * NOTE(review): mii is assigned &sc->bnx_mii above and can never
	 * be NULL here; this check appears vestigial -- confirm before
	 * removing it.
	 */
	if (mii != NULL) {
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
	}

	ifp->if_flags = itmp;
	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

}

int
bnx_reset(struct bnx_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
		if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
		    __FILE__, __LINE__);
		rc = EBUSY;
		goto bnx_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again.
 */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
		    __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}

int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	u32 val;
	int rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA */
	/* channels and PCI clock compensation delay. */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		u16 val;	/* intentionally shadows the u32 val above */

		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD, val & ~0x2);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BNX_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}

/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 2945/****************************************************************************/ 2946int 2947bnx_blockinit(struct bnx_softc *sc) 2948{ 2949 u32 reg, val; 2950 int rc = 0; 2951 2952 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 2953 2954 /* Load the hardware default MAC address. */ 2955 bnx_set_mac_addr(sc); 2956 2957 /* Set the Ethernet backoff seed value */ 2958 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + 2959 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + 2960 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 2961 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 2962 2963 sc->last_status_idx = 0; 2964 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 2965 2966 /* Set up link change interrupt generation. */ 2967 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 2968 2969 /* Program the physical address of the status block. */ 2970 REG_WR(sc, BNX_HC_STATUS_ADDR_L, 2971 BNX_ADDR_LO(sc->status_block_paddr)); 2972 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 2973 BNX_ADDR_HI(sc->status_block_paddr)); 2974 2975 /* Program the physical address of the statistics block. */ 2976 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 2977 BNX_ADDR_LO(sc->stats_block_paddr)); 2978 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 2979 BNX_ADDR_HI(sc->stats_block_paddr)); 2980 2981 /* Program various host coalescing parameters. 
*/ 2982 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, 2983 (sc->bnx_tx_quick_cons_trip_int << 16) | sc->bnx_tx_quick_cons_trip); 2984 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, 2985 (sc->bnx_rx_quick_cons_trip_int << 16) | sc->bnx_rx_quick_cons_trip); 2986 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, 2987 (sc->bnx_comp_prod_trip_int << 16) | sc->bnx_comp_prod_trip); 2988 REG_WR(sc, BNX_HC_TX_TICKS, 2989 (sc->bnx_tx_ticks_int << 16) | sc->bnx_tx_ticks); 2990 REG_WR(sc, BNX_HC_RX_TICKS, 2991 (sc->bnx_rx_ticks_int << 16) | sc->bnx_rx_ticks); 2992 REG_WR(sc, BNX_HC_COM_TICKS, 2993 (sc->bnx_com_ticks_int << 16) | sc->bnx_com_ticks); 2994 REG_WR(sc, BNX_HC_CMD_TICKS, 2995 (sc->bnx_cmd_ticks_int << 16) | sc->bnx_cmd_ticks); 2996 REG_WR(sc, BNX_HC_STATS_TICKS, 2997 (sc->bnx_stats_ticks & 0xffff00)); 2998 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 2999 0xbb8); /* 3ms */ 3000 REG_WR(sc, BNX_HC_CONFIG, 3001 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3002 BNX_HC_CONFIG_COLLECT_STATS)); 3003 3004 /* Clear the internal statistics counters. */ 3005 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3006 3007 /* Verify that bootcode is running. */ 3008 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3009 3010 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3011 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3012 __FILE__, __LINE__); 3013 reg = 0); 3014 3015 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3016 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3017 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, " 3018 "Expected: 08%08X\n", __FILE__, __LINE__, 3019 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK), 3020 BNX_DEV_INFO_SIGNATURE_MAGIC); 3021 rc = ENODEV; 3022 goto bnx_blockinit_exit; 3023 } 3024 3025 /* Check if any management firmware is running. 
*/ 3026 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE); 3027 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED | BNX_PORT_FEATURE_IMD_ENABLED)) { 3028 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n"); 3029 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG; 3030 } 3031 3032 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV); 3033 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver); 3034 3035 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3036 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET); 3037 3038 /* Enable link state change interrupt generation. */ 3039 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3040 3041 /* Enable all remaining blocks in the MAC. */ 3042 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff); 3043 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS); 3044 DELAY(20); 3045 3046bnx_blockinit_exit: 3047 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3048 3049 return (rc); 3050} 3051 3052/****************************************************************************/ 3053/* Encapsulate an mbuf cluster into the rx_bd chain. */ 3054/* */ 3055/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3056/* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3057/* necessary. */ 3058/* */ 3059/* Returns: */ 3060/* 0 for success, positive value for failure. */ 3061/****************************************************************************/ 3062int 3063bnx_get_buf(struct bnx_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod, 3064 u32 *prod_bseq) 3065{ 3066 bus_dmamap_t map; 3067 struct mbuf *m_new = NULL; 3068 struct rx_bd *rxbd; 3069 int i, rc = 0; 3070#ifdef BNX_DEBUG 3071 u16 debug_chain_prod = *chain_prod; 3072#endif 3073 3074 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n", 3075 __FUNCTION__); 3076 3077 /* Make sure the inputs are valid. 
*/ 3078 DBRUNIF((*chain_prod > MAX_RX_BD), 3079 printf("%s: RX producer out of range: 0x%04X > 0x%04X\n", 3080 *chain_prod, (u16) MAX_RX_BD)); 3081 3082 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, " 3083 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq); 3084 3085 if (m == NULL) { 3086 3087 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3088 BNX_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n", 3089 __FILE__, __LINE__); 3090 sc->mbuf_alloc_failed++; 3091 rc = ENOBUFS; 3092 goto bnx_get_buf_exit); 3093 3094 /* This is a new mbuf allocation. */ 3095 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3096 if (m_new == NULL) { 3097 3098 DBPRINT(sc, BNX_WARN, "%s(%d): RX mbuf header allocation failed!\n", 3099 __FILE__, __LINE__); 3100 3101 DBRUNIF(1, sc->mbuf_alloc_failed++); 3102 3103 rc = ENOBUFS; 3104 goto bnx_get_buf_exit; 3105 } 3106 3107 DBRUNIF(1, sc->rx_mbuf_alloc++); 3108 if (sc->mbuf_alloc_size <= MCLBYTES) 3109 MCLGET(m_new, M_DONTWAIT); 3110 else 3111 MEXTMALLOC(m_new, sc->mbuf_alloc_size, M_DONTWAIT); 3112 if (!(m_new->m_flags & M_EXT)) { 3113 3114 DBPRINT(sc, BNX_WARN, "%s(%d): RX mbuf chain allocation failed!\n", 3115 __FILE__, __LINE__); 3116 3117 m_freem(m_new); 3118 3119 DBRUNIF(1, sc->rx_mbuf_alloc--); 3120 DBRUNIF(1, sc->mbuf_alloc_failed++); 3121 3122 rc = ENOBUFS; 3123 goto bnx_get_buf_exit; 3124 } 3125 3126 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size; 3127 } else { 3128 m_new = m; 3129 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size; 3130 m_new->m_data = m_new->m_ext.ext_buf; 3131 } 3132 3133 /* Map the mbuf cluster into device memory. 
*/ 3134 map = sc->rx_mbuf_map[*chain_prod]; 3135 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) { 3136 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n", 3137 __FILE__, __LINE__); 3138 3139 m_freem(m_new); 3140 3141 DBRUNIF(1, sc->rx_mbuf_alloc--); 3142 3143 rc = ENOBUFS; 3144 goto bnx_get_buf_exit; 3145 } 3146 3147 /* Watch for overflow. */ 3148 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD), 3149 printf("%s: Too many free rx_bd (0x%04X > 0x%04X)!\n", 3150 sc->free_rx_bd, (u16) USABLE_RX_BD)); 3151 3152 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3153 sc->rx_low_watermark = sc->free_rx_bd); 3154 3155 /* Setup the rx_bd for the first segment. */ 3156 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3157 3158 rxbd->rx_bd_haddr_lo = htole32(BNX_ADDR_LO(map->dm_segs[0].ds_addr)); 3159 rxbd->rx_bd_haddr_hi = htole32(BNX_ADDR_HI(map->dm_segs[0].ds_addr)); 3160 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len); 3161 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3162 *prod_bseq += map->dm_segs[0].ds_len; 3163 3164 for (i = 1; i < map->dm_nsegs; i++) { 3165 3166 *prod = NEXT_RX_BD(*prod); 3167 *chain_prod = RX_CHAIN_IDX(*prod); 3168 3169 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3170 3171 rxbd->rx_bd_haddr_lo = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr)); 3172 rxbd->rx_bd_haddr_hi = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr)); 3173 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len); 3174 rxbd->rx_bd_flags = 0; 3175 *prod_bseq += map->dm_segs[i].ds_len; 3176 } 3177 3178 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3179 3180 /* Save the mbuf and update our counter. 
*/ 3181 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3182 sc->free_rx_bd -= map->dm_nsegs; 3183 3184 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod, 3185 map->dm_nsegs)); 3186 3187 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, " 3188 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq); 3189 3190bnx_get_buf_exit: 3191 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3192 __FUNCTION__); 3193 3194 return(rc); 3195} 3196 3197/****************************************************************************/ 3198/* Allocate memory and initialize the TX data structures. */ 3199/* */ 3200/* Returns: */ 3201/* 0 for success, positive value for failure. */ 3202/****************************************************************************/ 3203int 3204bnx_init_tx_chain(struct bnx_softc *sc) 3205{ 3206 struct tx_bd *txbd; 3207 u32 val; 3208 int i, rc = 0; 3209 3210 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3211 3212 /* Set the initial TX producer/consumer indices. */ 3213 sc->tx_prod = 0; 3214 sc->tx_cons = 0; 3215 sc->tx_prod_bseq = 0; 3216 sc->used_tx_bd = 0; 3217 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 3218 3219 /* 3220 * The NetXtreme II supports a linked-list structure called 3221 * a Buffer Descriptor Chain (or BD chain). A BD chain 3222 * consists of a series of 1 or more chain pages, each of which 3223 * consists of a fixed number of BD entries. 3224 * The last BD entry on each page is a pointer to the next page 3225 * in the chain, and the last pointer in the BD chain 3226 * points back to the beginning of the chain. 3227 */ 3228 3229 /* Set the TX next pointer chain entries. */ 3230 for (i = 0; i < TX_PAGES; i++) { 3231 int j; 3232 3233 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 3234 3235 /* Check if we've reached the last page. 
*/ 3236 if (i == (TX_PAGES - 1)) 3237 j = 0; 3238 else 3239 j = i + 1; 3240 3241 txbd->tx_bd_haddr_hi = htole32(BNX_ADDR_HI(sc->tx_bd_chain_paddr[j])); 3242 txbd->tx_bd_haddr_lo = htole32(BNX_ADDR_LO(sc->tx_bd_chain_paddr[j])); 3243 } 3244 3245 /* 3246 * Initialize the context ID for an L2 TX chain. 3247 */ 3248 val = BNX_L2CTX_TYPE_TYPE_L2; 3249 val |= BNX_L2CTX_TYPE_SIZE_L2; 3250 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 3251 3252 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3253 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 3254 3255 /* Point the hardware to the first page in the chain. */ 3256 val = BNX_ADDR_HI(sc->tx_bd_chain_paddr[0]); 3257 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 3258 val = BNX_ADDR_LO(sc->tx_bd_chain_paddr[0]); 3259 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 3260 3261 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD)); 3262 3263 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3264 3265 return(rc); 3266} 3267 3268/****************************************************************************/ 3269/* Free memory and clear the TX data structures. */ 3270/* */ 3271/* Returns: */ 3272/* Nothing. */ 3273/****************************************************************************/ 3274void 3275bnx_free_tx_chain(struct bnx_softc *sc) 3276{ 3277 int i; 3278 3279 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3280 3281 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 3282 for (i = 0; i < TOTAL_TX_BD; i++) { 3283 if (sc->tx_mbuf_ptr[i] != NULL) { 3284 if (sc->tx_mbuf_map != NULL) 3285 bus_dmamap_sync(sc->bnx_dmatag, 3286 sc->tx_mbuf_map[i], 0, 3287 sc->tx_mbuf_map[i]->dm_mapsize, 3288 BUS_DMASYNC_POSTWRITE); 3289 m_freem(sc->tx_mbuf_ptr[i]); 3290 sc->tx_mbuf_ptr[i] = NULL; 3291 DBRUNIF(1, sc->tx_mbuf_alloc--); 3292 } 3293 } 3294 3295 /* Clear each TX chain page. 
*/ 3296 for (i = 0; i < TX_PAGES; i++) 3297 bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ); 3298 3299 /* Check if we lost any mbufs in the process. */ 3300 DBRUNIF((sc->tx_mbuf_alloc), 3301 printf("%s: Memory leak! Lost %d mbufs " 3302 "from tx chain!\n", 3303 sc->tx_mbuf_alloc)); 3304 3305 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3306} 3307 3308/****************************************************************************/ 3309/* Allocate memory and initialize the RX data structures. */ 3310/* */ 3311/* Returns: */ 3312/* 0 for success, positive value for failure. */ 3313/****************************************************************************/ 3314int 3315bnx_init_rx_chain(struct bnx_softc *sc) 3316{ 3317 struct rx_bd *rxbd; 3318 int i, rc = 0; 3319 u16 prod, chain_prod; 3320 u32 prod_bseq, val; 3321 3322 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3323 3324 /* Initialize the RX producer and consumer indices. */ 3325 sc->rx_prod = 0; 3326 sc->rx_cons = 0; 3327 sc->rx_prod_bseq = 0; 3328 sc->free_rx_bd = BNX_RX_SLACK_SPACE; 3329 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 3330 3331 /* Initialize the RX next pointer chain entries. */ 3332 for (i = 0; i < RX_PAGES; i++) { 3333 int j; 3334 3335 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 3336 3337 /* Check if we've reached the last page. */ 3338 if (i == (RX_PAGES - 1)) 3339 j = 0; 3340 else 3341 j = i + 1; 3342 3343 /* Setup the chain page pointers. */ 3344 rxbd->rx_bd_haddr_hi = htole32(BNX_ADDR_HI(sc->rx_bd_chain_paddr[j])); 3345 rxbd->rx_bd_haddr_lo = htole32(BNX_ADDR_LO(sc->rx_bd_chain_paddr[j])); 3346 } 3347 3348 /* Initialize the context ID for an L2 RX chain. */ 3349 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; 3350 val |= BNX_L2CTX_CTX_TYPE_SIZE_L2; 3351 val |= 0x02 << 8; 3352 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 3353 3354 /* Point the hardware to the first page in the chain. 
*/ 3355 val = BNX_ADDR_HI(sc->rx_bd_chain_paddr[0]); 3356 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 3357 val = BNX_ADDR_LO(sc->rx_bd_chain_paddr[0]); 3358 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 3359 3360 /* Allocate mbuf clusters for the rx_bd chain. */ 3361 prod = prod_bseq = 0; 3362 while (prod < BNX_RX_SLACK_SPACE) { 3363 chain_prod = RX_CHAIN_IDX(prod); 3364 if (bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) { 3365 printf("%s: Error filling RX chain: rx_bd[0x%04X]!\n", 3366 chain_prod); 3367 rc = ENOBUFS; 3368 break; 3369 } 3370 prod = NEXT_RX_BD(prod); 3371 } 3372 3373 /* Save the RX chain producer index. */ 3374 sc->rx_prod = prod; 3375 sc->rx_prod_bseq = prod_bseq; 3376 3377 for (i = 0; i < RX_PAGES; i++) { 3378 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 3379 sc->rx_bd_chain_map[i]->dm_mapsize, 3380 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3381 } 3382 3383 /* Tell the chip about the waiting rx_bd's. */ 3384 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 3385 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 3386 3387 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 3388 3389 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3390 3391 return(rc); 3392} 3393 3394/****************************************************************************/ 3395/* Free memory and clear the RX data structures. */ 3396/* */ 3397/* Returns: */ 3398/* Nothing. */ 3399/****************************************************************************/ 3400void 3401bnx_free_rx_chain(struct bnx_softc *sc) 3402{ 3403 int i; 3404 3405 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3406 3407 /* Free any mbufs still in the RX mbuf chain. 
*/ 3408 for (i = 0; i < TOTAL_RX_BD; i++) { 3409 if (sc->rx_mbuf_ptr[i] != NULL) { 3410 if (sc->rx_mbuf_map[i] != NULL) 3411 bus_dmamap_sync(sc->bnx_dmatag, 3412 sc->rx_mbuf_map[i], 0, 3413 sc->rx_mbuf_map[i]->dm_mapsize, 3414 BUS_DMASYNC_POSTREAD); 3415 m_freem(sc->rx_mbuf_ptr[i]); 3416 sc->rx_mbuf_ptr[i] = NULL; 3417 DBRUNIF(1, sc->rx_mbuf_alloc--); 3418 } 3419 } 3420 3421 /* Clear each RX chain page. */ 3422 for (i = 0; i < RX_PAGES; i++) 3423 bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 3424 3425 /* Check if we lost any mbufs in the process. */ 3426 DBRUNIF((sc->rx_mbuf_alloc), 3427 printf("%s: Memory leak! Lost %d mbufs from rx chain!\n", 3428 sc->rx_mbuf_alloc)); 3429 3430 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 3431} 3432 3433/****************************************************************************/ 3434/* Set media options. */ 3435/* */ 3436/* Returns: */ 3437/* 0 for success, positive value for failure. */ 3438/****************************************************************************/ 3439int 3440bnx_ifmedia_upd(struct ifnet *ifp) 3441{ 3442 struct bnx_softc *sc; 3443 struct mii_data *mii; 3444 struct ifmedia *ifm; 3445 int rc = 0; 3446 3447 sc = ifp->if_softc; 3448 ifm = &sc->bnx_ifmedia; 3449 3450 /* DRC - ToDo: Add SerDes support. */ 3451 3452 mii = &sc->bnx_mii; 3453 sc->bnx_link = 0; 3454 if (mii->mii_instance) { 3455 struct mii_softc *miisc; 3456 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 3457 miisc = LIST_NEXT(miisc, mii_list)) 3458 mii_phy_reset(miisc); 3459 } 3460 mii_mediachg(mii); 3461 3462 return(rc); 3463} 3464 3465/****************************************************************************/ 3466/* Reports current media status. */ 3467/* */ 3468/* Returns: */ 3469/* Nothing. 
*/ 3470/****************************************************************************/ 3471void 3472bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3473{ 3474 struct bnx_softc *sc; 3475 struct mii_data *mii; 3476 int s; 3477 3478 sc = ifp->if_softc; 3479 3480 s = splnet(); 3481 3482 mii = &sc->bnx_mii; 3483 3484 /* DRC - ToDo: Add SerDes support. */ 3485 3486 mii_pollstat(mii); 3487 ifmr->ifm_active = mii->mii_media_active; 3488 ifmr->ifm_status = mii->mii_media_status; 3489 3490 splx(s); 3491} 3492 3493/****************************************************************************/ 3494/* Handles PHY generated interrupt events. */ 3495/* */ 3496/* Returns: */ 3497/* Nothing. */ 3498/****************************************************************************/ 3499void 3500bnx_phy_intr(struct bnx_softc *sc) 3501{ 3502 u32 new_link_state, old_link_state; 3503 3504 new_link_state = sc->status_block->status_attn_bits & 3505 STATUS_ATTN_BITS_LINK_STATE; 3506 old_link_state = sc->status_block->status_attn_bits_ack & 3507 STATUS_ATTN_BITS_LINK_STATE; 3508 3509 /* Handle any changes if the link state has changed. */ 3510 if (new_link_state != old_link_state) { 3511 3512 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 3513 3514 sc->bnx_link = 0; 3515 timeout_del(&sc->bnx_timeout); 3516 bnx_tick(sc); 3517 3518 /* Update the status_attn_bits_ack field in the status block. */ 3519 if (new_link_state) { 3520 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 3521 STATUS_ATTN_BITS_LINK_STATE); 3522 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 3523 } else { 3524 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 3525 STATUS_ATTN_BITS_LINK_STATE); 3526 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 3527 } 3528 3529 } 3530 3531 /* Acknowledge the link change interrupt. */ 3532 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 3533} 3534 3535/****************************************************************************/ 3536/* Handles received frame interrupt events. 
*/ 3537/* */ 3538/* Returns: */ 3539/* Nothing. */ 3540/****************************************************************************/ 3541void 3542bnx_rx_intr(struct bnx_softc *sc) 3543{ 3544 struct status_block *sblk = sc->status_block; 3545 struct ifnet *ifp = &sc->arpcom.ac_if; 3546 u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod; 3547 u32 sw_prod_bseq; 3548 struct l2_fhdr *l2fhdr; 3549 int i; 3550 3551 DBRUNIF(1, sc->rx_interrupts++); 3552 3553 /* Prepare the RX chain pages to be accessed by the host CPU. */ 3554 for (i = 0; i < RX_PAGES; i++) 3555 bus_dmamap_sync(sc->bnx_dmatag, 3556 sc->rx_bd_chain_map[i], 0, 3557 sc->rx_bd_chain_map[i]->dm_mapsize, 3558 BUS_DMASYNC_POSTWRITE); 3559 3560 /* Get the hardware's view of the RX consumer index. */ 3561 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 3562 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 3563 hw_cons++; 3564 3565 /* Get working copies of the driver's view of the RX indices. */ 3566 sw_cons = sc->rx_cons; 3567 sw_prod = sc->rx_prod; 3568 sw_prod_bseq = sc->rx_prod_bseq; 3569 3570 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 3571 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 3572 __FUNCTION__, sw_prod, sw_cons, 3573 sw_prod_bseq); 3574 3575 /* Prevent speculative reads from getting ahead of the status block. */ 3576 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 3577 BUS_SPACE_BARRIER_READ); 3578 3579 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3580 sc->rx_low_watermark = sc->free_rx_bd); 3581 3582 /* 3583 * Scan through the receive chain as long 3584 * as there is work to do. 3585 */ 3586 while (sw_cons != hw_cons) { 3587 struct mbuf *m; 3588 struct rx_bd *rxbd; 3589 unsigned int len; 3590 u32 status; 3591 3592 /* Convert the producer/consumer indices to an actual rx_bd index. */ 3593 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 3594 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 3595 3596 /* Get the used rx_bd. 
*/ 3597 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 3598 sc->free_rx_bd++; 3599 3600 DBRUN(BNX_VERBOSE_RECV, 3601 printf("%s(): ", __FUNCTION__); 3602 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 3603 3604 /* The mbuf is stored with the last rx_bd entry of a packet. */ 3605 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 3606 3607 /* Validate that this is the last rx_bd. */ 3608 DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)), 3609 printf("%s: Unexpected mbuf found in rx_bd[0x%04X]!\n", 3610 sw_chain_cons); 3611 bnx_breakpoint(sc)); 3612 3613 /* DRC - ToDo: If the received packet is small, say less */ 3614 /* than 128 bytes, allocate a new mbuf here, */ 3615 /* copy the data to that mbuf, and recycle */ 3616 /* the mapped jumbo frame. */ 3617 3618 /* Unmap the mbuf from DMA space. */ 3619 bus_dmamap_sync(sc->bnx_dmatag, 3620 sc->rx_mbuf_map[sw_chain_cons], 0, 3621 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize, 3622 BUS_DMASYNC_POSTREAD); 3623 bus_dmamap_unload(sc->bnx_dmatag, 3624 sc->rx_mbuf_map[sw_chain_cons]); 3625 3626 /* Remove the mbuf from the driver's chain. */ 3627 m = sc->rx_mbuf_ptr[sw_chain_cons]; 3628 sc->rx_mbuf_ptr[sw_chain_cons] = NULL; 3629 3630 /* 3631 * Frames received on the NetXteme II are prepended 3632 * with the l2_fhdr structure which provides status 3633 * information about the received frame (including 3634 * VLAN tags and checksum info) and are also 3635 * automatically adjusted to align the IP header 3636 * (i.e. two null bytes are inserted before the 3637 * Ethernet header). 3638 */ 3639 l2fhdr = mtod(m, struct l2_fhdr *); 3640 3641 len = l2fhdr->l2_fhdr_pkt_len; 3642 status = l2fhdr->l2_fhdr_status; 3643 3644 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check), 3645 printf("Simulating l2_fhdr status error.\n"); 3646 status = status | L2_FHDR_ERRORS_PHY_DECODE); 3647 3648 /* Watch for unusual sized frames. 
*/ 3649 DBRUNIF(((len < BNX_MIN_MTU) || (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)), 3650 printf("%s: Unusual frame size found. " 3651 "Min(%d), Actual(%d), Max(%d)\n", 3652 (int) BNX_MIN_MTU, 3653 len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN); 3654 bnx_dump_mbuf(sc, m); 3655 bnx_breakpoint(sc)); 3656 3657 len -= ETHER_CRC_LEN; 3658 3659 /* Check the received frame for errors. */ 3660 if (status & (L2_FHDR_ERRORS_BAD_CRC | 3661 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 3662 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { 3663 3664 ifp->if_ierrors++; 3665 DBRUNIF(1, sc->l2fhdr_status_errors++); 3666 3667 /* Reuse the mbuf for a new frame. */ 3668 if (bnx_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) { 3669 3670 DBRUNIF(1, bnx_breakpoint(sc)); 3671 panic("%s: Can't reuse RX mbuf!\n", sc->bnx_dev.dv_xname); 3672 3673 } 3674 goto bnx_rx_int_next_rx; 3675 } 3676 3677 /* 3678 * Get a new mbuf for the rx_bd. If no new 3679 * mbufs are available then reuse the current mbuf, 3680 * log an ierror on the interface, and generate 3681 * an error in the system log. 3682 */ 3683 if (bnx_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) { 3684 3685 DBRUN(BNX_WARN, 3686 printf("%s: Failed to allocate " 3687 "new mbuf, incoming frame dropped!\n")); 3688 3689 ifp->if_ierrors++; 3690 3691 /* Try and reuse the exisitng mbuf. */ 3692 if (bnx_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) { 3693 3694 DBRUNIF(1, bnx_breakpoint(sc)); 3695 panic("%s: Double mbuf allocation failure!", sc->bnx_dev.dv_xname); 3696 3697 } 3698 goto bnx_rx_int_next_rx; 3699 } 3700 3701 /* Skip over the l2_fhdr when passing the data up the stack. */ 3702 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN); 3703 3704 /* Adjust the packet length to match the received data. */ 3705 m->m_pkthdr.len = m->m_len = len; 3706 3707 /* Send the packet to the appropriate interface. 
*/ 3708 m->m_pkthdr.rcvif = ifp; 3709 3710 DBRUN(BNX_VERBOSE_RECV, 3711 struct ether_header *eh; 3712 eh = mtod(m, struct ether_header *); 3713 printf("%s: to: %6D, from: %6D, type: 0x%04X\n", 3714 __FUNCTION__, eh->ether_dhost, ":", 3715 eh->ether_shost, ":", htons(eh->ether_type))); 3716 3717#ifdef BNX_CKSUM 3718 /* Validate the checksum if offload enabled. */ 3719 if (ifp->if_capenable & IFCAP_RXCSUM) { 3720 3721 /* Check for an IP datagram. */ 3722 if (status & L2_FHDR_STATUS_IP_DATAGRAM) { 3723 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3724 3725 /* Check if the IP checksum is valid. */ 3726 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) 3727 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3728 else 3729 DBPRINT(sc, BNX_WARN_SEND, 3730 "%s(): Invalid IP checksum = 0x%04X!\n", 3731 __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum); 3732 } 3733 3734 /* Check for a valid TCP/UDP frame. */ 3735 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3736 L2_FHDR_STATUS_UDP_DATAGRAM)) { 3737 3738 /* Check for a good TCP/UDP checksum. */ 3739 if ((status & (L2_FHDR_ERRORS_TCP_XSUM | 3740 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 3741 m->m_pkthdr.csum_data = 3742 l2fhdr->l2_fhdr_tcp_udp_xsum; 3743 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID 3744 | CSUM_PSEUDO_HDR); 3745 } else 3746 DBPRINT(sc, BNX_WARN_SEND, 3747 "%s(): Invalid TCP/UDP checksum = 0x%04X!\n", 3748 __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum); 3749 } 3750 } 3751#endif 3752 3753#if NBPFILTER > 0 3754 /* 3755 * Handle BPF listeners. Let the BPF 3756 * user see the packet. 3757 */ 3758 if (ifp->if_bpf) 3759 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 3760#endif 3761 3762 /* Pass the mbuf off to the upper layers. 
*/ 3763 ifp->if_ipackets++; 3764 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(): Passing received frame up.\n", 3765 __FUNCTION__); 3766 ether_input_mbuf(ifp, m); 3767 DBRUNIF(1, sc->rx_mbuf_alloc--); 3768 3769bnx_rx_int_next_rx: 3770 sw_prod = NEXT_RX_BD(sw_prod); 3771 } 3772 3773 sw_cons = NEXT_RX_BD(sw_cons); 3774 3775 /* Refresh hw_cons to see if there's new work */ 3776 if (sw_cons == hw_cons) { 3777 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 3778 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 3779 hw_cons++; 3780 } 3781 3782 /* Prevent speculative reads from getting ahead of the status block. */ 3783 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 3784 BUS_SPACE_BARRIER_READ); 3785 } 3786 3787 for (i = 0; i < RX_PAGES; i++) 3788 bus_dmamap_sync(sc->bnx_dmatag, 3789 sc->rx_bd_chain_map[i], 0, 3790 sc->rx_bd_chain_map[i]->dm_mapsize, 3791 BUS_DMASYNC_PREWRITE); 3792 3793 sc->rx_cons = sw_cons; 3794 sc->rx_prod = sw_prod; 3795 sc->rx_prod_bseq = sw_prod_bseq; 3796 3797 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 3798 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 3799 3800 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 3801 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 3802 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 3803} 3804 3805/****************************************************************************/ 3806/* Handles transmit completion interrupt events. */ 3807/* */ 3808/* Returns: */ 3809/* Nothing. */ 3810/****************************************************************************/ 3811void 3812bnx_tx_intr(struct bnx_softc *sc) 3813{ 3814 struct status_block *sblk = sc->status_block; 3815 struct ifnet *ifp = &sc->arpcom.ac_if; 3816 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 3817 3818 DBRUNIF(1, sc->tx_interrupts++); 3819 3820 /* Get the hardware's view of the TX consumer index. 
*/ 3821 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 3822 3823 /* Skip to the next entry if this is a chain page pointer. */ 3824 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 3825 hw_tx_cons++; 3826 3827 sw_tx_cons = sc->tx_cons; 3828 3829 /* Prevent speculative reads from getting ahead of the status block. */ 3830 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 3831 BUS_SPACE_BARRIER_READ); 3832 3833 /* Cycle through any completed TX chain page entries. */ 3834 while (sw_tx_cons != hw_tx_cons) { 3835#ifdef BNX_DEBUG 3836 struct tx_bd *txbd = NULL; 3837#endif 3838 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 3839 3840 DBPRINT(sc, BNX_INFO_SEND, 3841 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, " 3842 "sw_tx_chain_cons = 0x%04X\n", 3843 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 3844 3845 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 3846 printf("%s: TX chain consumer out of range! " 3847 " 0x%04X > 0x%04X\n", 3848 sw_tx_chain_cons, 3849 (int) MAX_TX_BD); 3850 bnx_breakpoint(sc)); 3851 3852 DBRUNIF(1, 3853 txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)] 3854 [TX_IDX(sw_tx_chain_cons)]); 3855 3856 DBRUNIF((txbd == NULL), 3857 printf("%s: Unexpected NULL tx_bd[0x%04X]!\n", 3858 sw_tx_chain_cons); 3859 bnx_breakpoint(sc)); 3860 3861 DBRUN(BNX_INFO_SEND, 3862 printf("%s: ", __FUNCTION__); 3863 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 3864 3865 /* 3866 * Free the associated mbuf. Remember 3867 * that only the last tx_bd of a packet 3868 * has an mbuf pointer and DMA map. 3869 */ 3870 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { 3871 3872 /* Validate that this is the last tx_bd. 
*/ 3873 DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)), 3874 printf("%s: tx_bd END flag not set but " 3875 "txmbuf == NULL!\n"); 3876 bnx_breakpoint(sc)); 3877 3878 DBRUN(BNX_INFO_SEND, 3879 printf("%s: Unloading map/freeing mbuf " 3880 "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons)); 3881 3882 /* Unmap the mbuf. */ 3883 bus_dmamap_unload(sc->bnx_dmatag, 3884 sc->tx_mbuf_map[sw_tx_chain_cons]); 3885 3886 /* Free the mbuf. */ 3887 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); 3888 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; 3889 DBRUNIF(1, sc->tx_mbuf_alloc--); 3890 3891 ifp->if_opackets++; 3892 } 3893 3894 sc->used_tx_bd--; 3895 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 3896 3897 /* Refresh hw_cons to see if there's new work. */ 3898 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 3899 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 3900 hw_tx_cons++; 3901 3902 /* Prevent speculative reads from getting ahead of the status block. */ 3903 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 3904 BUS_SPACE_BARRIER_READ); 3905 } 3906 3907 /* Clear the TX timeout timer. */ 3908 ifp->if_timer = 0; 3909 3910 /* Clear the tx hardware queue full flag. */ 3911 if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) { 3912 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 3913 printf("%s: TX chain is open for business! Used tx_bd = %d\n", 3914 sc->used_tx_bd)); 3915 ifp->if_flags &= ~IFF_OACTIVE; 3916 } 3917 3918 sc->tx_cons = sw_tx_cons; 3919} 3920 3921/****************************************************************************/ 3922/* Disables interrupt generation. */ 3923/* */ 3924/* Returns: */ 3925/* Nothing. 
*/ 3926/****************************************************************************/ 3927void 3928bnx_disable_intr(struct bnx_softc *sc) 3929{ 3930 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 3931 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 3932 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 3933} 3934 3935/****************************************************************************/ 3936/* Enables interrupt generation. */ 3937/* */ 3938/* Returns: */ 3939/* Nothing. */ 3940/****************************************************************************/ 3941void 3942bnx_enable_intr(struct bnx_softc *sc) 3943{ 3944 u32 val; 3945 3946 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 3947 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 3948 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 3949 3950 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 3951 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 3952 3953 val = REG_RD(sc, BNX_HC_COMMAND); 3954 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 3955} 3956 3957/****************************************************************************/ 3958/* Handles controller initialization. */ 3959/* */ 3960/* Returns: */ 3961/* Nothing. */ 3962/****************************************************************************/ 3963void 3964bnx_init(void *xsc) 3965{ 3966 struct bnx_softc *sc = (struct bnx_softc *)xsc; 3967 struct ifnet *ifp = &sc->arpcom.ac_if; 3968 u32 ether_mtu; 3969 int s; 3970 3971 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__); 3972 3973 s = splnet(); 3974 3975 bnx_stop(sc); 3976 3977 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) { 3978 printf("%s: Controller reset failed!\n"); 3979 goto bnx_init_locked_exit; 3980 } 3981 3982 if (bnx_chipinit(sc)) { 3983 printf("%s: Controller initialization failed!\n"); 3984 goto bnx_init_locked_exit; 3985 } 3986 3987 if (bnx_blockinit(sc)) { 3988 printf("%s: Block initialization failed!\n"); 3989 goto bnx_init_locked_exit; 3990 } 3991 3992 /* Load our MAC address. 
*/ 3993 bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN); 3994 bnx_set_mac_addr(sc); 3995 3996 /* Calculate and program the Ethernet MTU size. */ 3997#if 0 3998 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN; 3999#else 4000 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN; 4001#endif 4002 4003 DBPRINT(sc, BNX_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu); 4004 4005 /* 4006 * Program the mtu and enable jumbo frame 4007 * support. Also set the mbuf 4008 * allocation count for RX frames. 4009 */ 4010#if 0 4011 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu | 4012 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA); 4013 sc->mbuf_alloc_size = BNX_MAX_MRU; /* MJUM9BYTES */ 4014#else 4015 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu); 4016 sc->mbuf_alloc_size = MCLBYTES; 4017#endif 4018 4019 /* Calculate the RX Ethernet frame size for rx_bd's. */ 4020 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8; 4021 4022 DBPRINT(sc, BNX_INFO, 4023 "%s(): mclbytes = %d, mbuf_alloc_size = %d, " 4024 "max_frame_size = %d\n", 4025 __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size); 4026 4027 /* Program appropriate promiscuous/multicast filtering. */ 4028 bnx_set_rx_mode(sc); 4029 4030 /* Init RX buffer descriptor chain. */ 4031 bnx_init_rx_chain(sc); 4032 4033 /* Init TX buffer descriptor chain. */ 4034 bnx_init_tx_chain(sc); 4035 4036 /* Enable host interrupts. */ 4037 bnx_enable_intr(sc); 4038 4039 bnx_ifmedia_upd(ifp); 4040 4041 ifp->if_flags |= IFF_RUNNING; 4042 ifp->if_flags &= ~IFF_OACTIVE; 4043 4044 timeout_add(&sc->bnx_timeout, hz); 4045 4046bnx_init_locked_exit: 4047 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__); 4048 4049 splx(s); 4050 4051 return; 4052} 4053 4054/****************************************************************************/ 4055/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ 4056/* memory visible to the controller. */ 4057/* */ 4058/* Returns: */ 4059/* 0 for success, positive value for failure. 
*/ 4060/****************************************************************************/ 4061int 4062bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m_head, u16 *prod, 4063 u16 *chain_prod, u32 *prod_bseq) 4064{ 4065 u32 vlan_tag_flags = 0; 4066#ifdef BNX_VLAN 4067 struct m_tag *mtag; 4068#endif 4069 struct bnx_dmamap_arg map_arg; 4070 bus_dmamap_t map; 4071 int i, rc = 0; 4072 4073#ifdef BNX_CKSUM 4074 /* Transfer any checksum offload flags to the bd. */ 4075 if (m_head->m_pkthdr.csum_flags) { 4076 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 4077 vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM; 4078 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 4079 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4080 } 4081#endif 4082 4083#ifdef BNX_VLAN 4084 /* Transfer any VLAN tags to the bd. */ 4085 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head); 4086 if (mtag != NULL) 4087 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG | 4088 (VLAN_TAG_VALUE(mtag) << 16)); 4089#endif 4090 4091 /* Map the mbuf into DMAable memory. */ 4092 map = sc->tx_mbuf_map[*chain_prod]; 4093 map_arg.sc = sc; 4094 map_arg.prod = *prod; 4095 map_arg.chain_prod = *chain_prod; 4096 map_arg.prod_bseq = *prod_bseq; 4097 map_arg.tx_flags = vlan_tag_flags; 4098 map_arg.maxsegs = USABLE_TX_BD - sc->used_tx_bd - 4099 BNX_TX_SLACK_SPACE; 4100 4101#if 0 4102 KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!")); 4103#endif 4104 4105 for (i = 0; i < TX_PAGES; i++) 4106 map_arg.tx_chain[i] = sc->tx_bd_chain[i]; 4107 4108 /* Map the mbuf into our DMA address space. */ 4109 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_head, 4110 BUS_DMA_NOWAIT)) { 4111 printf("%s: Error mapping mbuf into TX chain!\n", 4112 sc->bnx_dev.dv_xname); 4113 rc = ENOBUFS; 4114 goto bnx_tx_encap_exit; 4115 } 4116 bnx_dma_map_tx_desc(&map_arg, map); 4117 4118 /* 4119 * Ensure that the map for this transmission 4120 * is placed at the array index of the last 4121 * descriptor in this chain. 
This is done 4122 * because a single map is used for all 4123 * segments of the mbuf and we don't want to 4124 * delete the map before all of the segments 4125 * have been freed. 4126 */ 4127 sc->tx_mbuf_map[*chain_prod] = 4128 sc->tx_mbuf_map[map_arg.chain_prod]; 4129 sc->tx_mbuf_map[map_arg.chain_prod] = map; 4130 sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head; 4131 sc->used_tx_bd += map_arg.maxsegs; 4132 4133 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 4134 sc->tx_hi_watermark = sc->used_tx_bd); 4135 4136 DBRUNIF(1, sc->tx_mbuf_alloc++); 4137 4138 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, *chain_prod, 4139 map_arg.maxsegs)); 4140 4141 /* prod still points the last used tx_bd at this point. */ 4142 *prod = map_arg.prod; 4143 *chain_prod = map_arg.chain_prod; 4144 *prod_bseq = map_arg.prod_bseq; 4145 4146bnx_tx_encap_exit: 4147 4148 return(rc); 4149} 4150 4151/****************************************************************************/ 4152/* Main transmit routine. */ 4153/* */ 4154/* Returns: */ 4155/* Nothing. */ 4156/****************************************************************************/ 4157void 4158bnx_start(struct ifnet *ifp) 4159{ 4160 struct bnx_softc *sc = ifp->if_softc; 4161 struct mbuf *m_head = NULL; 4162 int count = 0; 4163 u16 tx_prod, tx_chain_prod; 4164 u32 tx_prod_bseq; 4165 4166 /* If there's no link or the transmit queue is empty then just exit. */ 4167 if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) { 4168 DBPRINT(sc, BNX_INFO_SEND, "%s(): No link or transmit queue empty.\n", 4169 __FUNCTION__); 4170 goto bnx_start_locked_exit; 4171 } 4172 4173 /* prod points to the next free tx_bd. 
*/ 4174 tx_prod = sc->tx_prod; 4175 tx_chain_prod = TX_CHAIN_IDX(tx_prod); 4176 tx_prod_bseq = sc->tx_prod_bseq; 4177 4178 DBPRINT(sc, BNX_INFO_SEND, 4179 "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, " 4180 "tx_prod_bseq = 0x%08X\n", 4181 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq); 4182 4183 /* Keep adding entries while there is space in the ring. */ 4184 while (sc->tx_mbuf_ptr[tx_chain_prod] == NULL) { 4185 4186 /* Check for any frames to send. */ 4187 IF_DEQUEUE(&ifp->if_snd, m_head); 4188 if (m_head == NULL) 4189 break; 4190 4191 /* 4192 * Pack the data into the transmit ring. If we 4193 * don't have room, place the mbuf back at the 4194 * head of the queue and set the OACTIVE flag 4195 * to wait for the NIC to drain the chain. 4196 */ 4197 if (bnx_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) { 4198 IF_PREPEND(&ifp->if_snd, m_head); 4199 ifp->if_flags |= IFF_OACTIVE; 4200 DBPRINT(sc, BNX_INFO_SEND, 4201 "TX chain is closed for business! Total tx_bd used = %d\n", 4202 sc->used_tx_bd); 4203 break; 4204 } 4205 4206 count++; 4207 4208#if NBPFILTER > 0 4209 /* Send a copy of the frame to any BPF listeners. */ 4210 if (ifp->if_bpf) 4211 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 4212#endif 4213 4214 tx_prod = NEXT_TX_BD(tx_prod); 4215 tx_chain_prod = TX_CHAIN_IDX(tx_prod); 4216 } 4217 4218 if (count == 0) { 4219 /* no packets were dequeued */ 4220 DBPRINT(sc, BNX_VERBOSE_SEND, "%s(): No packets were dequeued\n", 4221 __FUNCTION__); 4222 goto bnx_start_locked_exit; 4223 } 4224 4225 /* Update the driver's counters. */ 4226 sc->tx_prod = tx_prod; 4227 sc->tx_prod_bseq = tx_prod_bseq; 4228 4229 DBPRINT(sc, BNX_INFO_SEND, 4230 "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, " 4231 "tx_prod_bseq = 0x%08X\n", 4232 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq); 4233 4234 /* Start the transmit. 
*/ 4235 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod); 4236 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq); 4237 4238 /* Set the tx timeout. */ 4239 ifp->if_timer = BNX_TX_TIMEOUT; 4240 4241bnx_start_locked_exit: 4242 return; 4243} 4244 4245/****************************************************************************/ 4246/* Handles any IOCTL calls from the operating system. */ 4247/* */ 4248/* Returns: */ 4249/* 0 for success, positive value for failure. */ 4250/****************************************************************************/ 4251int 4252bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 4253{ 4254 struct bnx_softc *sc = ifp->if_softc; 4255 struct ifreq *ifr = (struct ifreq *) data; 4256 struct ifaddr *ifa = (struct ifaddr *)data; 4257 struct mii_data *mii; 4258 int s, error = 0; 4259 4260 s = splnet(); 4261 4262 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 4263 splx(s); 4264 return (error); 4265 } 4266 4267 switch (command) { 4268 case SIOCSIFADDR: 4269 ifp->if_flags |= IFF_UP; 4270 if (!(ifp->if_flags & IFF_RUNNING)) 4271 bnx_init(sc); 4272#ifdef INET 4273 if (ifa->ifa_addr->sa_family == AF_INET) 4274 arp_ifinit(&sc->arpcom, ifa); 4275#endif /* INET */ 4276 break; 4277 case SIOCSIFMTU: 4278 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu) 4279 error = EINVAL; 4280 else if (ifp->if_mtu != ifr->ifr_mtu) 4281 ifp->if_mtu = ifr->ifr_mtu; 4282 break; 4283 case SIOCSIFFLAGS: 4284 if (ifp->if_flags & IFF_UP) { 4285 if ((ifp->if_flags & IFF_RUNNING) && 4286 ((ifp->if_flags ^ sc->bnx_if_flags) & 4287 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 4288 bnx_set_rx_mode(sc); 4289 } else { 4290 if (!(ifp->if_flags & IFF_RUNNING)) 4291 bnx_init(ifp); 4292 } 4293 } else { 4294 if (ifp->if_flags & IFF_RUNNING) 4295 bnx_stop(sc); 4296 } 4297 sc->bnx_if_flags = ifp->if_flags; 4298 break; 4299 case SIOCADDMULTI: 4300 case SIOCDELMULTI: 4301 error = (command == SIOCADDMULTI) 4302 ? 
ether_addmulti(ifr, &sc->arpcom) 4303 : ether_delmulti(ifr, &sc->arpcom); 4304 4305 if (error == ENETRESET) { 4306 if (ifp->if_flags & IFF_RUNNING) 4307 bnx_set_rx_mode(sc); 4308 error = 0; 4309 } 4310 break; 4311 case SIOCSIFMEDIA: 4312 case SIOCGIFMEDIA: 4313 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n", 4314 sc->bnx_phy_flags); 4315 4316 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 4317 error = ifmedia_ioctl(ifp, ifr, 4318 &sc->bnx_ifmedia, command); 4319 } else { 4320 mii = &sc->bnx_mii; 4321 error = ifmedia_ioctl(ifp, ifr, 4322 &mii->mii_media, command); 4323 } 4324 break; 4325 default: 4326 error = ENOTTY; 4327 break; 4328 } 4329 4330 splx(s); 4331 4332 return (error); 4333} 4334 4335/****************************************************************************/ 4336/* Transmit timeout handler. */ 4337/* */ 4338/* Returns: */ 4339/* Nothing. */ 4340/****************************************************************************/ 4341void 4342bnx_watchdog(struct ifnet *ifp) 4343{ 4344 struct bnx_softc *sc = ifp->if_softc; 4345 4346 DBRUN(BNX_WARN_SEND, 4347 bnx_dump_driver_state(sc); 4348 bnx_dump_status_block(sc)); 4349 4350 printf("%s: Watchdog timeout occurred, resetting!\n"); 4351 4352 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */ 4353 4354 bnx_init(sc); 4355 4356 ifp->if_oerrors++; 4357} 4358 4359/* 4360 * Interrupt handler. 4361 */ 4362/****************************************************************************/ 4363/* Main interrupt entry point. Verifies that the controller generated the */ 4364/* interrupt and then calls a separate routine for handle the various */ 4365/* interrupt causes (PHY, TX, RX). */ 4366/* */ 4367/* Returns: */ 4368/* 0 for success, positive value for failure. 
*/ 4369/****************************************************************************/ 4370int 4371bnx_intr(void *xsc) 4372{ 4373 struct bnx_softc *sc; 4374 struct ifnet *ifp; 4375 u32 status_attn_bits; 4376 4377 sc = xsc; 4378 ifp = &sc->arpcom.ac_if; 4379 4380 DBRUNIF(1, sc->interrupts_generated++); 4381 4382 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 4383 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4384 4385 /* 4386 * If the hardware status block index 4387 * matches the last value read by the 4388 * driver and we haven't asserted our 4389 * interrupt then there's nothing to do. 4390 */ 4391 if ((sc->status_block->status_idx == sc->last_status_idx) && 4392 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) & BNX_PCICFG_MISC_STATUS_INTA_VALUE)) 4393 return (0); 4394 4395 /* Ack the interrupt and stop others from occuring. */ 4396 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 4397 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 4398 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4399 4400 /* Keep processing data as long as there is work to do. */ 4401 for (;;) { 4402 4403 status_attn_bits = sc->status_block->status_attn_bits; 4404 4405 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 4406 printf("Simulating unexpected status attention bit set."); 4407 status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR); 4408 4409 /* Was it a link change interrupt? */ 4410 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 4411 (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) 4412 bnx_phy_intr(sc); 4413 4414 /* If any other attention is asserted then the chip is toast. 
*/ 4415 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 4416 (sc->status_block->status_attn_bits_ack & 4417 ~STATUS_ATTN_BITS_LINK_STATE))) { 4418 4419 DBRUN(1, sc->unexpected_attentions++); 4420 4421 printf("%s: Fatal attention detected: 0x%08X\n", 4422 sc->status_block->status_attn_bits); 4423 4424 DBRUN(BNX_FATAL, 4425 if (bnx_debug_unexpected_attention == 0) 4426 bnx_breakpoint(sc)); 4427 4428 bnx_init(sc); 4429 return (1); 4430 } 4431 4432 /* Check for any completed RX frames. */ 4433 if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons) 4434 bnx_rx_intr(sc); 4435 4436 /* Check for any completed TX frames. */ 4437 if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons) 4438 bnx_tx_intr(sc); 4439 4440 /* Save the status block index value for use during the next interrupt. */ 4441 sc->last_status_idx = sc->status_block->status_idx; 4442 4443 /* Prevent speculative reads from getting ahead of the status block. */ 4444 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4445 BUS_SPACE_BARRIER_READ); 4446 4447 /* If there's no work left then exit the interrupt service routine. */ 4448 if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) && 4449 (sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons)) 4450 break; 4451 4452 } 4453 4454 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 4455 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 4456 4457 /* Re-enable interrupts. */ 4458 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 4459 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx | 4460 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4461 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 4462 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 4463 4464 /* Handle any frames that arrived while handling the interrupt. 
*/ 4465 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 4466 bnx_start(ifp); 4467 4468 return (1); 4469} 4470 4471/****************************************************************************/ 4472/* Programs the various packet receive modes (broadcast and multicast). */ 4473/* */ 4474/* Returns: */ 4475/* Nothing. */ 4476/****************************************************************************/ 4477void 4478bnx_set_rx_mode(struct bnx_softc *sc) 4479{ 4480 struct arpcom *ac = &sc->arpcom; 4481 struct ifnet *ifp = &ac->ac_if; 4482 struct ether_multi *enm; 4483 struct ether_multistep step; 4484 u32 hashes[4] = { 0, 0, 0, 0 }; 4485 u32 rx_mode, sort_mode; 4486 int h, i; 4487 4488 /* Initialize receive mode default settings. */ 4489 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS | 4490 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG); 4491 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN; 4492 4493 /* 4494 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 4495 * be enbled. 4496 */ 4497 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)) 4498 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG; 4499 4500 /* 4501 * Check for promiscuous, all multicast, or selected 4502 * multicast address filtering. 4503 */ 4504 if (ifp->if_flags & IFF_PROMISC) { 4505 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n"); 4506 4507 /* Enable promiscuous mode. */ 4508 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS; 4509 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN; 4510 } else if (ifp->if_flags & IFF_ALLMULTI) { 4511allmulti: 4512 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n"); 4513 4514 /* Enable all multicast addresses. */ 4515 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 4516 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff); 4517 sort_mode |= BNX_RPM_SORT_USER0_MC_EN; 4518 } else { 4519 /* Accept one or more multicast(s). 
*/ 4520 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n"); 4521 4522 ETHER_FIRST_MULTI(step, ac, enm); 4523 while (enm != NULL) { 4524 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 4525 ifp->if_flags |= IFF_ALLMULTI; 4526 goto allmulti; 4527 } 4528 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x7F; 4529 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 4530 ETHER_NEXT_MULTI(step, enm); 4531 } 4532 4533 for (i = 0; i < 4; i++) 4534 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]); 4535 4536 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN; 4537 } 4538 4539 /* Only make changes if the recive mode has actually changed. */ 4540 if (rx_mode != sc->rx_mode) { 4541 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n", 4542 rx_mode); 4543 4544 sc->rx_mode = rx_mode; 4545 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode); 4546 } 4547 4548 /* Disable and clear the exisitng sort before enabling a new sort. */ 4549 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0); 4550 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode); 4551 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA); 4552} 4553 4554/****************************************************************************/ 4555/* Called periodically to updates statistics from the controllers */ 4556/* statistics block. */ 4557/* */ 4558/* Returns: */ 4559/* Nothing. */ 4560/****************************************************************************/ 4561void 4562bnx_stats_update(struct bnx_softc *sc) 4563{ 4564 struct ifnet *ifp = &sc->arpcom.ac_if; 4565 struct statistics_block *stats; 4566 4567 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__); 4568 4569 stats = (struct statistics_block *) sc->stats_block; 4570 4571 /* 4572 * Update the interface statistics from the 4573 * hardware statistics. 
4574 */ 4575 ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions; 4576 4577 ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts + 4578 (u_long) stats->stat_EtherStatsOverrsizePkts + 4579 (u_long) stats->stat_IfInMBUFDiscards + 4580 (u_long) stats->stat_Dot3StatsAlignmentErrors + 4581 (u_long) stats->stat_Dot3StatsFCSErrors; 4582 4583 ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 4584 (u_long) stats->stat_Dot3StatsExcessiveCollisions + 4585 (u_long) stats->stat_Dot3StatsLateCollisions; 4586 4587 /* 4588 * Certain controllers don't report 4589 * carrier sense errors correctly. 4590 * See errata E11_5708CA0_1165. 4591 */ 4592 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 4593 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) 4594 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 4595 4596 /* 4597 * Update the sysctl statistics from the 4598 * hardware statistics. 4599 */ 4600 sc->stat_IfHCInOctets = 4601 ((u64) stats->stat_IfHCInOctets_hi << 32) + 4602 (u64) stats->stat_IfHCInOctets_lo; 4603 4604 sc->stat_IfHCInBadOctets = 4605 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 4606 (u64) stats->stat_IfHCInBadOctets_lo; 4607 4608 sc->stat_IfHCOutOctets = 4609 ((u64) stats->stat_IfHCOutOctets_hi << 32) + 4610 (u64) stats->stat_IfHCOutOctets_lo; 4611 4612 sc->stat_IfHCOutBadOctets = 4613 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + 4614 (u64) stats->stat_IfHCOutBadOctets_lo; 4615 4616 sc->stat_IfHCInUcastPkts = 4617 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + 4618 (u64) stats->stat_IfHCInUcastPkts_lo; 4619 4620 sc->stat_IfHCInMulticastPkts = 4621 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + 4622 (u64) stats->stat_IfHCInMulticastPkts_lo; 4623 4624 sc->stat_IfHCInBroadcastPkts = 4625 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + 4626 (u64) stats->stat_IfHCInBroadcastPkts_lo; 4627 4628 sc->stat_IfHCOutUcastPkts = 4629 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + 4630 (u64) 
stats->stat_IfHCOutUcastPkts_lo; 4631 4632 sc->stat_IfHCOutMulticastPkts = 4633 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) + 4634 (u64) stats->stat_IfHCOutMulticastPkts_lo; 4635 4636 sc->stat_IfHCOutBroadcastPkts = 4637 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 4638 (u64) stats->stat_IfHCOutBroadcastPkts_lo; 4639 4640 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 4641 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 4642 4643 sc->stat_Dot3StatsCarrierSenseErrors = 4644 stats->stat_Dot3StatsCarrierSenseErrors; 4645 4646 sc->stat_Dot3StatsFCSErrors = 4647 stats->stat_Dot3StatsFCSErrors; 4648 4649 sc->stat_Dot3StatsAlignmentErrors = 4650 stats->stat_Dot3StatsAlignmentErrors; 4651 4652 sc->stat_Dot3StatsSingleCollisionFrames = 4653 stats->stat_Dot3StatsSingleCollisionFrames; 4654 4655 sc->stat_Dot3StatsMultipleCollisionFrames = 4656 stats->stat_Dot3StatsMultipleCollisionFrames; 4657 4658 sc->stat_Dot3StatsDeferredTransmissions = 4659 stats->stat_Dot3StatsDeferredTransmissions; 4660 4661 sc->stat_Dot3StatsExcessiveCollisions = 4662 stats->stat_Dot3StatsExcessiveCollisions; 4663 4664 sc->stat_Dot3StatsLateCollisions = 4665 stats->stat_Dot3StatsLateCollisions; 4666 4667 sc->stat_EtherStatsCollisions = 4668 stats->stat_EtherStatsCollisions; 4669 4670 sc->stat_EtherStatsFragments = 4671 stats->stat_EtherStatsFragments; 4672 4673 sc->stat_EtherStatsJabbers = 4674 stats->stat_EtherStatsJabbers; 4675 4676 sc->stat_EtherStatsUndersizePkts = 4677 stats->stat_EtherStatsUndersizePkts; 4678 4679 sc->stat_EtherStatsOverrsizePkts = 4680 stats->stat_EtherStatsOverrsizePkts; 4681 4682 sc->stat_EtherStatsPktsRx64Octets = 4683 stats->stat_EtherStatsPktsRx64Octets; 4684 4685 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 4686 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 4687 4688 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 4689 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 4690 4691 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 4692 
stats->stat_EtherStatsPktsRx256Octetsto511Octets; 4693 4694 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 4695 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 4696 4697 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 4698 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 4699 4700 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 4701 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 4702 4703 sc->stat_EtherStatsPktsTx64Octets = 4704 stats->stat_EtherStatsPktsTx64Octets; 4705 4706 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 4707 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 4708 4709 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 4710 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 4711 4712 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 4713 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 4714 4715 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 4716 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 4717 4718 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 4719 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 4720 4721 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 4722 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 4723 4724 sc->stat_XonPauseFramesReceived = 4725 stats->stat_XonPauseFramesReceived; 4726 4727 sc->stat_XoffPauseFramesReceived = 4728 stats->stat_XoffPauseFramesReceived; 4729 4730 sc->stat_OutXonSent = 4731 stats->stat_OutXonSent; 4732 4733 sc->stat_OutXoffSent = 4734 stats->stat_OutXoffSent; 4735 4736 sc->stat_FlowControlDone = 4737 stats->stat_FlowControlDone; 4738 4739 sc->stat_MacControlFramesReceived = 4740 stats->stat_MacControlFramesReceived; 4741 4742 sc->stat_XoffStateEntered = 4743 stats->stat_XoffStateEntered; 4744 4745 sc->stat_IfInFramesL2FilterDiscards = 4746 stats->stat_IfInFramesL2FilterDiscards; 4747 4748 sc->stat_IfInRuleCheckerDiscards = 4749 stats->stat_IfInRuleCheckerDiscards; 4750 4751 sc->stat_IfInFTQDiscards = 4752 stats->stat_IfInFTQDiscards; 4753 4754 sc->stat_IfInMBUFDiscards = 4755 
stats->stat_IfInMBUFDiscards; 4756 4757 sc->stat_IfInRuleCheckerP4Hit = 4758 stats->stat_IfInRuleCheckerP4Hit; 4759 4760 sc->stat_CatchupInRuleCheckerDiscards = 4761 stats->stat_CatchupInRuleCheckerDiscards; 4762 4763 sc->stat_CatchupInFTQDiscards = 4764 stats->stat_CatchupInFTQDiscards; 4765 4766 sc->stat_CatchupInMBUFDiscards = 4767 stats->stat_CatchupInMBUFDiscards; 4768 4769 sc->stat_CatchupInRuleCheckerP4Hit = 4770 stats->stat_CatchupInRuleCheckerP4Hit; 4771 4772 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__); 4773} 4774 4775void 4776bnx_tick(void *xsc) 4777{ 4778 struct bnx_softc *sc = xsc; 4779 struct ifnet *ifp = &sc->arpcom.ac_if; 4780 struct mii_data *mii = NULL; 4781 u32 msg; 4782 4783 /* Tell the firmware that the driver is still running. */ 4784#ifdef BNX_DEBUG 4785 msg = (u32) BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE; 4786#else 4787 msg = (u32) ++sc->bnx_fw_drv_pulse_wr_seq; 4788#endif 4789 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg); 4790 4791 /* Update the statistics from the hardware statistics block. */ 4792 bnx_stats_update(sc); 4793 4794 /* Schedule the next tick. */ 4795 timeout_add(&sc->bnx_timeout, hz); 4796 4797 /* If link is up already up then we're done. */ 4798 if (sc->bnx_link) 4799 goto bnx_tick_locked_exit; 4800 4801 /* DRC - ToDo: Add SerDes support and check SerDes link here. */ 4802 4803 mii = &sc->bnx_mii; 4804 mii_tick(mii); 4805 4806 /* Check if the link has come up. */ 4807 if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE && 4808 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4809 sc->bnx_link++; 4810 /* Now that link is up, handle any outstanding TX traffic. 
*/ 4811 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4812 bnx_start(ifp); 4813 } 4814 4815bnx_tick_locked_exit: 4816 return; 4817} 4818 4819/****************************************************************************/ 4820/* BNX Debug Routines */ 4821/****************************************************************************/ 4822#ifdef BNX_DEBUG 4823 4824/****************************************************************************/ 4825/* Prints out information about an mbuf. */ 4826/* */ 4827/* Returns: */ 4828/* Nothing. */ 4829/****************************************************************************/ 4830void 4831bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m) 4832{ 4833 u32 val_hi, val_lo; 4834 struct mbuf *mp = m; 4835 4836 if (m == NULL) { 4837 /* Index out of range. */ 4838 printf("mbuf ptr is null!\n"); 4839 return; 4840 } 4841 4842 while (mp) { 4843 val_hi = BNX_ADDR_HI(mp); 4844 val_lo = BNX_ADDR_LO(mp); 4845 printf("mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ", 4846 val_hi, val_lo, mp->m_len); 4847 4848 if (mp->m_flags & M_EXT) 4849 printf("M_EXT "); 4850 if (mp->m_flags & M_PKTHDR) 4851 printf("M_PKTHDR "); 4852 printf("\n"); 4853 4854 if (mp->m_flags & M_EXT) { 4855 val_hi = BNX_ADDR_HI(mp->m_ext.ext_buf); 4856 val_lo = BNX_ADDR_LO(mp->m_ext.ext_buf); 4857 printf("- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n", 4858 val_hi, val_lo, mp->m_ext.ext_size); 4859 } 4860 4861 mp = mp->m_next; 4862 } 4863 4864 4865} 4866 4867/****************************************************************************/ 4868/* Prints out the mbufs in the TX mbuf chain. */ 4869/* */ 4870/* Returns: */ 4871/* Nothing. 
 */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf *m;
	int i;

	BNX_PRINTF(sc,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	/* Walk 'count' entries of the TX mbuf ring starting at chain_prod. */
	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

/*
 * This routine prints the RX mbuf chain.
 */
void
bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf *m;
	int i;

	BNX_PRINTF(sc,
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	/* Walk 'count' entries of the RX mbuf ring starting at chain_prod. */
	for (i = 0; i < count; i++) {
		m = sc->rx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

/* Prints a single TX buffer descriptor. */
void
bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* TX Chain page pointer. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
		    idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
		    "flags = 0x%08X\n", idx,
		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
}

/* Prints a single RX buffer descriptor. */
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX Chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
		    idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
		    "flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}

/* Prints a received frame's l2_fhdr (status, length, checksums). */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}

/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd *txbd;
	int i;

	/* First some info about the tx_bd chain structure. */
	BNX_PRINTF(sc,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc, "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);

	BNX_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    "   tx_bd data   "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd *rxbd;
	int i;

	/* First some info about the rx_bd chain structure. */
	BNX_PRINTF(sc,
	    "----------------------------"
	    " rx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc, "page size = 0x%08X, rx chain pages = 0x%08X\n",
	    (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);

	BNX_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);

	BNX_PRINTF(sc,
	    "----------------------------"
	    " rx_bd data "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	BNX_PRINTF(sc,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}

/*
 * This routine prints the status block.
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block *sblk;

	sblk = sc->status_block;

	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers;
	 * only print the ones that are non-zero. */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
sblk->status_rx_quick_consumer_index13); 5127 5128 if (sblk->status_rx_quick_consumer_index14 || 5129 sblk->status_rx_quick_consumer_index15) 5130 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 5131 sblk->status_rx_quick_consumer_index14, 5132 sblk->status_rx_quick_consumer_index15); 5133 5134 if (sblk->status_completion_producer_index || 5135 sblk->status_cmd_consumer_index) 5136 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 5137 sblk->status_completion_producer_index, 5138 sblk->status_cmd_consumer_index); 5139 5140 BNX_PRINTF(sc, "-------------------------------------------" 5141 "-----------------------------\n"); 5142} 5143 5144/* 5145 * This routine prints the statistics block. 5146 */ 5147void 5148bnx_dump_stats_block(struct bnx_softc *sc) 5149{ 5150 struct statistics_block *sblk; 5151 5152 sblk = sc->stats_block; 5153 5154 BNX_PRINTF(sc, "" 5155 "-----------------------------" 5156 " Stats Block " 5157 "-----------------------------\n"); 5158 5159 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, " 5160 "IfHcInBadOctets = 0x%08X:%08X\n", 5161 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo, 5162 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo); 5163 5164 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, " 5165 "IfHcOutBadOctets = 0x%08X:%08X\n", 5166 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo, 5167 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo); 5168 5169 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, " 5170 "IfHcInMulticastPkts = 0x%08X:%08X\n", 5171 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo, 5172 sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo); 5173 5174 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, " 5175 "IfHcOutUcastPkts = 0x%08X:%08X\n", 5176 sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo, 5177 sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo); 5178 5179 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, 
IfHcOutBroadcastPkts = 0x%08X:%08X\n", 5180 sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo, 5181 sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo); 5182 5183 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 5184 BNX_PRINTF(sc, "0x%08X : " 5185 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 5186 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 5187 5188 if (sblk->stat_Dot3StatsCarrierSenseErrors) 5189 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n", 5190 sblk->stat_Dot3StatsCarrierSenseErrors); 5191 5192 if (sblk->stat_Dot3StatsFCSErrors) 5193 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n", 5194 sblk->stat_Dot3StatsFCSErrors); 5195 5196 if (sblk->stat_Dot3StatsAlignmentErrors) 5197 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n", 5198 sblk->stat_Dot3StatsAlignmentErrors); 5199 5200 if (sblk->stat_Dot3StatsSingleCollisionFrames) 5201 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n", 5202 sblk->stat_Dot3StatsSingleCollisionFrames); 5203 5204 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 5205 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n", 5206 sblk->stat_Dot3StatsMultipleCollisionFrames); 5207 5208 if (sblk->stat_Dot3StatsDeferredTransmissions) 5209 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n", 5210 sblk->stat_Dot3StatsDeferredTransmissions); 5211 5212 if (sblk->stat_Dot3StatsExcessiveCollisions) 5213 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n", 5214 sblk->stat_Dot3StatsExcessiveCollisions); 5215 5216 if (sblk->stat_Dot3StatsLateCollisions) 5217 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n", 5218 sblk->stat_Dot3StatsLateCollisions); 5219 5220 if (sblk->stat_EtherStatsCollisions) 5221 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n", 5222 sblk->stat_EtherStatsCollisions); 5223 5224 if (sblk->stat_EtherStatsFragments) 5225 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n", 5226 sblk->stat_EtherStatsFragments); 5227 5228 if 
(sblk->stat_EtherStatsJabbers) 5229 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n", 5230 sblk->stat_EtherStatsJabbers); 5231 5232 if (sblk->stat_EtherStatsUndersizePkts) 5233 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n", 5234 sblk->stat_EtherStatsUndersizePkts); 5235 5236 if (sblk->stat_EtherStatsOverrsizePkts) 5237 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n", 5238 sblk->stat_EtherStatsOverrsizePkts); 5239 5240 if (sblk->stat_EtherStatsPktsRx64Octets) 5241 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n", 5242 sblk->stat_EtherStatsPktsRx64Octets); 5243 5244 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 5245 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 5246 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 5247 5248 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 5249 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n", 5250 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 5251 5252 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 5253 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n", 5254 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 5255 5256 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 5257 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n", 5258 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 5259 5260 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 5261 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n", 5262 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 5263 5264 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 5265 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n", 5266 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 5267 5268 if (sblk->stat_EtherStatsPktsTx64Octets) 5269 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n", 5270 sblk->stat_EtherStatsPktsTx64Octets); 5271 5272 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 5273 BNX_PRINTF(sc, "0x%08X : 
EtherStatsPktsTx65Octetsto127Octets\n", 5274 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 5275 5276 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 5277 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n", 5278 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 5279 5280 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 5281 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n", 5282 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 5283 5284 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 5285 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n", 5286 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 5287 5288 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 5289 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n", 5290 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 5291 5292 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 5293 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n", 5294 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 5295 5296 if (sblk->stat_XonPauseFramesReceived) 5297 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n", 5298 sblk->stat_XonPauseFramesReceived); 5299 5300 if (sblk->stat_XoffPauseFramesReceived) 5301 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n", 5302 sblk->stat_XoffPauseFramesReceived); 5303 5304 if (sblk->stat_OutXonSent) 5305 BNX_PRINTF(sc, "0x%08X : OutXonSent\n", 5306 sblk->stat_OutXonSent); 5307 5308 if (sblk->stat_OutXoffSent) 5309 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n", 5310 sblk->stat_OutXoffSent); 5311 5312 if (sblk->stat_FlowControlDone) 5313 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 5314 sblk->stat_FlowControlDone); 5315 5316 if (sblk->stat_MacControlFramesReceived) 5317 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 5318 sblk->stat_MacControlFramesReceived); 5319 5320 if (sblk->stat_XoffStateEntered) 5321 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 5322 sblk->stat_XoffStateEntered); 5323 5324 if 
(sblk->stat_IfInFramesL2FilterDiscards) 5325 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 5326 sblk->stat_IfInFramesL2FilterDiscards); 5327 5328 if (sblk->stat_IfInRuleCheckerDiscards) 5329 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 5330 sblk->stat_IfInRuleCheckerDiscards); 5331 5332 if (sblk->stat_IfInFTQDiscards) 5333 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 5334 sblk->stat_IfInFTQDiscards); 5335 5336 if (sblk->stat_IfInMBUFDiscards) 5337 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 5338 sblk->stat_IfInMBUFDiscards); 5339 5340 if (sblk->stat_IfInRuleCheckerP4Hit) 5341 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 5342 sblk->stat_IfInRuleCheckerP4Hit); 5343 5344 if (sblk->stat_CatchupInRuleCheckerDiscards) 5345 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 5346 sblk->stat_CatchupInRuleCheckerDiscards); 5347 5348 if (sblk->stat_CatchupInFTQDiscards) 5349 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 5350 sblk->stat_CatchupInFTQDiscards); 5351 5352 if (sblk->stat_CatchupInMBUFDiscards) 5353 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 5354 sblk->stat_CatchupInMBUFDiscards); 5355 5356 if (sblk->stat_CatchupInRuleCheckerP4Hit) 5357 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 5358 sblk->stat_CatchupInRuleCheckerP4Hit); 5359 5360 BNX_PRINTF(sc, 5361 "-----------------------------" 5362 "--------------" 5363 "-----------------------------\n"); 5364} 5365 5366void 5367bnx_dump_driver_state(struct bnx_softc *sc) 5368{ 5369 u32 val_hi, val_lo; 5370 5371 BNX_PRINTF(sc, 5372 "-----------------------------" 5373 " Driver State " 5374 "-----------------------------\n"); 5375 5376 val_hi = BNX_ADDR_HI(sc); 5377 val_lo = BNX_ADDR_LO(sc); 5378 BNX_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n", 5379 val_hi, val_lo); 5380 5381 val_hi = BNX_ADDR_HI(sc->status_block); 5382 val_lo = BNX_ADDR_LO(sc->status_block); 5383 BNX_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual 
address\n", 5384 val_hi, val_lo); 5385 5386 val_hi = BNX_ADDR_HI(sc->stats_block); 5387 val_lo = BNX_ADDR_LO(sc->stats_block); 5388 BNX_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n", 5389 val_hi, val_lo); 5390 5391 val_hi = BNX_ADDR_HI(sc->tx_bd_chain); 5392 val_lo = BNX_ADDR_LO(sc->tx_bd_chain); 5393 BNX_PRINTF(sc, 5394 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n", 5395 val_hi, val_lo); 5396 5397 val_hi = BNX_ADDR_HI(sc->rx_bd_chain); 5398 val_lo = BNX_ADDR_LO(sc->rx_bd_chain); 5399 BNX_PRINTF(sc, 5400 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n", 5401 val_hi, val_lo); 5402 5403 val_hi = BNX_ADDR_HI(sc->tx_mbuf_ptr); 5404 val_lo = BNX_ADDR_LO(sc->tx_mbuf_ptr); 5405 BNX_PRINTF(sc, 5406 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 5407 val_hi, val_lo); 5408 5409 val_hi = BNX_ADDR_HI(sc->rx_mbuf_ptr); 5410 val_lo = BNX_ADDR_LO(sc->rx_mbuf_ptr); 5411 BNX_PRINTF(sc, 5412 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 5413 val_hi, val_lo); 5414 5415 BNX_PRINTF(sc, " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 5416 sc->interrupts_generated); 5417 5418 BNX_PRINTF(sc, " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 5419 sc->rx_interrupts); 5420 5421 BNX_PRINTF(sc, " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 5422 sc->tx_interrupts); 5423 5424 BNX_PRINTF(sc, " 0x%08X - (sc->last_status_idx) status block index\n", 5425 sc->last_status_idx); 5426 5427 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 5428 sc->tx_prod); 5429 5430 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 5431 sc->tx_cons); 5432 5433 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 5434 sc->tx_prod_bseq); 5435 5436 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n", 5437 sc->rx_prod); 5438 5439 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n", 5440 sc->rx_cons); 5441 5442 BNX_PRINTF(sc, " 0x%08X 
- (sc->rx_prod_bseq) rx producer bseq index\n", 5443 sc->rx_prod_bseq); 5444 5445 BNX_PRINTF(sc, " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 5446 sc->rx_mbuf_alloc); 5447 5448 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 5449 sc->free_rx_bd); 5450 5451 BNX_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n", 5452 sc->rx_low_watermark, (u32) USABLE_RX_BD); 5453 5454 BNX_PRINTF(sc, " 0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n", 5455 sc->tx_mbuf_alloc); 5456 5457 BNX_PRINTF(sc, " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 5458 sc->rx_mbuf_alloc); 5459 5460 BNX_PRINTF(sc, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 5461 sc->used_tx_bd); 5462 5463 BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 5464 sc->tx_hi_watermark, (u32) USABLE_TX_BD); 5465 5466 BNX_PRINTF(sc, " 0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n", 5467 sc->mbuf_alloc_failed); 5468 5469 BNX_PRINTF(sc, 5470 "-----------------------------" 5471 "--------------" 5472 "-----------------------------\n"); 5473} 5474 5475void 5476bnx_dump_hw_state(struct bnx_softc *sc) 5477{ 5478 u32 val1; 5479 int i; 5480 5481 BNX_PRINTF(sc, 5482 "----------------------------" 5483 " Hardware State " 5484 "----------------------------\n"); 5485 5486 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver); 5487 5488 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS); 5489 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n", 5490 val1, BNX_MISC_ENABLE_STATUS_BITS); 5491 5492 val1 = REG_RD(sc, BNX_DMA_STATUS); 5493 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS); 5494 5495 val1 = REG_RD(sc, BNX_CTX_STATUS); 5496 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS); 5497 5498 val1 = REG_RD(sc, BNX_EMAC_STATUS); 5499 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BNX_EMAC_STATUS); 5500 5501 val1 = REG_RD(sc, BNX_RPM_STATUS); 5502 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, 
BNX_RPM_STATUS); 5503 5504 val1 = REG_RD(sc, BNX_TBDR_STATUS); 5505 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BNX_TBDR_STATUS); 5506 5507 val1 = REG_RD(sc, BNX_TDMA_STATUS); 5508 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BNX_TDMA_STATUS); 5509 5510 val1 = REG_RD(sc, BNX_HC_STATUS); 5511 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS); 5512 5513 BNX_PRINTF(sc, 5514 "----------------------------" 5515 "----------------" 5516 "----------------------------\n"); 5517 5518 BNX_PRINTF(sc, 5519 "----------------------------" 5520 " Register Dump " 5521 "----------------------------\n"); 5522 5523 for (i = 0x400; i < 0x8000; i += 0x10) 5524 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 5525 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 5526 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 5527 5528 BNX_PRINTF(sc, 5529 "----------------------------" 5530 "----------------" 5531 "----------------------------\n"); 5532} 5533 5534void 5535bnx_breakpoint(struct bnx_softc *sc) 5536{ 5537 5538 /* Unreachable code to shut the compiler up about unused functions. */ 5539 if (0) { 5540 bnx_dump_txbd(sc, 0, NULL); 5541 bnx_dump_rxbd(sc, 0, NULL); 5542 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 5543 bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD); 5544 bnx_dump_l2fhdr(sc, 0, NULL); 5545 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD); 5546 bnx_dump_rx_chain(sc, 0, USABLE_RX_BD); 5547 bnx_dump_status_block(sc); 5548 bnx_dump_stats_block(sc); 5549 bnx_dump_driver_state(sc); 5550 bnx_dump_hw_state(sc); 5551 } 5552 5553 bnx_dump_driver_state(sc); 5554 /* Print the important status block fields. */ 5555 bnx_dump_status_block(sc); 5556 5557#if 0 5558 /* Call the debugger. */ 5559 breakpoint(); 5560#endif 5561 5562 return; 5563} 5564#endif 5565