if_bce.c revision 215297
1/*- 2 * Copyright (c) 2006-2010 Broadcom Corporation 3 * David Christensen <davidch@broadcom.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written consent. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31#include <sys/cdefs.h> 32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 215297 2010-11-14 13:26:10Z marius $"); 33 34/* 35 * The following controllers are supported by this driver: 36 * BCM5706C A2, A3 37 * BCM5706S A2, A3 38 * BCM5708C B1, B2 39 * BCM5708S B1, B2 40 * BCM5709C A1, C0 41 * BCM5709S A1, C0 42 * BCM5716C C0 43 * BCM5716S C0 44 * 45 * The following controllers are not supported by this driver: 46 * BCM5706C A0, A1 (pre-production) 47 * BCM5706S A0, A1 (pre-production) 48 * BCM5708C A0, B0 (pre-production) 49 * BCM5708S A0, B0 (pre-production) 50 * BCM5709C A0 B0, B1, B2 (pre-production) 51 * BCM5709S A0, B0, B1, B2 (pre-production) 52 */ 53 54#include "opt_bce.h" 55 56#include <dev/bce/if_bcereg.h> 57#include <dev/bce/if_bcefw.h> 58 59/****************************************************************************/ 60/* BCE Debug Options */ 61/****************************************************************************/ 62#ifdef BCE_DEBUG 63 u32 bce_debug = BCE_WARN; 64 65 /* 0 = Never */ 66 /* 1 = 1 in 2,147,483,648 */ 67 /* 256 = 1 in 8,388,608 */ 68 /* 2048 = 1 in 1,048,576 */ 69 /* 65536 = 1 in 32,768 */ 70 /* 1048576 = 1 in 2,048 */ 71 /* 268435456 = 1 in 8 */ 72 /* 536870912 = 1 in 4 */ 73 /* 1073741824 = 1 in 2 */ 74 75 /* Controls how often the l2_fhdr frame error check will fail. */ 76 int l2fhdr_error_sim_control = 0; 77 78 /* Controls how often the unexpected attention check will fail. */ 79 int unexpected_attention_sim_control = 0; 80 81 /* Controls how often to simulate an mbuf allocation failure. */ 82 int mbuf_alloc_failed_sim_control = 0; 83 84 /* Controls how often to simulate a DMA mapping failure. */ 85 int dma_map_addr_failed_sim_control = 0; 86 87 /* Controls how often to simulate a bootcode failure. */ 88 int bootcode_running_failure_sim_control = 0; 89#endif 90 91/****************************************************************************/ 92/* BCE Build Time Options */ 93/****************************************************************************/ 94/* #define BCE_NVRAM_WRITE_SUPPORT 1 */ 95 96 97/****************************************************************************/ 98/* PCI Device ID Table */ 99/* */ 100/* Used by bce_probe() to identify the devices supported by this driver. */ 101/****************************************************************************/ 102#define BCE_DEVDESC_MAX 64 103 104static struct bce_type bce_devs[] = { 105 /* BCM5706C Controllers and OEM boards. */ 106 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 107 "HP NC370T Multifunction Gigabit Server Adapter" }, 108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 109 "HP NC370i Multifunction Gigabit Server Adapter" }, 110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 111 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 112 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 113 "HP NC371i Multifunction Gigabit Server Adapter" }, 114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 115 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 116 117 /* BCM5706S controllers and OEM boards. */ 118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 119 "HP NC370F Multifunction Gigabit Server Adapter" }, 120 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 121 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 122 123 /* BCM5708C controllers and OEM boards. */ 124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 125 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 127 "HP NC373i Multifunction Gigabit Server Adapter" }, 128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 129 "HP NC374m PCIe Multifunction Adapter" }, 130 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 131 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 132 133 /* BCM5708S controllers and OEM boards. */ 134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 135 "HP NC373m Multifunction Gigabit Server Adapter" }, 136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 137 "HP NC373i Multifunction Gigabit Server Adapter" }, 138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 139 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 140 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 141 "Broadcom NetXtreme II BCM5708 1000Base-SX" }, 142 143 /* BCM5709C controllers and OEM boards. */ 144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 145 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 147 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 149 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 150 151 /* BCM5709S controllers and OEM boards. */ 152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 153 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 155 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 157 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 158 159 /* BCM5716 controllers and OEM boards. */ 160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 161 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 162 163 { 0, 0, 0, 0, NULL } 164}; 165 166 167/****************************************************************************/ 168/* Supported Flash NVRAM device data. */ 169/****************************************************************************/ 170static struct flash_spec flash_table[] = 171{ 172#define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 173#define NONBUFFERED_FLAGS (BCE_NV_WREN) 174 175 /* Slow EEPROM */ 176 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 177 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 178 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 179 "EEPROM - slow"}, 180 /* Expansion entry 0001 */ 181 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 183 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 184 "Entry 0001"}, 185 /* Saifun SA25F010 (non-buffered flash) */ 186 /* strap, cfg1, & write1 need updates */ 187 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 189 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 190 "Non-buffered flash (128kB)"}, 191 /* Saifun SA25F020 (non-buffered flash) */ 192 /* strap, cfg1, & write1 need updates */ 193 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 194 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 195 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 196 "Non-buffered flash (256kB)"}, 197 /* Expansion entry 0100 */ 198 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 199 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 201 "Entry 0100"}, 202 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 203 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 204 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 205 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 206 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 207 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 208 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 209 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 210 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 211 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 212 /* Saifun SA25F005 (non-buffered flash) */ 213 /* strap, cfg1, & write1 need updates */ 214 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 216 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 217 "Non-buffered flash (64kB)"}, 218 /* Fast EEPROM */ 219 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 220 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 221 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 222 "EEPROM - fast"}, 223 /* Expansion entry 1001 */ 224 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 226 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 227 "Entry 1001"}, 228 /* Expansion entry 1010 */ 229 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 231 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 232 "Entry 1010"}, 233 /* ATMEL AT45DB011B (buffered flash) */ 234 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 235 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 236 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 237 "Buffered flash (128kB)"}, 238 /* Expansion entry 1100 */ 239 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 240 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 241 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 242 "Entry 1100"}, 243 /* Expansion entry 1101 */ 244 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 245 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 246 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 247 "Entry 1101"}, 248 /* Ateml Expansion entry 1110 */ 249 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 250 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 251 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 252 "Entry 1110 (Atmel)"}, 253 /* ATMEL AT45DB021B (buffered flash) */ 254 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 255 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 256 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 257 "Buffered flash (256kB)"}, 258}; 259 260/* 261 * The BCM5709 controllers transparently handle the 262 * differences between Atmel 264 byte pages and all 263 * flash devices which use 256 byte pages, so no 264 * logical-to-physical mapping is required in the 265 * driver. 266 */ 267static struct flash_spec flash_5709 = { 268 .flags = BCE_NV_BUFFERED, 269 .page_bits = BCM5709_FLASH_PAGE_BITS, 270 .page_size = BCM5709_FLASH_PAGE_SIZE, 271 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 272 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 273 .name = "5709/5716 buffered flash (256kB)", 274}; 275 276 277/****************************************************************************/ 278/* FreeBSD device entry points. */ 279/****************************************************************************/ 280static int bce_probe (device_t); 281static int bce_attach (device_t); 282static int bce_detach (device_t); 283static int bce_shutdown (device_t); 284 285 286/****************************************************************************/ 287/* BCE Debug Data Structure Dump Routines */ 288/****************************************************************************/ 289#ifdef BCE_DEBUG 290static u32 bce_reg_rd (struct bce_softc *, u32); 291static void bce_reg_wr (struct bce_softc *, u32, u32); 292static void bce_reg_wr16 (struct bce_softc *, u32, u16); 293static u32 bce_ctx_rd (struct bce_softc *, u32, u32); 294static void bce_dump_enet (struct bce_softc *, struct mbuf *); 295static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); 296static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); 297static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); 298#ifdef BCE_JUMBO_HDRSPLIT 299static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); 300#endif 301static void bce_dump_txbd (struct bce_softc *, 302 int, struct tx_bd *); 303static void bce_dump_rxbd (struct bce_softc *, 304 int, struct rx_bd *); 305#ifdef BCE_JUMBO_HDRSPLIT 306static void bce_dump_pgbd (struct bce_softc *, 307 int, struct rx_bd *); 308#endif 309static void bce_dump_l2fhdr (struct bce_softc *, 310 int, struct l2_fhdr *); 311static void bce_dump_ctx (struct bce_softc *, u16); 312static void bce_dump_ftqs (struct bce_softc *); 313static void bce_dump_tx_chain (struct bce_softc *, u16, int); 314static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int); 315#ifdef BCE_JUMBO_HDRSPLIT 316static void bce_dump_pg_chain (struct bce_softc *, u16, int); 317#endif 318static void bce_dump_status_block (struct bce_softc *); 319static void bce_dump_stats_block (struct bce_softc *); 320static void bce_dump_driver_state (struct bce_softc *); 321static void bce_dump_hw_state (struct bce_softc *); 322static void bce_dump_mq_regs (struct bce_softc *); 323static void bce_dump_bc_state (struct bce_softc *); 324static void bce_dump_txp_state (struct bce_softc *, int); 325static void bce_dump_rxp_state (struct bce_softc *, int); 326static void bce_dump_tpat_state (struct bce_softc *, int); 327static void bce_dump_cp_state (struct bce_softc *, int); 328static void bce_dump_com_state (struct bce_softc *, int); 329static void bce_dump_rv2p_state (struct bce_softc *); 330static void bce_breakpoint (struct bce_softc *); 331#endif 332 333 334/****************************************************************************/ 335/* BCE Register/Memory Access Routines */ 336/****************************************************************************/ 337static u32 bce_reg_rd_ind (struct bce_softc *, u32); 338static void bce_reg_wr_ind (struct bce_softc *, u32, u32); 339static void bce_shmem_wr (struct bce_softc *, u32, u32); 340static u32 bce_shmem_rd (struct bce_softc *, u32); 341static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); 342static int bce_miibus_read_reg (device_t, int, int); 343static int bce_miibus_write_reg (device_t, int, int, int); 344static void bce_miibus_statchg (device_t); 345 346#ifdef BCE_DEBUG 347static int sysctl_nvram_dump(SYSCTL_HANDLER_ARGS); 348#ifdef BCE_NVRAM_WRITE_SUPPORT 349static int sysctl_nvram_write(SYSCTL_HANDLER_ARGS); 350#endif 351#endif 352 353/****************************************************************************/ 354/* BCE NVRAM Access Routines */ 355/****************************************************************************/ 356static int bce_acquire_nvram_lock (struct bce_softc *); 357static int bce_release_nvram_lock (struct bce_softc *); 358static void bce_enable_nvram_access (struct bce_softc *); 359static void bce_disable_nvram_access (struct bce_softc *); 360static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); 361static int bce_init_nvram (struct bce_softc *); 362static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); 363static int bce_nvram_test (struct bce_softc *); 364#ifdef BCE_NVRAM_WRITE_SUPPORT 365static int bce_enable_nvram_write (struct bce_softc *); 366static void bce_disable_nvram_write (struct bce_softc *); 367static int bce_nvram_erase_page (struct bce_softc *, u32); 368static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); 369static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); 370#endif 371 372/****************************************************************************/ 373/* */ 374/****************************************************************************/ 375static void bce_get_media (struct bce_softc *); 376static void bce_init_media (struct bce_softc *); 377static void bce_dma_map_addr (void *, 378 bus_dma_segment_t *, int, int); 379static int bce_dma_alloc (device_t); 380static void bce_dma_free (struct bce_softc *); 381static void bce_release_resources (struct bce_softc *); 382 383/****************************************************************************/ 384/* BCE Firmware Synchronization and Load */ 385/****************************************************************************/ 386static int bce_fw_sync (struct bce_softc *, u32); 387static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32); 388static void bce_load_cpu_fw (struct bce_softc *, 389 struct cpu_reg *, struct fw_info *); 390static void bce_start_cpu (struct bce_softc *, struct cpu_reg *); 391static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *); 392static void bce_start_rxp_cpu (struct bce_softc *); 393static void bce_init_rxp_cpu (struct bce_softc *); 394static void bce_init_txp_cpu (struct bce_softc *); 395static void bce_init_tpat_cpu (struct bce_softc *); 396static void bce_init_cp_cpu (struct bce_softc *); 397static void bce_init_com_cpu (struct bce_softc *); 398static void bce_init_cpus (struct bce_softc *); 399 400static void bce_print_adapter_info (struct bce_softc *); 401static void bce_probe_pci_caps (device_t, struct bce_softc *); 402static void bce_stop (struct bce_softc *); 403static int bce_reset (struct bce_softc *, u32); 404static int bce_chipinit (struct bce_softc *); 405static int bce_blockinit (struct bce_softc *); 406 407static int bce_init_tx_chain (struct bce_softc *); 408static void bce_free_tx_chain (struct bce_softc *); 409 410static int bce_get_rx_buf (struct bce_softc *, 411 struct mbuf *, u16 *, u16 *, u32 *); 412static int bce_init_rx_chain (struct bce_softc *); 413static void bce_fill_rx_chain (struct bce_softc *); 414static void bce_free_rx_chain (struct bce_softc *); 415 416#ifdef BCE_JUMBO_HDRSPLIT 417static int bce_get_pg_buf (struct bce_softc *, 418 struct mbuf *, u16 *, u16 *); 419static int bce_init_pg_chain (struct bce_softc *); 420static void bce_fill_pg_chain (struct bce_softc *); 421static void bce_free_pg_chain (struct bce_softc *); 422#endif 423 424static struct mbuf *bce_tso_setup (struct bce_softc *, 425 struct mbuf **, u16 *); 426static int bce_tx_encap (struct bce_softc *, struct mbuf **); 427static void bce_start_locked (struct ifnet *); 428static void bce_start (struct ifnet *); 429static int bce_ioctl (struct ifnet *, u_long, caddr_t); 430static void bce_watchdog (struct bce_softc *); 431static int bce_ifmedia_upd (struct ifnet *); 432static int bce_ifmedia_upd_locked (struct ifnet *); 433static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); 434static void bce_init_locked (struct bce_softc *); 435static void bce_init (void *); 436static void bce_mgmt_init_locked (struct bce_softc *sc); 437 438static int bce_init_ctx (struct bce_softc *); 439static void bce_get_mac_addr (struct bce_softc *); 440static void bce_set_mac_addr (struct bce_softc *); 441static void bce_phy_intr (struct bce_softc *); 442static inline u16 bce_get_hw_rx_cons (struct bce_softc *); 443static void bce_rx_intr (struct bce_softc *); 444static void bce_tx_intr (struct bce_softc *); 445static void bce_disable_intr (struct bce_softc *); 446static void bce_enable_intr (struct bce_softc *, int); 447 448static void bce_intr (void *); 449static void bce_set_rx_mode (struct bce_softc *); 450static void bce_stats_update (struct bce_softc *); 451static void bce_tick (void *); 452static void bce_pulse (void *); 453static void bce_add_sysctls (struct bce_softc *); 454 455 456/****************************************************************************/ 457/* FreeBSD device dispatch table. */ 458/****************************************************************************/ 459static device_method_t bce_methods[] = { 460 /* Device interface (device_if.h) */ 461 DEVMETHOD(device_probe, bce_probe), 462 DEVMETHOD(device_attach, bce_attach), 463 DEVMETHOD(device_detach, bce_detach), 464 DEVMETHOD(device_shutdown, bce_shutdown), 465/* Supported by device interface but not used here. */ 466/* DEVMETHOD(device_identify, bce_identify), */ 467/* DEVMETHOD(device_suspend, bce_suspend), */ 468/* DEVMETHOD(device_resume, bce_resume), */ 469/* DEVMETHOD(device_quiesce, bce_quiesce), */ 470 471 /* Bus interface (bus_if.h) */ 472 DEVMETHOD(bus_print_child, bus_generic_print_child), 473 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 474 475 /* MII interface (miibus_if.h) */ 476 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 477 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 478 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 479/* Supported by MII interface but not used here. */ 480/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ 481/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ 482 483 { 0, 0 } 484}; 485 486static driver_t bce_driver = { 487 "bce", 488 bce_methods, 489 sizeof(struct bce_softc) 490}; 491 492static devclass_t bce_devclass; 493 494MODULE_DEPEND(bce, pci, 1, 1, 1); 495MODULE_DEPEND(bce, ether, 1, 1, 1); 496MODULE_DEPEND(bce, miibus, 1, 1, 1); 497 498DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0); 499DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0); 500 501 502/****************************************************************************/ 503/* Tunable device values */ 504/****************************************************************************/ 505SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters"); 506 507/* Allowable values are TRUE or FALSE */ 508static int bce_tso_enable = TRUE; 509TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable); 510SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, 511"TSO Enable/Disable"); 512 513/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ 514/* ToDo: Add MSI-X support. */ 515static int bce_msi_enable = 1; 516TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable); 517SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, 518"MSI-X|MSI|INTx selector"); 519 520/* ToDo: Add tunable to enable/disable strict MTU handling. */ 521/* Currently allows "loose" RX MTU checking (i.e. sets the */ 522/* H/W RX MTU to the size of the largest receive buffer, or */ 523/* 2048 bytes). This will cause a UNH failure but is more */ 524/* desireable from a functional perspective. */ 525 526 527/****************************************************************************/ 528/* Device probe function. */ 529/* */ 530/* Compares the device to the driver's list of supported devices and */ 531/* reports back to the OS whether this is the right driver for the device. */ 532/* */ 533/* Returns: */ 534/* BUS_PROBE_DEFAULT on success, positive value on failure. */ 535/****************************************************************************/ 536static int 537bce_probe(device_t dev) 538{ 539 struct bce_type *t; 540 struct bce_softc *sc; 541 char *descbuf; 542 u16 vid = 0, did = 0, svid = 0, sdid = 0; 543 544 t = bce_devs; 545 546 sc = device_get_softc(dev); 547 bzero(sc, sizeof(struct bce_softc)); 548 sc->bce_unit = device_get_unit(dev); 549 sc->bce_dev = dev; 550 551 /* Get the data for the device to be probed. */ 552 vid = pci_get_vendor(dev); 553 did = pci_get_device(dev); 554 svid = pci_get_subvendor(dev); 555 sdid = pci_get_subdevice(dev); 556 557 DBPRINT(sc, BCE_EXTREME_LOAD, 558 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 559 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 560 561 /* Look through the list of known devices for a match. */ 562 while(t->bce_name != NULL) { 563 564 if ((vid == t->bce_vid) && (did == t->bce_did) && 565 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && 566 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { 567 568 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 569 570 if (descbuf == NULL) 571 return(ENOMEM); 572 573 /* Print out the device identity. */ 574 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 575 t->bce_name, (((pci_read_config(dev, 576 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 577 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 578 579 device_set_desc_copy(dev, descbuf); 580 free(descbuf, M_TEMP); 581 return(BUS_PROBE_DEFAULT); 582 } 583 t++; 584 } 585 586 return(ENXIO); 587} 588 589 590/****************************************************************************/ 591/* PCI Capabilities Probe Function. */ 592/* */ 593/* Walks the PCI capabiites list for the device to find what features are */ 594/* supported. */ 595/* */ 596/* Returns: */ 597/* None. */ 598/****************************************************************************/ 599static void 600bce_print_adapter_info(struct bce_softc *sc) 601{ 602 int i = 0; 603 604 DBENTER(BCE_VERBOSE_LOAD); 605 606 if (bootverbose) { 607 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid); 608 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 609 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); 610 611 612 /* Bus info. */ 613 if (sc->bce_flags & BCE_PCIE_FLAG) { 614 printf("Bus (PCIe x%d, ", sc->link_width); 615 switch (sc->link_speed) { 616 case 1: printf("2.5Gbps); "); break; 617 case 2: printf("5Gbps); "); break; 618 default: printf("Unknown link speed); "); 619 } 620 } else { 621 printf("Bus (PCI%s, %s, %dMHz); ", 622 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), 623 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? 624 "32-bit" : "64-bit"), sc->bus_speed_mhz); 625 } 626 627 /* Firmware version and device features. */ 628 printf("B/C (%s); Flags (", sc->bce_bc_ver); 629 630 #ifdef BCE_JUMBO_HDRSPLIT 631 printf("SPLT"); 632 i++; 633 #endif 634 635 if (sc->bce_flags & BCE_USING_MSI_FLAG) { 636 if (i > 0) printf("|"); 637 printf("MSI"); i++; 638 } 639 640 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 641 if (i > 0) printf("|"); 642 printf("MSI-X"); i++; 643 } 644 645 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { 646 if (i > 0) printf("|"); 647 printf("2.5G"); i++; 648 } 649 650 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 651 if (i > 0) printf("|"); 652 printf("MFW); MFW (%s)\n", sc->bce_mfw_ver); 653 } else { 654 printf(")\n"); 655 } 656 } 657 658 DBEXIT(BCE_VERBOSE_LOAD); 659} 660 661 662/****************************************************************************/ 663/* PCI Capabilities Probe Function. */ 664/* */ 665/* Walks the PCI capabiites list for the device to find what features are */ 666/* supported. */ 667/* */ 668/* Returns: */ 669/* None. */ 670/****************************************************************************/ 671static void 672bce_probe_pci_caps(device_t dev, struct bce_softc *sc) 673{ 674 u32 reg; 675 676 DBENTER(BCE_VERBOSE_LOAD); 677 678 /* Check if PCI-X capability is enabled. */ 679 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 680 if (reg != 0) 681 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 682 } 683 684 /* Check if PCIe capability is enabled. */ 685 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 686 if (reg != 0) { 687 u16 link_status = pci_read_config(dev, reg + 0x12, 2); 688 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = " 689 "0x%08X\n", link_status); 690 sc->link_speed = link_status & 0xf; 691 sc->link_width = (link_status >> 4) & 0x3f; 692 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 693 sc->bce_flags |= BCE_PCIE_FLAG; 694 } 695 } 696 697 /* Check if MSI capability is enabled. */ 698 if (pci_find_extcap(dev, PCIY_MSI, ®) == 0) { 699 if (reg != 0) 700 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG; 701 } 702 703 /* Check if MSI-X capability is enabled. */ 704 if (pci_find_extcap(dev, PCIY_MSIX, ®) == 0) { 705 if (reg != 0) 706 sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG; 707 } 708 709 DBEXIT(BCE_VERBOSE_LOAD); 710} 711 712 713/****************************************************************************/ 714/* Device attach function. */ 715/* */ 716/* Allocates device resources, performs secondary chip identification, */ 717/* resets and initializes the hardware, and initializes driver instance */ 718/* variables. */ 719/* */ 720/* Returns: */ 721/* 0 on success, positive value on failure. */ 722/****************************************************************************/ 723static int 724bce_attach(device_t dev) 725{ 726 struct bce_softc *sc; 727 struct ifnet *ifp; 728 u32 val; 729 int error, rid, rc = 0; 730 731 sc = device_get_softc(dev); 732 sc->bce_dev = dev; 733 734 DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 735 736 sc->bce_unit = device_get_unit(dev); 737 738 /* Set initial device and PHY flags */ 739 sc->bce_flags = 0; 740 sc->bce_phy_flags = 0; 741 742 pci_enable_busmaster(dev); 743 744 /* Allocate PCI memory resources. */ 745 rid = PCIR_BAR(0); 746 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 747 &rid, RF_ACTIVE); 748 749 if (sc->bce_res_mem == NULL) { 750 BCE_PRINTF("%s(%d): PCI memory allocation failed\n", 751 __FILE__, __LINE__); 752 rc = ENXIO; 753 goto bce_attach_fail; 754 } 755 756 /* Get various resource handles. */ 757 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 758 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 759 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem); 760 761 bce_probe_pci_caps(dev, sc); 762 763 rid = 1; 764#if 0 765 /* Try allocating MSI-X interrupts. */ 766 if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) && 767 (bce_msi_enable >= 2) && 768 ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 769 &rid, RF_ACTIVE)) != NULL)) { 770 771 msi_needed = sc->bce_msi_count = 1; 772 773 if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) || 774 (sc->bce_msi_count != msi_needed)) { 775 BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d," 776 "Received = %d, error = %d\n", __FILE__, __LINE__, 777 msi_needed, sc->bce_msi_count, error); 778 sc->bce_msi_count = 0; 779 pci_release_msi(dev); 780 bus_release_resource(dev, SYS_RES_MEMORY, rid, 781 sc->bce_res_irq); 782 sc->bce_res_irq = NULL; 783 } else { 784 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n", 785 __FUNCTION__); 786 sc->bce_flags |= BCE_USING_MSIX_FLAG; 787 sc->bce_intr = bce_intr; 788 } 789 } 790#endif 791 792 /* Try allocating a MSI interrupt. */ 793 if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) && 794 (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) { 795 sc->bce_msi_count = 1; 796 if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) { 797 BCE_PRINTF("%s(%d): MSI allocation failed! " 798 "error = %d\n", __FILE__, __LINE__, error); 799 sc->bce_msi_count = 0; 800 pci_release_msi(dev); 801 } else { 802 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI " 803 "interrupt.\n", __FUNCTION__); 804 sc->bce_flags |= BCE_USING_MSI_FLAG; 805 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 806 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 807 sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG; 808 sc->bce_irq_rid = 1; 809 sc->bce_intr = bce_intr; 810 } 811 } 812 813 /* Try allocating a legacy interrupt. */ 814 if (sc->bce_msi_count == 0) { 815 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n", 816 __FUNCTION__); 817 rid = 0; 818 sc->bce_intr = bce_intr; 819 } 820 821 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 822 &rid, RF_SHAREABLE | RF_ACTIVE); 823 824 sc->bce_irq_rid = rid; 825 826 /* Report any IRQ allocation errors. */ 827 if (sc->bce_res_irq == NULL) { 828 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n", 829 __FILE__, __LINE__); 830 rc = ENXIO; 831 goto bce_attach_fail; 832 } 833 834 /* Initialize mutex for the current device instance. */ 835 BCE_LOCK_INIT(sc, device_get_nameunit(dev)); 836 837 /* 838 * Configure byte swap and enable indirect register access. 839 * Rely on CPU to do target byte swapping on big endian systems. 840 * Access to registers outside of PCI configurtion space are not 841 * valid until this is done. 842 */ 843 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 844 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 845 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 846 847 /* Save ASIC revsion info. */ 848 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 849 850 /* Weed out any non-production controller revisions. */ 851 switch(BCE_CHIP_ID(sc)) { 852 case BCE_CHIP_ID_5706_A0: 853 case BCE_CHIP_ID_5706_A1: 854 case BCE_CHIP_ID_5708_A0: 855 case BCE_CHIP_ID_5708_B0: 856 case BCE_CHIP_ID_5709_A0: 857 case BCE_CHIP_ID_5709_B0: 858 case BCE_CHIP_ID_5709_B1: 859 case BCE_CHIP_ID_5709_B2: 860 BCE_PRINTF("%s(%d): Unsupported controller " 861 "revision (%c%d)!\n", __FILE__, __LINE__, 862 (((pci_read_config(dev, PCIR_REVID, 4) & 863 0xf0) >> 4) + 'A'), (pci_read_config(dev, 864 PCIR_REVID, 4) & 0xf)); 865 rc = ENODEV; 866 goto bce_attach_fail; 867 } 868 869 /* 870 * The embedded PCIe to PCI-X bridge (EPB) 871 * in the 5708 cannot address memory above 872 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 873 */ 874 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 875 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR; 876 else 877 sc->max_bus_addr = BUS_SPACE_MAXADDR; 878 879 /* 880 * Find the base address for shared memory access. 881 * Newer versions of bootcode use a signature and offset 882 * while older versions use a fixed address. 883 */ 884 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 885 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG) 886 /* Multi-port devices use different offsets in shared memory. */ 887 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 + 888 (pci_get_function(sc->bce_dev) << 2)); 889 else 890 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 891 892 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n", 893 __FUNCTION__, sc->bce_shmem_base); 894 895 /* Fetch the bootcode revision. */ 896 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 897 for (int i = 0, j = 0; i < 3; i++) { 898 u8 num; 899 900 num = (u8) (val >> (24 - (i * 8))); 901 for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 902 if (num >= k || !skip0 || k == 1) { 903 sc->bce_bc_ver[j++] = (num / k) + '0'; 904 skip0 = 0; 905 } 906 } 907 908 if (i != 2) 909 sc->bce_bc_ver[j++] = '.'; 910 } 911 912 /* Check if any management firwmare is enabled. */ 913 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 914 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 915 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 916 917 /* Allow time for firmware to enter the running state. */ 918 for (int i = 0; i < 30; i++) { 919 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 920 if (val & BCE_CONDITION_MFW_RUN_MASK) 921 break; 922 DELAY(10000); 923 } 924 925 /* Check if management firmware is running. */ 926 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 927 val &= BCE_CONDITION_MFW_RUN_MASK; 928 if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) && 929 (val != BCE_CONDITION_MFW_RUN_NONE)) { 930 u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 931 int i = 0; 932 933 /* Read the management firmware version string. */ 934 for (int j = 0; j < 3; j++) { 935 val = bce_reg_rd_ind(sc, addr + j * 4); 936 val = bswap32(val); 937 memcpy(&sc->bce_mfw_ver[i], &val, 4); 938 i += 4; 939 } 940 } else { 941 /* May cause firmware synchronization timeouts. */ 942 BCE_PRINTF("%s(%d): Management firmware enabled " 943 "but not running!\n", __FILE__, __LINE__); 944 strcpy(sc->bce_mfw_ver, "NOT RUNNING!"); 945 946 /* ToDo: Any action the driver should take? */ 947 } 948 } 949 950 /* Get PCI bus information (speed and type). */ 951 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 952 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 953 u32 clkreg; 954 955 sc->bce_flags |= BCE_PCIX_FLAG; 956 957 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS); 958 959 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 960 switch (clkreg) { 961 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 962 sc->bus_speed_mhz = 133; 963 break; 964 965 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 966 sc->bus_speed_mhz = 100; 967 break; 968 969 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 970 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 971 sc->bus_speed_mhz = 66; 972 break; 973 974 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 975 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 976 sc->bus_speed_mhz = 50; 977 break; 978 979 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 980 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 981 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 982 sc->bus_speed_mhz = 33; 983 break; 984 } 985 } else { 986 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 987 sc->bus_speed_mhz = 66; 988 else 989 sc->bus_speed_mhz = 33; 990 } 991 992 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 993 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 994 995 /* Reset controller and announce to bootcode that driver is present. */ 996 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { 997 BCE_PRINTF("%s(%d): Controller reset failed!\n", 998 __FILE__, __LINE__); 999 rc = ENXIO; 1000 goto bce_attach_fail; 1001 } 1002 1003 /* Initialize the controller. */ 1004 if (bce_chipinit(sc)) { 1005 BCE_PRINTF("%s(%d): Controller initialization failed!\n", 1006 __FILE__, __LINE__); 1007 rc = ENXIO; 1008 goto bce_attach_fail; 1009 } 1010 1011 /* Perform NVRAM test. */ 1012 if (bce_nvram_test(sc)) { 1013 BCE_PRINTF("%s(%d): NVRAM test failed!\n", 1014 __FILE__, __LINE__); 1015 rc = ENXIO; 1016 goto bce_attach_fail; 1017 } 1018 1019 /* Fetch the permanent Ethernet MAC address. */ 1020 bce_get_mac_addr(sc); 1021 1022 /* 1023 * Trip points control how many BDs 1024 * should be ready before generating an 1025 * interrupt while ticks control how long 1026 * a BD can sit in the chain before 1027 * generating an interrupt. Set the default 1028 * values for the RX and TX chains. 1029 */ 1030 1031#ifdef BCE_DEBUG 1032 /* Force more frequent interrupts. */ 1033 sc->bce_tx_quick_cons_trip_int = 1; 1034 sc->bce_tx_quick_cons_trip = 1; 1035 sc->bce_tx_ticks_int = 0; 1036 sc->bce_tx_ticks = 0; 1037 1038 sc->bce_rx_quick_cons_trip_int = 1; 1039 sc->bce_rx_quick_cons_trip = 1; 1040 sc->bce_rx_ticks_int = 0; 1041 sc->bce_rx_ticks = 0; 1042#else 1043 /* Improve throughput at the expense of increased latency. */ 1044 sc->bce_tx_quick_cons_trip_int = 20; 1045 sc->bce_tx_quick_cons_trip = 20; 1046 sc->bce_tx_ticks_int = 80; 1047 sc->bce_tx_ticks = 80; 1048 1049 sc->bce_rx_quick_cons_trip_int = 6; 1050 sc->bce_rx_quick_cons_trip = 6; 1051 sc->bce_rx_ticks_int = 18; 1052 sc->bce_rx_ticks = 18; 1053#endif 1054 1055 /* Not used for L2. */ 1056 sc->bce_comp_prod_trip_int = 0; 1057 sc->bce_comp_prod_trip = 0; 1058 sc->bce_com_ticks_int = 0; 1059 sc->bce_com_ticks = 0; 1060 sc->bce_cmd_ticks_int = 0; 1061 sc->bce_cmd_ticks = 0; 1062 1063 /* Update statistics once every second. */ 1064 sc->bce_stats_ticks = 1000000 & 0xffff00; 1065 1066 /* Find the media type for the adapter. */ 1067 bce_get_media(sc); 1068 1069 /* Store data needed by PHY driver for backplane applications */ 1070 sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 1071 sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG); 1072 1073 /* Allocate DMA memory resources. */ 1074 if (bce_dma_alloc(dev)) { 1075 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n", 1076 __FILE__, __LINE__); 1077 rc = ENXIO; 1078 goto bce_attach_fail; 1079 } 1080 1081 /* Allocate an ifnet structure. */ 1082 ifp = sc->bce_ifp = if_alloc(IFT_ETHER); 1083 if (ifp == NULL) { 1084 BCE_PRINTF("%s(%d): Interface allocation failed!\n", 1085 __FILE__, __LINE__); 1086 rc = ENXIO; 1087 goto bce_attach_fail; 1088 } 1089 1090 /* Initialize the ifnet interface. */ 1091 ifp->if_softc = sc; 1092 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1093 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1094 ifp->if_ioctl = bce_ioctl; 1095 ifp->if_start = bce_start; 1096 ifp->if_init = bce_init; 1097 ifp->if_mtu = ETHERMTU; 1098 1099 if (bce_tso_enable) { 1100 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO; 1101 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 | 1102 IFCAP_VLAN_HWTSO; 1103 } else { 1104 ifp->if_hwassist = BCE_IF_HWASSIST; 1105 ifp->if_capabilities = BCE_IF_CAPABILITIES; 1106 } 1107 1108 ifp->if_capenable = ifp->if_capabilities; 1109 1110 /* 1111 * Assume standard mbuf sizes for buffer allocation. 1112 * This may change later if the MTU size is set to 1113 * something other than 1500. 1114 */ 1115#ifdef BCE_JUMBO_HDRSPLIT 1116 sc->rx_bd_mbuf_alloc_size = MHLEN; 1117 /* Make sure offset is 16 byte aligned for hardware. */ 1118 sc->rx_bd_mbuf_align_pad = 1119 roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN); 1120 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - 1121 sc->rx_bd_mbuf_align_pad; 1122 sc->pg_bd_mbuf_alloc_size = MCLBYTES; 1123#else 1124 sc->rx_bd_mbuf_alloc_size = MCLBYTES; 1125 sc->rx_bd_mbuf_align_pad = 1126 roundup2(MCLBYTES, 16) - MCLBYTES; 1127 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - 1128 sc->rx_bd_mbuf_align_pad; 1129#endif 1130 1131 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD; 1132 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 1133 IFQ_SET_READY(&ifp->if_snd); 1134 1135 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 1136 ifp->if_baudrate = IF_Mbps(2500ULL); 1137 else 1138 ifp->if_baudrate = IF_Mbps(1000); 1139 1140 /* Handle any special PHY initialization for SerDes PHYs. */ 1141 bce_init_media(sc); 1142 1143 /* MII child bus by attaching the PHY. */ 1144 rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd, 1145 bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr, 1146 MII_OFFSET_ANY, MIIF_DOPAUSE); 1147 if (rc != 0) { 1148 BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__, 1149 __LINE__); 1150 goto bce_attach_fail; 1151 } 1152 1153 /* Attach to the Ethernet interface list. */ 1154 ether_ifattach(ifp, sc->eaddr); 1155 1156#if __FreeBSD_version < 500000 1157 callout_init(&sc->bce_tick_callout); 1158 callout_init(&sc->bce_pulse_callout); 1159#else 1160 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0); 1161 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0); 1162#endif 1163 1164 /* Hookup IRQ last. */ 1165 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, 1166 NULL, bce_intr, sc, &sc->bce_intrhand); 1167 1168 if (rc) { 1169 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n", 1170 __FILE__, __LINE__); 1171 bce_detach(dev); 1172 goto bce_attach_exit; 1173 } 1174 1175 /* 1176 * At this point we've acquired all the resources 1177 * we need to run so there's no turning back, we're 1178 * cleared for launch. 1179 */ 1180 1181 /* Print some important debugging info. */ 1182 DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc)); 1183 1184 /* Add the supported sysctls to the kernel. */ 1185 bce_add_sysctls(sc); 1186 1187 BCE_LOCK(sc); 1188 1189 /* 1190 * The chip reset earlier notified the bootcode that 1191 * a driver is present. We now need to start our pulse 1192 * routine so that the bootcode is reminded that we're 1193 * still running. 1194 */ 1195 bce_pulse(sc); 1196 1197 bce_mgmt_init_locked(sc); 1198 BCE_UNLOCK(sc); 1199 1200 /* Finally, print some useful adapter info */ 1201 bce_print_adapter_info(sc); 1202 DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n", 1203 __FUNCTION__, sc); 1204 1205 goto bce_attach_exit; 1206 1207bce_attach_fail: 1208 bce_release_resources(sc); 1209 1210bce_attach_exit: 1211 1212 DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 1213 1214 return(rc); 1215} 1216 1217 1218/****************************************************************************/ 1219/* Device detach function. */ 1220/* */ 1221/* Stops the controller, resets the controller, and releases resources. */ 1222/* */ 1223/* Returns: */ 1224/* 0 on success, positive value on failure. */ 1225/****************************************************************************/ 1226static int 1227bce_detach(device_t dev) 1228{ 1229 struct bce_softc *sc = device_get_softc(dev); 1230 struct ifnet *ifp; 1231 u32 msg; 1232 1233 DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); 1234 1235 ifp = sc->bce_ifp; 1236 1237 /* Stop and reset the controller. */ 1238 BCE_LOCK(sc); 1239 1240 /* Stop the pulse so the bootcode can go to driver absent state. */ 1241 callout_stop(&sc->bce_pulse_callout); 1242 1243 bce_stop(sc); 1244 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1245 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1246 else 1247 msg = BCE_DRV_MSG_CODE_UNLOAD; 1248 bce_reset(sc, msg); 1249 1250 BCE_UNLOCK(sc); 1251 1252 ether_ifdetach(ifp); 1253 1254 /* If we have a child device on the MII bus remove it too. */ 1255 bus_generic_detach(dev); 1256 device_delete_child(dev, sc->bce_miibus); 1257 1258 /* Release all remaining resources. */ 1259 bce_release_resources(sc); 1260 1261 DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); 1262 1263 return(0); 1264} 1265 1266 1267/****************************************************************************/ 1268/* Device shutdown function. */ 1269/* */ 1270/* Stops and resets the controller. */ 1271/* */ 1272/* Returns: */ 1273/* 0 on success, positive value on failure. */ 1274/****************************************************************************/ 1275static int 1276bce_shutdown(device_t dev) 1277{ 1278 struct bce_softc *sc = device_get_softc(dev); 1279 u32 msg; 1280 1281 DBENTER(BCE_VERBOSE); 1282 1283 BCE_LOCK(sc); 1284 bce_stop(sc); 1285 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1286 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1287 else 1288 msg = BCE_DRV_MSG_CODE_UNLOAD; 1289 bce_reset(sc, msg); 1290 BCE_UNLOCK(sc); 1291 1292 DBEXIT(BCE_VERBOSE); 1293 1294 return (0); 1295} 1296 1297 1298#ifdef BCE_DEBUG 1299/****************************************************************************/ 1300/* Register read. */ 1301/* */ 1302/* Returns: */ 1303/* The value of the register. */ 1304/****************************************************************************/ 1305static u32 1306bce_reg_rd(struct bce_softc *sc, u32 offset) 1307{ 1308 u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset); 1309 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1310 __FUNCTION__, offset, val); 1311 return val; 1312} 1313 1314 1315/****************************************************************************/ 1316/* Register write (16 bit). */ 1317/* */ 1318/* Returns: */ 1319/* Nothing. */ 1320/****************************************************************************/ 1321static void 1322bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val) 1323{ 1324 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n", 1325 __FUNCTION__, offset, val); 1326 bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val); 1327} 1328 1329 1330/****************************************************************************/ 1331/* Register write. */ 1332/* */ 1333/* Returns: */ 1334/* Nothing. */ 1335/****************************************************************************/ 1336static void 1337bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val) 1338{ 1339 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1340 __FUNCTION__, offset, val); 1341 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val); 1342} 1343#endif 1344 1345/****************************************************************************/ 1346/* Indirect register read. */ 1347/* */ 1348/* Reads NetXtreme II registers using an index/data register pair in PCI */ 1349/* configuration space. Using this mechanism avoids issues with posted */ 1350/* reads but is much slower than memory-mapped I/O. */ 1351/* */ 1352/* Returns: */ 1353/* The value of the register. */ 1354/****************************************************************************/ 1355static u32 1356bce_reg_rd_ind(struct bce_softc *sc, u32 offset) 1357{ 1358 device_t dev; 1359 dev = sc->bce_dev; 1360 1361 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1362#ifdef BCE_DEBUG 1363 { 1364 u32 val; 1365 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1366 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1367 __FUNCTION__, offset, val); 1368 return val; 1369 } 1370#else 1371 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1372#endif 1373} 1374 1375 1376/****************************************************************************/ 1377/* Indirect register write. */ 1378/* */ 1379/* Writes NetXtreme II registers using an index/data register pair in PCI */ 1380/* configuration space. Using this mechanism avoids issues with posted */ 1381/* writes but is muchh slower than memory-mapped I/O. */ 1382/* */ 1383/* Returns: */ 1384/* Nothing. */ 1385/****************************************************************************/ 1386static void 1387bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val) 1388{ 1389 device_t dev; 1390 dev = sc->bce_dev; 1391 1392 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1393 __FUNCTION__, offset, val); 1394 1395 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1396 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1397} 1398 1399 1400/****************************************************************************/ 1401/* Shared memory write. */ 1402/* */ 1403/* Writes NetXtreme II shared memory region. */ 1404/* */ 1405/* Returns: */ 1406/* Nothing. */ 1407/****************************************************************************/ 1408static void 1409bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val) 1410{ 1411 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to " 1412 "0x%08X\n", __FUNCTION__, val, offset); 1413 1414 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1415} 1416 1417 1418/****************************************************************************/ 1419/* Shared memory read. */ 1420/* */ 1421/* Reads NetXtreme II shared memory region. */ 1422/* */ 1423/* Returns: */ 1424/* The 32 bit value read. */ 1425/****************************************************************************/ 1426static u32 1427bce_shmem_rd(struct bce_softc *sc, u32 offset) 1428{ 1429 u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1430 1431 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from " 1432 "0x%08X\n", __FUNCTION__, val, offset); 1433 1434 return val; 1435} 1436 1437 1438#ifdef BCE_DEBUG 1439/****************************************************************************/ 1440/* Context memory read. */ 1441/* */ 1442/* The NetXtreme II controller uses context memory to track connection */ 1443/* information for L2 and higher network protocols. */ 1444/* */ 1445/* Returns: */ 1446/* The requested 32 bit value of context memory. */ 1447/****************************************************************************/ 1448static u32 1449bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset) 1450{ 1451 u32 idx, offset, retry_cnt = 5, val; 1452 1453 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || 1454 cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID " 1455 "address: 0x%08X.\n", __FUNCTION__, cid_addr)); 1456 1457 offset = ctx_offset + cid_addr; 1458 1459 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 1460 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 1461 1462 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ)); 1463 1464 for (idx = 0; idx < retry_cnt; idx++) { 1465 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1466 if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0) 1467 break; 1468 DELAY(5); 1469 } 1470 1471 if (val & BCE_CTX_CTX_CTRL_READ_REQ) 1472 BCE_PRINTF("%s(%d); Unable to read CTX memory: " 1473 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1474 __FILE__, __LINE__, cid_addr, ctx_offset); 1475 1476 val = REG_RD(sc, BCE_CTX_CTX_DATA); 1477 } else { 1478 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1479 val = REG_RD(sc, BCE_CTX_DATA); 1480 } 1481 1482 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " 1483 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val); 1484 1485 return(val); 1486} 1487#endif 1488 1489 1490/****************************************************************************/ 1491/* Context memory write. */ 1492/* */ 1493/* The NetXtreme II controller uses context memory to track connection */ 1494/* information for L2 and higher network protocols. */ 1495/* */ 1496/* Returns: */ 1497/* Nothing. */ 1498/****************************************************************************/ 1499static void 1500bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val) 1501{ 1502 u32 idx, offset = ctx_offset + cid_addr; 1503 u32 val, retry_cnt = 5; 1504 1505 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " 1506 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val); 1507 1508 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), 1509 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n", 1510 __FUNCTION__, cid_addr)); 1511 1512 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 1513 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 1514 1515 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); 1516 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); 1517 1518 for (idx = 0; idx < retry_cnt; idx++) { 1519 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1520 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) 1521 break; 1522 DELAY(5); 1523 } 1524 1525 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) 1526 BCE_PRINTF("%s(%d); Unable to write CTX memory: " 1527 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1528 __FILE__, __LINE__, cid_addr, ctx_offset); 1529 1530 } else { 1531 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1532 REG_WR(sc, BCE_CTX_DATA, ctx_val); 1533 } 1534} 1535 1536 1537/****************************************************************************/ 1538/* PHY register read. */ 1539/* */ 1540/* Implements register reads on the MII bus. */ 1541/* */ 1542/* Returns: */ 1543/* The value of the register. */ 1544/****************************************************************************/ 1545static int 1546bce_miibus_read_reg(device_t dev, int phy, int reg) 1547{ 1548 struct bce_softc *sc; 1549 u32 val; 1550 int i; 1551 1552 sc = device_get_softc(dev); 1553 1554 /* Make sure we are accessing the correct PHY address. */ 1555 if (phy != sc->bce_phy_addr) { 1556 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d " 1557 "for PHY read!\n", phy); 1558 return(0); 1559 } 1560 1561 /* 1562 * The 5709S PHY is an IEEE Clause 45 PHY 1563 * with special mappings to work with IEEE 1564 * Clause 22 register accesses. 1565 */ 1566 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { 1567 if (reg >= MII_BMCR && reg <= MII_ANLPRNP) 1568 reg += 0x10; 1569 } 1570 1571 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1572 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1573 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1574 1575 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1576 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1577 1578 DELAY(40); 1579 } 1580 1581 1582 val = BCE_MIPHY(phy) | BCE_MIREG(reg) | 1583 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | 1584 BCE_EMAC_MDIO_COMM_START_BUSY; 1585 REG_WR(sc, BCE_EMAC_MDIO_COMM, val); 1586 1587 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1588 DELAY(10); 1589 1590 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1591 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1592 DELAY(5); 1593 1594 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1595 val &= BCE_EMAC_MDIO_COMM_DATA; 1596 1597 break; 1598 } 1599 } 1600 1601 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { 1602 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, " 1603 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg); 1604 val = 0x0; 1605 } else { 1606 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1607 } 1608 1609 1610 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1611 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1612 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1613 1614 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1615 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1616 1617 DELAY(40); 1618 } 1619 1620 DB_PRINT_PHY_REG(reg, val); 1621 return (val & 0xffff); 1622 1623} 1624 1625 1626/****************************************************************************/ 1627/* PHY register write. */ 1628/* */ 1629/* Implements register writes on the MII bus. */ 1630/* */ 1631/* Returns: */ 1632/* The value of the register. */ 1633/****************************************************************************/ 1634static int 1635bce_miibus_write_reg(device_t dev, int phy, int reg, int val) 1636{ 1637 struct bce_softc *sc; 1638 u32 val1; 1639 int i; 1640 1641 sc = device_get_softc(dev); 1642 1643 /* Make sure we are accessing the correct PHY address. */ 1644 if (phy != sc->bce_phy_addr) { 1645 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d " 1646 "for PHY write!\n", phy); 1647 return(0); 1648 } 1649 1650 DB_PRINT_PHY_REG(reg, val); 1651 1652 /* 1653 * The 5709S PHY is an IEEE Clause 45 PHY 1654 * with special mappings to work with IEEE 1655 * Clause 22 register accesses. 1656 */ 1657 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { 1658 if (reg >= MII_BMCR && reg <= MII_ANLPRNP) 1659 reg += 0x10; 1660 } 1661 1662 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1663 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1664 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1665 1666 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1667 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1668 1669 DELAY(40); 1670 } 1671 1672 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | 1673 BCE_EMAC_MDIO_COMM_COMMAND_WRITE | 1674 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; 1675 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); 1676 1677 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1678 DELAY(10); 1679 1680 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1681 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1682 DELAY(5); 1683 break; 1684 } 1685 } 1686 1687 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) 1688 BCE_PRINTF("%s(%d): PHY write timeout!\n", 1689 __FILE__, __LINE__); 1690 1691 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1692 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1693 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1694 1695 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1696 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1697 1698 DELAY(40); 1699 } 1700 1701 return 0; 1702} 1703 1704 1705/****************************************************************************/ 1706/* MII bus status change. */ 1707/* */ 1708/* Called by the MII bus driver when the PHY establishes link to set the */ 1709/* MAC interface registers. */ 1710/* */ 1711/* Returns: */ 1712/* Nothing. */ 1713/****************************************************************************/ 1714static void 1715bce_miibus_statchg(device_t dev) 1716{ 1717 struct bce_softc *sc; 1718 struct mii_data *mii; 1719 int val; 1720 1721 sc = device_get_softc(dev); 1722 1723 DBENTER(BCE_VERBOSE_PHY); 1724 1725 mii = device_get_softc(sc->bce_miibus); 1726 1727 val = REG_RD(sc, BCE_EMAC_MODE); 1728 val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX | 1729 BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK | 1730 BCE_EMAC_MODE_25G); 1731 1732 /* Set MII or GMII interface based on the PHY speed. */ 1733 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1734 case IFM_10_T: 1735 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 1736 DBPRINT(sc, BCE_INFO_PHY, 1737 "Enabling 10Mb interface.\n"); 1738 val |= BCE_EMAC_MODE_PORT_MII_10; 1739 break; 1740 } 1741 /* fall-through */ 1742 case IFM_100_TX: 1743 DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n"); 1744 val |= BCE_EMAC_MODE_PORT_MII; 1745 break; 1746 case IFM_2500_SX: 1747 DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n"); 1748 val |= BCE_EMAC_MODE_25G; 1749 /* fall-through */ 1750 case IFM_1000_T: 1751 case IFM_1000_SX: 1752 DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n"); 1753 val |= BCE_EMAC_MODE_PORT_GMII; 1754 break; 1755 default: 1756 DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling " 1757 "default GMII interface.\n"); 1758 val |= BCE_EMAC_MODE_PORT_GMII; 1759 } 1760 1761 /* Set half or full duplex based on PHY settings. */ 1762 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 1763 DBPRINT(sc, BCE_INFO_PHY, 1764 "Setting Half-Duplex interface.\n"); 1765 val |= BCE_EMAC_MODE_HALF_DUPLEX; 1766 } else 1767 DBPRINT(sc, BCE_INFO_PHY, 1768 "Setting Full-Duplex interface.\n"); 1769 1770 REG_WR(sc, BCE_EMAC_MODE, val); 1771 1772 if ((mii->mii_media_active & IFM_ETH_RXPAUSE) != 0) { 1773 DBPRINT(sc, BCE_INFO_PHY, 1774 "%s(): Enabling RX flow control.\n", __FUNCTION__); 1775 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); 1776 } else { 1777 DBPRINT(sc, BCE_INFO_PHY, 1778 "%s(): Disabling RX flow control.\n", __FUNCTION__); 1779 BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); 1780 } 1781 1782 if ((mii->mii_media_active & IFM_ETH_TXPAUSE) != 0) { 1783 DBPRINT(sc, BCE_INFO_PHY, 1784 "%s(): Enabling TX flow control.\n", __FUNCTION__); 1785 BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); 1786 sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL; 1787 } else { 1788 DBPRINT(sc, BCE_INFO_PHY, 1789 "%s(): Disabling TX flow control.\n", __FUNCTION__); 1790 BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); 1791 sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL; 1792 } 1793 1794 /* ToDo: Update watermarks in bce_init_rx_context(). */ 1795 1796 DBEXIT(BCE_VERBOSE_PHY); 1797} 1798 1799 1800/****************************************************************************/ 1801/* Acquire NVRAM lock. */ 1802/* */ 1803/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ 1804/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1805/* for use by the driver. */ 1806/* */ 1807/* Returns: */ 1808/* 0 on success, positive value on failure. */ 1809/****************************************************************************/ 1810static int 1811bce_acquire_nvram_lock(struct bce_softc *sc) 1812{ 1813 u32 val; 1814 int j, rc = 0; 1815 1816 DBENTER(BCE_VERBOSE_NVRAM); 1817 1818 /* Request access to the flash interface. */ 1819 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1820 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1821 val = REG_RD(sc, BCE_NVM_SW_ARB); 1822 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1823 break; 1824 1825 DELAY(5); 1826 } 1827 1828 if (j >= NVRAM_TIMEOUT_COUNT) { 1829 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1830 rc = EBUSY; 1831 } 1832 1833 DBEXIT(BCE_VERBOSE_NVRAM); 1834 return (rc); 1835} 1836 1837 1838/****************************************************************************/ 1839/* Release NVRAM lock. */ 1840/* */ 1841/* When the caller is finished accessing NVRAM the lock must be released. */ 1842/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1843/* for use by the driver. */ 1844/* */ 1845/* Returns: */ 1846/* 0 on success, positive value on failure. */ 1847/****************************************************************************/ 1848static int 1849bce_release_nvram_lock(struct bce_softc *sc) 1850{ 1851 u32 val; 1852 int j, rc = 0; 1853 1854 DBENTER(BCE_VERBOSE_NVRAM); 1855 1856 /* 1857 * Relinquish nvram interface. 1858 */ 1859 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1860 1861 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1862 val = REG_RD(sc, BCE_NVM_SW_ARB); 1863 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1864 break; 1865 1866 DELAY(5); 1867 } 1868 1869 if (j >= NVRAM_TIMEOUT_COUNT) { 1870 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); 1871 rc = EBUSY; 1872 } 1873 1874 DBEXIT(BCE_VERBOSE_NVRAM); 1875 return (rc); 1876} 1877 1878 1879#ifdef BCE_NVRAM_WRITE_SUPPORT 1880/****************************************************************************/ 1881/* Enable NVRAM write access. */ 1882/* */ 1883/* Before writing to NVRAM the caller must enable NVRAM writes. */ 1884/* */ 1885/* Returns: */ 1886/* 0 on success, positive value on failure. */ 1887/****************************************************************************/ 1888static int 1889bce_enable_nvram_write(struct bce_softc *sc) 1890{ 1891 u32 val; 1892 int rc = 0; 1893 1894 DBENTER(BCE_VERBOSE_NVRAM); 1895 1896 val = REG_RD(sc, BCE_MISC_CFG); 1897 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); 1898 1899 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 1900 int j; 1901 1902 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1903 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); 1904 1905 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1906 DELAY(5); 1907 1908 val = REG_RD(sc, BCE_NVM_COMMAND); 1909 if (val & BCE_NVM_COMMAND_DONE) 1910 break; 1911 } 1912 1913 if (j >= NVRAM_TIMEOUT_COUNT) { 1914 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); 1915 rc = EBUSY; 1916 } 1917 } 1918 1919 DBENTER(BCE_VERBOSE_NVRAM); 1920 return (rc); 1921} 1922 1923 1924/****************************************************************************/ 1925/* Disable NVRAM write access. */ 1926/* */ 1927/* When the caller is finished writing to NVRAM write access must be */ 1928/* disabled. */ 1929/* */ 1930/* Returns: */ 1931/* Nothing. */ 1932/****************************************************************************/ 1933static void 1934bce_disable_nvram_write(struct bce_softc *sc) 1935{ 1936 u32 val; 1937 1938 DBENTER(BCE_VERBOSE_NVRAM); 1939 1940 val = REG_RD(sc, BCE_MISC_CFG); 1941 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); 1942 1943 DBEXIT(BCE_VERBOSE_NVRAM); 1944 1945} 1946#endif 1947 1948 1949/****************************************************************************/ 1950/* Enable NVRAM access. */ 1951/* */ 1952/* Before accessing NVRAM for read or write operations the caller must */ 1953/* enabled NVRAM access. */ 1954/* */ 1955/* Returns: */ 1956/* Nothing. */ 1957/****************************************************************************/ 1958static void 1959bce_enable_nvram_access(struct bce_softc *sc) 1960{ 1961 u32 val; 1962 1963 DBENTER(BCE_VERBOSE_NVRAM); 1964 1965 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1966 /* Enable both bits, even on read. */ 1967 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val | 1968 BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); 1969 1970 DBEXIT(BCE_VERBOSE_NVRAM); 1971} 1972 1973 1974/****************************************************************************/ 1975/* Disable NVRAM access. */ 1976/* */ 1977/* When the caller is finished accessing NVRAM access must be disabled. */ 1978/* */ 1979/* Returns: */ 1980/* Nothing. */ 1981/****************************************************************************/ 1982static void 1983bce_disable_nvram_access(struct bce_softc *sc) 1984{ 1985 u32 val; 1986 1987 DBENTER(BCE_VERBOSE_NVRAM); 1988 1989 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1990 1991 /* Disable both bits, even after read. */ 1992 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val & 1993 ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); 1994 1995 DBEXIT(BCE_VERBOSE_NVRAM); 1996} 1997 1998 1999#ifdef BCE_NVRAM_WRITE_SUPPORT 2000/****************************************************************************/ 2001/* Erase NVRAM page before writing. */ 2002/* */ 2003/* Non-buffered flash parts require that a page be erased before it is */ 2004/* written. */ 2005/* */ 2006/* Returns: */ 2007/* 0 on success, positive value on failure. */ 2008/****************************************************************************/ 2009static int 2010bce_nvram_erase_page(struct bce_softc *sc, u32 offset) 2011{ 2012 u32 cmd; 2013 int j, rc = 0; 2014 2015 DBENTER(BCE_VERBOSE_NVRAM); 2016 2017 /* Buffered flash doesn't require an erase. */ 2018 if (sc->bce_flash_info->flags & BCE_NV_BUFFERED) 2019 goto bce_nvram_erase_page_exit; 2020 2021 /* Build an erase command. */ 2022 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR | 2023 BCE_NVM_COMMAND_DOIT; 2024 2025 /* 2026 * Clear the DONE bit separately, set the NVRAM adress to erase, 2027 * and issue the erase command. 2028 */ 2029 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 2030 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 2031 REG_WR(sc, BCE_NVM_COMMAND, cmd); 2032 2033 /* Wait for completion. */ 2034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 2035 u32 val; 2036 2037 DELAY(5); 2038 2039 val = REG_RD(sc, BCE_NVM_COMMAND); 2040 if (val & BCE_NVM_COMMAND_DONE) 2041 break; 2042 } 2043 2044 if (j >= NVRAM_TIMEOUT_COUNT) { 2045 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n"); 2046 rc = EBUSY; 2047 } 2048 2049bce_nvram_erase_page_exit: 2050 DBEXIT(BCE_VERBOSE_NVRAM); 2051 return (rc); 2052} 2053#endif /* BCE_NVRAM_WRITE_SUPPORT */ 2054 2055 2056/****************************************************************************/ 2057/* Read a dword (32 bits) from NVRAM. */ 2058/* */ 2059/* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 2060/* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 2061/* */ 2062/* Returns: */ 2063/* 0 on success and the 32 bit value read, positive value on failure. */ 2064/****************************************************************************/ 2065static int 2066bce_nvram_read_dword(struct bce_softc *sc, 2067 u32 offset, u8 *ret_val, u32 cmd_flags) 2068{ 2069 u32 cmd; 2070 int i, rc = 0; 2071 2072 DBENTER(BCE_EXTREME_NVRAM); 2073 2074 /* Build the command word. */ 2075 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; 2076 2077 /* Calculate the offset for buffered flash if translation is used. */ 2078 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { 2079 offset = ((offset / sc->bce_flash_info->page_size) << 2080 sc->bce_flash_info->page_bits) + 2081 (offset % sc->bce_flash_info->page_size); 2082 } 2083 2084 /* 2085 * Clear the DONE bit separately, set the address to read, 2086 * and issue the read. 2087 */ 2088 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 2089 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 2090 REG_WR(sc, BCE_NVM_COMMAND, cmd); 2091 2092 /* Wait for completion. */ 2093 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 2094 u32 val; 2095 2096 DELAY(5); 2097 2098 val = REG_RD(sc, BCE_NVM_COMMAND); 2099 if (val & BCE_NVM_COMMAND_DONE) { 2100 val = REG_RD(sc, BCE_NVM_READ); 2101 2102 val = bce_be32toh(val); 2103 memcpy(ret_val, &val, 4); 2104 break; 2105 } 2106 } 2107 2108 /* Check for errors. */ 2109 if (i >= NVRAM_TIMEOUT_COUNT) { 2110 BCE_PRINTF("%s(%d): Timeout error reading NVRAM at " 2111 "offset 0x%08X!\n", __FILE__, __LINE__, offset); 2112 rc = EBUSY; 2113 } 2114 2115 DBEXIT(BCE_EXTREME_NVRAM); 2116 return(rc); 2117} 2118 2119 2120#ifdef BCE_NVRAM_WRITE_SUPPORT 2121/****************************************************************************/ 2122/* Write a dword (32 bits) to NVRAM. */ 2123/* */ 2124/* Write a 32 bit word to NVRAM. The caller is assumed to have already */ 2125/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */ 2126/* enabled NVRAM write access. */ 2127/* */ 2128/* Returns: */ 2129/* 0 on success, positive value on failure. */ 2130/****************************************************************************/ 2131static int 2132bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val, 2133 u32 cmd_flags) 2134{ 2135 u32 cmd, val32; 2136 int j, rc = 0; 2137 2138 DBENTER(BCE_VERBOSE_NVRAM); 2139 2140 /* Build the command word. */ 2141 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags; 2142 2143 /* Calculate the offset for buffered flash if translation is used. */ 2144 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { 2145 offset = ((offset / sc->bce_flash_info->page_size) << 2146 sc->bce_flash_info->page_bits) + 2147 (offset % sc->bce_flash_info->page_size); 2148 } 2149 2150 /* 2151 * Clear the DONE bit separately, convert NVRAM data to big-endian, 2152 * set the NVRAM address to write, and issue the write command 2153 */ 2154 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 2155 memcpy(&val32, val, 4); 2156 val32 = htobe32(val32); 2157 REG_WR(sc, BCE_NVM_WRITE, val32); 2158 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 2159 REG_WR(sc, BCE_NVM_COMMAND, cmd); 2160 2161 /* Wait for completion. */ 2162 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 2163 DELAY(5); 2164 2165 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE) 2166 break; 2167 } 2168 if (j >= NVRAM_TIMEOUT_COUNT) { 2169 BCE_PRINTF("%s(%d): Timeout error writing NVRAM at " 2170 "offset 0x%08X\n", __FILE__, __LINE__, offset); 2171 rc = EBUSY; 2172 } 2173 2174 DBEXIT(BCE_VERBOSE_NVRAM); 2175 return (rc); 2176} 2177#endif /* BCE_NVRAM_WRITE_SUPPORT */ 2178 2179 2180/****************************************************************************/ 2181/* Initialize NVRAM access. */ 2182/* */ 2183/* Identify the NVRAM device in use and prepare the NVRAM interface to */ 2184/* access that device. */ 2185/* */ 2186/* Returns: */ 2187/* 0 on success, positive value on failure. */ 2188/****************************************************************************/ 2189static int 2190bce_init_nvram(struct bce_softc *sc) 2191{ 2192 u32 val; 2193 int j, entry_count, rc = 0; 2194 struct flash_spec *flash; 2195 2196 DBENTER(BCE_VERBOSE_NVRAM); 2197 2198 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 2199 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 2200 sc->bce_flash_info = &flash_5709; 2201 goto bce_init_nvram_get_flash_size; 2202 } 2203 2204 /* Determine the selected interface. */ 2205 val = REG_RD(sc, BCE_NVM_CFG1); 2206 2207 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 2208 2209 /* 2210 * Flash reconfiguration is required to support additional 2211 * NVRAM devices not directly supported in hardware. 2212 * Check if the flash interface was reconfigured 2213 * by the bootcode. 2214 */ 2215 2216 if (val & 0x40000000) { 2217 /* Flash interface reconfigured by bootcode. */ 2218 2219 DBPRINT(sc,BCE_INFO_LOAD, 2220 "bce_init_nvram(): Flash WAS reconfigured.\n"); 2221 2222 for (j = 0, flash = &flash_table[0]; j < entry_count; 2223 j++, flash++) { 2224 if ((val & FLASH_BACKUP_STRAP_MASK) == 2225 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 2226 sc->bce_flash_info = flash; 2227 break; 2228 } 2229 } 2230 } else { 2231 /* Flash interface not yet reconfigured. */ 2232 u32 mask; 2233 2234 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n", 2235 __FUNCTION__); 2236 2237 if (val & (1 << 23)) 2238 mask = FLASH_BACKUP_STRAP_MASK; 2239 else 2240 mask = FLASH_STRAP_MASK; 2241 2242 /* Look for the matching NVRAM device configuration data. */ 2243 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { 2244 2245 /* Check if the device matches any of the known devices. */ 2246 if ((val & mask) == (flash->strapping & mask)) { 2247 /* Found a device match. */ 2248 sc->bce_flash_info = flash; 2249 2250 /* Request access to the flash interface. */ 2251 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2252 return rc; 2253 2254 /* Reconfigure the flash interface. */ 2255 bce_enable_nvram_access(sc); 2256 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 2257 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 2258 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 2259 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 2260 bce_disable_nvram_access(sc); 2261 bce_release_nvram_lock(sc); 2262 2263 break; 2264 } 2265 } 2266 } 2267 2268 /* Check if a matching device was found. */ 2269 if (j == entry_count) { 2270 sc->bce_flash_info = NULL; 2271 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", 2272 __FILE__, __LINE__); 2273 DBEXIT(BCE_VERBOSE_NVRAM); 2274 return (ENODEV); 2275 } 2276 2277bce_init_nvram_get_flash_size: 2278 /* Write the flash config data to the shared memory interface. */ 2279 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2); 2280 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 2281 if (val) 2282 sc->bce_flash_size = val; 2283 else 2284 sc->bce_flash_size = sc->bce_flash_info->total_size; 2285 2286 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n", 2287 __FUNCTION__, sc->bce_flash_info->name, 2288 sc->bce_flash_info->total_size); 2289 2290 DBEXIT(BCE_VERBOSE_NVRAM); 2291 return rc; 2292} 2293 2294 2295/****************************************************************************/ 2296/* Read an arbitrary range of data from NVRAM. */ 2297/* */ 2298/* Prepares the NVRAM interface for access and reads the requested data */ 2299/* into the supplied buffer. */ 2300/* */ 2301/* Returns: */ 2302/* 0 on success and the data read, positive value on failure. */ 2303/****************************************************************************/ 2304static int 2305bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, 2306 int buf_size) 2307{ 2308 int rc = 0; 2309 u32 cmd_flags, offset32, len32, extra; 2310 2311 DBENTER(BCE_VERBOSE_NVRAM); 2312 2313 if (buf_size == 0) 2314 goto bce_nvram_read_exit; 2315 2316 /* Request access to the flash interface. */ 2317 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2318 goto bce_nvram_read_exit; 2319 2320 /* Enable access to flash interface */ 2321 bce_enable_nvram_access(sc); 2322 2323 len32 = buf_size; 2324 offset32 = offset; 2325 extra = 0; 2326 2327 cmd_flags = 0; 2328 2329 if (offset32 & 3) { 2330 u8 buf[4]; 2331 u32 pre_len; 2332 2333 offset32 &= ~3; 2334 pre_len = 4 - (offset & 3); 2335 2336 if (pre_len >= len32) { 2337 pre_len = len32; 2338 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 2339 } 2340 else { 2341 cmd_flags = BCE_NVM_COMMAND_FIRST; 2342 } 2343 2344 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2345 2346 if (rc) 2347 return rc; 2348 2349 memcpy(ret_buf, buf + (offset & 3), pre_len); 2350 2351 offset32 += 4; 2352 ret_buf += pre_len; 2353 len32 -= pre_len; 2354 } 2355 2356 if (len32 & 3) { 2357 extra = 4 - (len32 & 3); 2358 len32 = (len32 + 4) & ~3; 2359 } 2360 2361 if (len32 == 4) { 2362 u8 buf[4]; 2363 2364 if (cmd_flags) 2365 cmd_flags = BCE_NVM_COMMAND_LAST; 2366 else 2367 cmd_flags = BCE_NVM_COMMAND_FIRST | 2368 BCE_NVM_COMMAND_LAST; 2369 2370 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2371 2372 memcpy(ret_buf, buf, 4 - extra); 2373 } 2374 else if (len32 > 0) { 2375 u8 buf[4]; 2376 2377 /* Read the first word. */ 2378 if (cmd_flags) 2379 cmd_flags = 0; 2380 else 2381 cmd_flags = BCE_NVM_COMMAND_FIRST; 2382 2383 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 2384 2385 /* Advance to the next dword. */ 2386 offset32 += 4; 2387 ret_buf += 4; 2388 len32 -= 4; 2389 2390 while (len32 > 4 && rc == 0) { 2391 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 2392 2393 /* Advance to the next dword. */ 2394 offset32 += 4; 2395 ret_buf += 4; 2396 len32 -= 4; 2397 } 2398 2399 if (rc) 2400 goto bce_nvram_read_locked_exit; 2401 2402 cmd_flags = BCE_NVM_COMMAND_LAST; 2403 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2404 2405 memcpy(ret_buf, buf, 4 - extra); 2406 } 2407 2408bce_nvram_read_locked_exit: 2409 /* Disable access to flash interface and release the lock. */ 2410 bce_disable_nvram_access(sc); 2411 bce_release_nvram_lock(sc); 2412 2413bce_nvram_read_exit: 2414 DBEXIT(BCE_VERBOSE_NVRAM); 2415 return rc; 2416} 2417 2418 2419#ifdef BCE_NVRAM_WRITE_SUPPORT 2420/****************************************************************************/ 2421/* Write an arbitrary range of data from NVRAM. */ 2422/* */ 2423/* Prepares the NVRAM interface for write access and writes the requested */ 2424/* data from the supplied buffer. The caller is responsible for */ 2425/* calculating any appropriate CRCs. */ 2426/* */ 2427/* Returns: */ 2428/* 0 on success, positive value on failure. */ 2429/****************************************************************************/ 2430static int 2431bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, 2432 int buf_size) 2433{ 2434 u32 written, offset32, len32; 2435 u8 *buf, start[4], end[4]; 2436 int rc = 0; 2437 int align_start, align_end; 2438 2439 DBENTER(BCE_VERBOSE_NVRAM); 2440 2441 buf = data_buf; 2442 offset32 = offset; 2443 len32 = buf_size; 2444 align_start = align_end = 0; 2445 2446 if ((align_start = (offset32 & 3))) { 2447 offset32 &= ~3; 2448 len32 += align_start; 2449 if ((rc = bce_nvram_read(sc, offset32, start, 4))) 2450 goto bce_nvram_write_exit; 2451 } 2452 2453 if (len32 & 3) { 2454 if ((len32 > 4) || !align_start) { 2455 align_end = 4 - (len32 & 3); 2456 len32 += align_end; 2457 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, 2458 end, 4))) { 2459 goto bce_nvram_write_exit; 2460 } 2461 } 2462 } 2463 2464 if (align_start || align_end) { 2465 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 2466 if (buf == 0) { 2467 rc = ENOMEM; 2468 goto bce_nvram_write_exit; 2469 } 2470 2471 if (align_start) { 2472 memcpy(buf, start, 4); 2473 } 2474 2475 if (align_end) { 2476 memcpy(buf + len32 - 4, end, 4); 2477 } 2478 memcpy(buf + align_start, data_buf, buf_size); 2479 } 2480 2481 written = 0; 2482 while ((written < len32) && (rc == 0)) { 2483 u32 page_start, page_end, data_start, data_end; 2484 u32 addr, cmd_flags; 2485 int i; 2486 u8 flash_buffer[264]; 2487 2488 /* Find the page_start addr */ 2489 page_start = offset32 + written; 2490 page_start -= (page_start % sc->bce_flash_info->page_size); 2491 /* Find the page_end addr */ 2492 page_end = page_start + sc->bce_flash_info->page_size; 2493 /* Find the data_start addr */ 2494 data_start = (written == 0) ? offset32 : page_start; 2495 /* Find the data_end addr */ 2496 data_end = (page_end > offset32 + len32) ? 2497 (offset32 + len32) : page_end; 2498 2499 /* Request access to the flash interface. */ 2500 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2501 goto bce_nvram_write_exit; 2502 2503 /* Enable access to flash interface */ 2504 bce_enable_nvram_access(sc); 2505 2506 cmd_flags = BCE_NVM_COMMAND_FIRST; 2507 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2508 int j; 2509 2510 /* Read the whole page into the buffer 2511 * (non-buffer flash only) */ 2512 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { 2513 if (j == (sc->bce_flash_info->page_size - 4)) { 2514 cmd_flags |= BCE_NVM_COMMAND_LAST; 2515 } 2516 rc = bce_nvram_read_dword(sc, 2517 page_start + j, 2518 &flash_buffer[j], 2519 cmd_flags); 2520 2521 if (rc) 2522 goto bce_nvram_write_locked_exit; 2523 2524 cmd_flags = 0; 2525 } 2526 } 2527 2528 /* Enable writes to flash interface (unlock write-protect) */ 2529 if ((rc = bce_enable_nvram_write(sc)) != 0) 2530 goto bce_nvram_write_locked_exit; 2531 2532 /* Erase the page */ 2533 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) 2534 goto bce_nvram_write_locked_exit; 2535 2536 /* Re-enable the write again for the actual write */ 2537 bce_enable_nvram_write(sc); 2538 2539 /* Loop to write back the buffer data from page_start to 2540 * data_start */ 2541 i = 0; 2542 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2543 for (addr = page_start; addr < data_start; 2544 addr += 4, i += 4) { 2545 2546 rc = bce_nvram_write_dword(sc, addr, 2547 &flash_buffer[i], cmd_flags); 2548 2549 if (rc != 0) 2550 goto bce_nvram_write_locked_exit; 2551 2552 cmd_flags = 0; 2553 } 2554 } 2555 2556 /* Loop to write the new data from data_start to data_end */ 2557 for (addr = data_start; addr < data_end; addr += 4, i++) { 2558 if ((addr == page_end - 4) || 2559 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && 2560 (addr == data_end - 4))) { 2561 2562 cmd_flags |= BCE_NVM_COMMAND_LAST; 2563 } 2564 rc = bce_nvram_write_dword(sc, addr, buf, 2565 cmd_flags); 2566 2567 if (rc != 0) 2568 goto bce_nvram_write_locked_exit; 2569 2570 cmd_flags = 0; 2571 buf += 4; 2572 } 2573 2574 /* Loop to write back the buffer data from data_end 2575 * to page_end */ 2576 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2577 for (addr = data_end; addr < page_end; 2578 addr += 4, i += 4) { 2579 2580 if (addr == page_end-4) { 2581 cmd_flags = BCE_NVM_COMMAND_LAST; 2582 } 2583 rc = bce_nvram_write_dword(sc, addr, 2584 &flash_buffer[i], cmd_flags); 2585 2586 if (rc != 0) 2587 goto bce_nvram_write_locked_exit; 2588 2589 cmd_flags = 0; 2590 } 2591 } 2592 2593 /* Disable writes to flash interface (lock write-protect) */ 2594 bce_disable_nvram_write(sc); 2595 2596 /* Disable access to flash interface */ 2597 bce_disable_nvram_access(sc); 2598 bce_release_nvram_lock(sc); 2599 2600 /* Increment written */ 2601 written += data_end - data_start; 2602 } 2603 2604 goto bce_nvram_write_exit; 2605 2606bce_nvram_write_locked_exit: 2607 bce_disable_nvram_write(sc); 2608 bce_disable_nvram_access(sc); 2609 bce_release_nvram_lock(sc); 2610 2611bce_nvram_write_exit: 2612 if (align_start || align_end) 2613 free(buf, M_DEVBUF); 2614 2615 DBEXIT(BCE_VERBOSE_NVRAM); 2616 return (rc); 2617} 2618#endif /* BCE_NVRAM_WRITE_SUPPORT */ 2619 2620 2621/****************************************************************************/ 2622/* Verifies that NVRAM is accessible and contains valid data. */ 2623/* */ 2624/* Reads the configuration data from NVRAM and verifies that the CRC is */ 2625/* correct. */ 2626/* */ 2627/* Returns: */ 2628/* 0 on success, positive value on failure. */ 2629/****************************************************************************/ 2630static int 2631bce_nvram_test(struct bce_softc *sc) 2632{ 2633 u32 buf[BCE_NVRAM_SIZE / 4]; 2634 u8 *data = (u8 *) buf; 2635 int rc = 0; 2636 u32 magic, csum; 2637 2638 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2639 2640 /* 2641 * Check that the device NVRAM is valid by reading 2642 * the magic value at offset 0. 2643 */ 2644 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) { 2645 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", 2646 __FILE__, __LINE__); 2647 goto bce_nvram_test_exit; 2648 } 2649 2650 /* 2651 * Verify that offset 0 of the NVRAM contains 2652 * a valid magic number. 2653 */ 2654 magic = bce_be32toh(buf[0]); 2655 if (magic != BCE_NVRAM_MAGIC) { 2656 rc = ENODEV; 2657 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! " 2658 "Expected: 0x%08X, Found: 0x%08X\n", 2659 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); 2660 goto bce_nvram_test_exit; 2661 } 2662 2663 /* 2664 * Verify that the device NVRAM includes valid 2665 * configuration data. 2666 */ 2667 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) { 2668 BCE_PRINTF("%s(%d): Unable to read manufacturing " 2669 "Information from NVRAM!\n", __FILE__, __LINE__); 2670 goto bce_nvram_test_exit; 2671 } 2672 2673 csum = ether_crc32_le(data, 0x100); 2674 if (csum != BCE_CRC32_RESIDUAL) { 2675 rc = ENODEV; 2676 BCE_PRINTF("%s(%d): Invalid manufacturing information " 2677 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2678 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2679 goto bce_nvram_test_exit; 2680 } 2681 2682 csum = ether_crc32_le(data + 0x100, 0x100); 2683 if (csum != BCE_CRC32_RESIDUAL) { 2684 rc = ENODEV; 2685 BCE_PRINTF("%s(%d): Invalid feature configuration " 2686 "information NVRAM CRC! Expected: 0x%08X, " 2687 "Found: 08%08X\n", __FILE__, __LINE__, 2688 BCE_CRC32_RESIDUAL, csum); 2689 } 2690 2691bce_nvram_test_exit: 2692 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2693 return rc; 2694} 2695 2696 2697/****************************************************************************/ 2698/* Identifies the current media type of the controller and sets the PHY */ 2699/* address. */ 2700/* */ 2701/* Returns: */ 2702/* Nothing. */ 2703/****************************************************************************/ 2704static void 2705bce_get_media(struct bce_softc *sc) 2706{ 2707 u32 val; 2708 2709 DBENTER(BCE_VERBOSE_PHY); 2710 2711 /* Assume PHY address for copper controllers. */ 2712 sc->bce_phy_addr = 1; 2713 2714 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 2715 u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 2716 u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 2717 u32 strap; 2718 2719 /* 2720 * The BCM5709S is software configurable 2721 * for Copper or SerDes operation. 2722 */ 2723 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 2724 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " 2725 "for copper.\n"); 2726 goto bce_get_media_exit; 2727 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 2728 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " 2729 "for dual media.\n"); 2730 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2731 goto bce_get_media_exit; 2732 } 2733 2734 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 2735 strap = (val & 2736 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 2737 else 2738 strap = (val & 2739 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 2740 2741 if (pci_get_function(sc->bce_dev) == 0) { 2742 switch (strap) { 2743 case 0x4: 2744 case 0x5: 2745 case 0x6: 2746 DBPRINT(sc, BCE_INFO_LOAD, 2747 "BCM5709 s/w configured for SerDes.\n"); 2748 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2749 break; 2750 default: 2751 DBPRINT(sc, BCE_INFO_LOAD, 2752 "BCM5709 s/w configured for Copper.\n"); 2753 break; 2754 } 2755 } else { 2756 switch (strap) { 2757 case 0x1: 2758 case 0x2: 2759 case 0x4: 2760 DBPRINT(sc, BCE_INFO_LOAD, 2761 "BCM5709 s/w configured for SerDes.\n"); 2762 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2763 break; 2764 default: 2765 DBPRINT(sc, BCE_INFO_LOAD, 2766 "BCM5709 s/w configured for Copper.\n"); 2767 break; 2768 } 2769 } 2770 2771 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) 2772 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2773 2774 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 2775 2776 sc->bce_flags |= BCE_NO_WOL_FLAG; 2777 2778 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) 2779 sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG; 2780 2781 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 2782 /* 5708S/09S/16S use a separate PHY for SerDes. */ 2783 sc->bce_phy_addr = 2; 2784 2785 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 2786 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) { 2787 sc->bce_phy_flags |= 2788 BCE_PHY_2_5G_CAPABLE_FLAG; 2789 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb " 2790 "capable adapter\n"); 2791 } 2792 } 2793 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 2794 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) 2795 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 2796 2797bce_get_media_exit: 2798 DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY), 2799 "Using PHY address %d.\n", sc->bce_phy_addr); 2800 2801 DBEXIT(BCE_VERBOSE_PHY); 2802} 2803 2804 2805/****************************************************************************/ 2806/* Performs PHY initialization required before MII drivers access the */ 2807/* device. */ 2808/* */ 2809/* Returns: */ 2810/* Nothing. */ 2811/****************************************************************************/ 2812static void 2813bce_init_media(struct bce_softc *sc) 2814{ 2815 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { 2816 /* 2817 * Configure 5709S/5716S PHYs to use traditional IEEE 2818 * Clause 22 method. Otherwise we have no way to attach 2819 * the PHY in mii(4) layer. PHY specific configuration 2820 * is done in mii layer. 2821 */ 2822 2823 /* Select auto-negotiation MMD of the PHY. */ 2824 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 2825 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT); 2826 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 2827 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD); 2828 2829 /* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */ 2830 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 2831 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); 2832 } 2833} 2834 2835 2836/****************************************************************************/ 2837/* Free any DMA memory owned by the driver. */ 2838/* */ 2839/* Scans through each data structre that requires DMA memory and frees */ 2840/* the memory if allocated. */ 2841/* */ 2842/* Returns: */ 2843/* Nothing. */ 2844/****************************************************************************/ 2845static void 2846bce_dma_free(struct bce_softc *sc) 2847{ 2848 int i; 2849 2850 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); 2851 2852 /* Free, unmap, and destroy the status block. */ 2853 if (sc->status_block != NULL) { 2854 bus_dmamem_free( 2855 sc->status_tag, 2856 sc->status_block, 2857 sc->status_map); 2858 sc->status_block = NULL; 2859 } 2860 2861 if (sc->status_map != NULL) { 2862 bus_dmamap_unload( 2863 sc->status_tag, 2864 sc->status_map); 2865 bus_dmamap_destroy(sc->status_tag, 2866 sc->status_map); 2867 sc->status_map = NULL; 2868 } 2869 2870 if (sc->status_tag != NULL) { 2871 bus_dma_tag_destroy(sc->status_tag); 2872 sc->status_tag = NULL; 2873 } 2874 2875 2876 /* Free, unmap, and destroy the statistics block. */ 2877 if (sc->stats_block != NULL) { 2878 bus_dmamem_free( 2879 sc->stats_tag, 2880 sc->stats_block, 2881 sc->stats_map); 2882 sc->stats_block = NULL; 2883 } 2884 2885 if (sc->stats_map != NULL) { 2886 bus_dmamap_unload( 2887 sc->stats_tag, 2888 sc->stats_map); 2889 bus_dmamap_destroy(sc->stats_tag, 2890 sc->stats_map); 2891 sc->stats_map = NULL; 2892 } 2893 2894 if (sc->stats_tag != NULL) { 2895 bus_dma_tag_destroy(sc->stats_tag); 2896 sc->stats_tag = NULL; 2897 } 2898 2899 2900 /* Free, unmap and destroy all context memory pages. */ 2901 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 2902 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 2903 for (i = 0; i < sc->ctx_pages; i++ ) { 2904 if (sc->ctx_block[i] != NULL) { 2905 bus_dmamem_free( 2906 sc->ctx_tag, 2907 sc->ctx_block[i], 2908 sc->ctx_map[i]); 2909 sc->ctx_block[i] = NULL; 2910 } 2911 2912 if (sc->ctx_map[i] != NULL) { 2913 bus_dmamap_unload( 2914 sc->ctx_tag, 2915 sc->ctx_map[i]); 2916 bus_dmamap_destroy( 2917 sc->ctx_tag, 2918 sc->ctx_map[i]); 2919 sc->ctx_map[i] = NULL; 2920 } 2921 } 2922 2923 /* Destroy the context memory tag. */ 2924 if (sc->ctx_tag != NULL) { 2925 bus_dma_tag_destroy(sc->ctx_tag); 2926 sc->ctx_tag = NULL; 2927 } 2928 } 2929 2930 2931 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 2932 for (i = 0; i < TX_PAGES; i++ ) { 2933 if (sc->tx_bd_chain[i] != NULL) { 2934 bus_dmamem_free( 2935 sc->tx_bd_chain_tag, 2936 sc->tx_bd_chain[i], 2937 sc->tx_bd_chain_map[i]); 2938 sc->tx_bd_chain[i] = NULL; 2939 } 2940 2941 if (sc->tx_bd_chain_map[i] != NULL) { 2942 bus_dmamap_unload( 2943 sc->tx_bd_chain_tag, 2944 sc->tx_bd_chain_map[i]); 2945 bus_dmamap_destroy( 2946 sc->tx_bd_chain_tag, 2947 sc->tx_bd_chain_map[i]); 2948 sc->tx_bd_chain_map[i] = NULL; 2949 } 2950 } 2951 2952 /* Destroy the TX buffer descriptor tag. */ 2953 if (sc->tx_bd_chain_tag != NULL) { 2954 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2955 sc->tx_bd_chain_tag = NULL; 2956 } 2957 2958 2959 /* Free, unmap and destroy all RX buffer descriptor chain pages. */ 2960 for (i = 0; i < RX_PAGES; i++ ) { 2961 if (sc->rx_bd_chain[i] != NULL) { 2962 bus_dmamem_free( 2963 sc->rx_bd_chain_tag, 2964 sc->rx_bd_chain[i], 2965 sc->rx_bd_chain_map[i]); 2966 sc->rx_bd_chain[i] = NULL; 2967 } 2968 2969 if (sc->rx_bd_chain_map[i] != NULL) { 2970 bus_dmamap_unload( 2971 sc->rx_bd_chain_tag, 2972 sc->rx_bd_chain_map[i]); 2973 bus_dmamap_destroy( 2974 sc->rx_bd_chain_tag, 2975 sc->rx_bd_chain_map[i]); 2976 sc->rx_bd_chain_map[i] = NULL; 2977 } 2978 } 2979 2980 /* Destroy the RX buffer descriptor tag. */ 2981 if (sc->rx_bd_chain_tag != NULL) { 2982 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2983 sc->rx_bd_chain_tag = NULL; 2984 } 2985 2986 2987#ifdef BCE_JUMBO_HDRSPLIT 2988 /* Free, unmap and destroy all page buffer descriptor chain pages. */ 2989 for (i = 0; i < PG_PAGES; i++ ) { 2990 if (sc->pg_bd_chain[i] != NULL) { 2991 bus_dmamem_free( 2992 sc->pg_bd_chain_tag, 2993 sc->pg_bd_chain[i], 2994 sc->pg_bd_chain_map[i]); 2995 sc->pg_bd_chain[i] = NULL; 2996 } 2997 2998 if (sc->pg_bd_chain_map[i] != NULL) { 2999 bus_dmamap_unload( 3000 sc->pg_bd_chain_tag, 3001 sc->pg_bd_chain_map[i]); 3002 bus_dmamap_destroy( 3003 sc->pg_bd_chain_tag, 3004 sc->pg_bd_chain_map[i]); 3005 sc->pg_bd_chain_map[i] = NULL; 3006 } 3007 } 3008 3009 /* Destroy the page buffer descriptor tag. */ 3010 if (sc->pg_bd_chain_tag != NULL) { 3011 bus_dma_tag_destroy(sc->pg_bd_chain_tag); 3012 sc->pg_bd_chain_tag = NULL; 3013 } 3014#endif 3015 3016 3017 /* Unload and destroy the TX mbuf maps. */ 3018 for (i = 0; i < TOTAL_TX_BD; i++) { 3019 if (sc->tx_mbuf_map[i] != NULL) { 3020 bus_dmamap_unload(sc->tx_mbuf_tag, 3021 sc->tx_mbuf_map[i]); 3022 bus_dmamap_destroy(sc->tx_mbuf_tag, 3023 sc->tx_mbuf_map[i]); 3024 sc->tx_mbuf_map[i] = NULL; 3025 } 3026 } 3027 3028 /* Destroy the TX mbuf tag. */ 3029 if (sc->tx_mbuf_tag != NULL) { 3030 bus_dma_tag_destroy(sc->tx_mbuf_tag); 3031 sc->tx_mbuf_tag = NULL; 3032 } 3033 3034 /* Unload and destroy the RX mbuf maps. */ 3035 for (i = 0; i < TOTAL_RX_BD; i++) { 3036 if (sc->rx_mbuf_map[i] != NULL) { 3037 bus_dmamap_unload(sc->rx_mbuf_tag, 3038 sc->rx_mbuf_map[i]); 3039 bus_dmamap_destroy(sc->rx_mbuf_tag, 3040 sc->rx_mbuf_map[i]); 3041 sc->rx_mbuf_map[i] = NULL; 3042 } 3043 } 3044 3045 /* Destroy the RX mbuf tag. */ 3046 if (sc->rx_mbuf_tag != NULL) { 3047 bus_dma_tag_destroy(sc->rx_mbuf_tag); 3048 sc->rx_mbuf_tag = NULL; 3049 } 3050 3051#ifdef BCE_JUMBO_HDRSPLIT 3052 /* Unload and destroy the page mbuf maps. */ 3053 for (i = 0; i < TOTAL_PG_BD; i++) { 3054 if (sc->pg_mbuf_map[i] != NULL) { 3055 bus_dmamap_unload(sc->pg_mbuf_tag, 3056 sc->pg_mbuf_map[i]); 3057 bus_dmamap_destroy(sc->pg_mbuf_tag, 3058 sc->pg_mbuf_map[i]); 3059 sc->pg_mbuf_map[i] = NULL; 3060 } 3061 } 3062 3063 /* Destroy the page mbuf tag. */ 3064 if (sc->pg_mbuf_tag != NULL) { 3065 bus_dma_tag_destroy(sc->pg_mbuf_tag); 3066 sc->pg_mbuf_tag = NULL; 3067 } 3068#endif 3069 3070 /* Destroy the parent tag */ 3071 if (sc->parent_tag != NULL) { 3072 bus_dma_tag_destroy(sc->parent_tag); 3073 sc->parent_tag = NULL; 3074 } 3075 3076 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); 3077} 3078 3079 3080/****************************************************************************/ 3081/* Get DMA memory from the OS. */ 3082/* */ 3083/* Validates that the OS has provided DMA buffers in response to a */ 3084/* bus_dmamap_load() call and saves the physical address of those buffers. */ 3085/* When the callback is used the OS will return 0 for the mapping function */ 3086/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 3087/* failures back to the caller. */ 3088/* */ 3089/* Returns: */ 3090/* Nothing. */ 3091/****************************************************************************/ 3092static void 3093bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3094{ 3095 bus_addr_t *busaddr = arg; 3096 3097 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", 3098 __FUNCTION__, nseg)); 3099 /* Simulate a mapping failure. */ 3100 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), 3101 error = ENOMEM); 3102 3103 /* ToDo: How to increment debug sim_count variable here? */ 3104 3105 /* Check for an error and signal the caller that an error occurred. */ 3106 if (error) { 3107 *busaddr = 0; 3108 } else { 3109 *busaddr = segs->ds_addr; 3110 } 3111 3112 return; 3113} 3114 3115 3116/****************************************************************************/ 3117/* Allocate any DMA memory needed by the driver. */ 3118/* */ 3119/* Allocates DMA memory needed for the various global structures needed by */ 3120/* hardware. */ 3121/* */ 3122/* Memory alignment requirements: */ 3123/* +-----------------+----------+----------+----------+----------+ */ 3124/* | | 5706 | 5708 | 5709 | 5716 | */ 3125/* +-----------------+----------+----------+----------+----------+ */ 3126/* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3127/* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3128/* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 3129/* |PG Buffers | none | none | none | none | */ 3130/* |TX Buffers | none | none | none | none | */ 3131/* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 3132/* |Context Memory | | | | | */ 3133/* +-----------------+----------+----------+----------+----------+ */ 3134/* */ 3135/* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 3136/* */ 3137/* Returns: */ 3138/* 0 for success, positive value for failure. */ 3139/****************************************************************************/ 3140static int 3141bce_dma_alloc(device_t dev) 3142{ 3143 struct bce_softc *sc; 3144 int i, error, rc = 0; 3145 bus_size_t max_size, max_seg_size; 3146 int max_segments; 3147 3148 sc = device_get_softc(dev); 3149 3150 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3151 3152 /* 3153 * Allocate the parent bus DMA tag appropriate for PCI. 3154 */ 3155 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, 3156 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3157 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 3158 &sc->parent_tag)) { 3159 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", 3160 __FILE__, __LINE__); 3161 rc = ENOMEM; 3162 goto bce_dma_alloc_exit; 3163 } 3164 3165 /* 3166 * Create a DMA tag for the status block, allocate and clear the 3167 * memory, map the memory into DMA space, and fetch the physical 3168 * address of the block. 3169 */ 3170 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3171 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3172 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 3173 0, NULL, NULL, &sc->status_tag)) { 3174 BCE_PRINTF("%s(%d): Could not allocate status block " 3175 "DMA tag!\n", __FILE__, __LINE__); 3176 rc = ENOMEM; 3177 goto bce_dma_alloc_exit; 3178 } 3179 3180 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, 3181 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3182 &sc->status_map)) { 3183 BCE_PRINTF("%s(%d): Could not allocate status block " 3184 "DMA memory!\n", __FILE__, __LINE__); 3185 rc = ENOMEM; 3186 goto bce_dma_alloc_exit; 3187 } 3188 3189 error = bus_dmamap_load(sc->status_tag, sc->status_map, 3190 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr, 3191 &sc->status_block_paddr, BUS_DMA_NOWAIT); 3192 3193 if (error) { 3194 BCE_PRINTF("%s(%d): Could not map status block " 3195 "DMA memory!\n", __FILE__, __LINE__); 3196 rc = ENOMEM; 3197 goto bce_dma_alloc_exit; 3198 } 3199 3200 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n", 3201 __FUNCTION__, (uintmax_t) sc->status_block_paddr); 3202 3203 /* 3204 * Create a DMA tag for the statistics block, allocate and clear the 3205 * memory, map the memory into DMA space, and fetch the physical 3206 * address of the block. 3207 */ 3208 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3209 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3210 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 3211 0, NULL, NULL, &sc->stats_tag)) { 3212 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3213 "DMA tag!\n", __FILE__, __LINE__); 3214 rc = ENOMEM; 3215 goto bce_dma_alloc_exit; 3216 } 3217 3218 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, 3219 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { 3220 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3221 "DMA memory!\n", __FILE__, __LINE__); 3222 rc = ENOMEM; 3223 goto bce_dma_alloc_exit; 3224 } 3225 3226 error = bus_dmamap_load(sc->stats_tag, sc->stats_map, 3227 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, 3228 &sc->stats_block_paddr, BUS_DMA_NOWAIT); 3229 3230 if(error) { 3231 BCE_PRINTF("%s(%d): Could not map statistics block " 3232 "DMA memory!\n", __FILE__, __LINE__); 3233 rc = ENOMEM; 3234 goto bce_dma_alloc_exit; 3235 } 3236 3237 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", 3238 __FUNCTION__, (uintmax_t) sc->stats_block_paddr); 3239 3240 /* BCM5709 uses host memory as cache for context memory. */ 3241 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3242 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3243 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 3244 if (sc->ctx_pages == 0) 3245 sc->ctx_pages = 1; 3246 3247 DBRUNIF((sc->ctx_pages > 512), 3248 BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n", 3249 __FILE__, __LINE__, sc->ctx_pages)); 3250 3251 /* 3252 * Create a DMA tag for the context pages, 3253 * allocate and clear the memory, map the 3254 * memory into DMA space, and fetch the 3255 * physical address of the block. 3256 */ 3257 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3258 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3259 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 3260 0, NULL, NULL, &sc->ctx_tag)) { 3261 BCE_PRINTF("%s(%d): Could not allocate CTX " 3262 "DMA tag!\n", __FILE__, __LINE__); 3263 rc = ENOMEM; 3264 goto bce_dma_alloc_exit; 3265 } 3266 3267 for (i = 0; i < sc->ctx_pages; i++) { 3268 3269 if(bus_dmamem_alloc(sc->ctx_tag, 3270 (void **)&sc->ctx_block[i], 3271 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3272 &sc->ctx_map[i])) { 3273 BCE_PRINTF("%s(%d): Could not allocate CTX " 3274 "DMA memory!\n", __FILE__, __LINE__); 3275 rc = ENOMEM; 3276 goto bce_dma_alloc_exit; 3277 } 3278 3279 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 3280 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, 3281 &sc->ctx_paddr[i], BUS_DMA_NOWAIT); 3282 3283 if (error) { 3284 BCE_PRINTF("%s(%d): Could not map CTX " 3285 "DMA memory!\n", __FILE__, __LINE__); 3286 rc = ENOMEM; 3287 goto bce_dma_alloc_exit; 3288 } 3289 3290 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " 3291 "= 0x%jX\n", __FUNCTION__, i, 3292 (uintmax_t) sc->ctx_paddr[i]); 3293 } 3294 } 3295 3296 /* 3297 * Create a DMA tag for the TX buffer descriptor chain, 3298 * allocate and clear the memory, and fetch the 3299 * physical address of the block. 3300 */ 3301 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 3302 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3303 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, 3304 NULL, NULL, &sc->tx_bd_chain_tag)) { 3305 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3306 "chain DMA tag!\n", __FILE__, __LINE__); 3307 rc = ENOMEM; 3308 goto bce_dma_alloc_exit; 3309 } 3310 3311 for (i = 0; i < TX_PAGES; i++) { 3312 3313 if(bus_dmamem_alloc(sc->tx_bd_chain_tag, 3314 (void **)&sc->tx_bd_chain[i], 3315 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3316 &sc->tx_bd_chain_map[i])) { 3317 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3318 "chain DMA memory!\n", __FILE__, __LINE__); 3319 rc = ENOMEM; 3320 goto bce_dma_alloc_exit; 3321 } 3322 3323 error = bus_dmamap_load(sc->tx_bd_chain_tag, 3324 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], 3325 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3326 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3327 3328 if (error) { 3329 BCE_PRINTF("%s(%d): Could not map TX descriptor " 3330 "chain DMA memory!\n", __FILE__, __LINE__); 3331 rc = ENOMEM; 3332 goto bce_dma_alloc_exit; 3333 } 3334 3335 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " 3336 "0x%jX\n", __FUNCTION__, i, 3337 (uintmax_t) sc->tx_bd_chain_paddr[i]); 3338 } 3339 3340 /* Check the required size before mapping to conserve resources. */ 3341 if (bce_tso_enable) { 3342 max_size = BCE_TSO_MAX_SIZE; 3343 max_segments = BCE_MAX_SEGMENTS; 3344 max_seg_size = BCE_TSO_MAX_SEG_SIZE; 3345 } else { 3346 max_size = MCLBYTES * BCE_MAX_SEGMENTS; 3347 max_segments = BCE_MAX_SEGMENTS; 3348 max_seg_size = MCLBYTES; 3349 } 3350 3351 /* Create a DMA tag for TX mbufs. */ 3352 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3353 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 3354 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { 3355 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", 3356 __FILE__, __LINE__); 3357 rc = ENOMEM; 3358 goto bce_dma_alloc_exit; 3359 } 3360 3361 /* Create DMA maps for the TX mbufs clusters. */ 3362 for (i = 0; i < TOTAL_TX_BD; i++) { 3363 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 3364 &sc->tx_mbuf_map[i])) { 3365 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " 3366 "map!\n", __FILE__, __LINE__); 3367 rc = ENOMEM; 3368 goto bce_dma_alloc_exit; 3369 } 3370 } 3371 3372 /* 3373 * Create a DMA tag for the RX buffer descriptor chain, 3374 * allocate and clear the memory, and fetch the physical 3375 * address of the blocks. 3376 */ 3377 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3378 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, 3379 sc->max_bus_addr, NULL, NULL, 3380 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 3381 0, NULL, NULL, &sc->rx_bd_chain_tag)) { 3382 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " 3383 "DMA tag!\n", __FILE__, __LINE__); 3384 rc = ENOMEM; 3385 goto bce_dma_alloc_exit; 3386 } 3387 3388 for (i = 0; i < RX_PAGES; i++) { 3389 3390 if (bus_dmamem_alloc(sc->rx_bd_chain_tag, 3391 (void **)&sc->rx_bd_chain[i], 3392 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3393 &sc->rx_bd_chain_map[i])) { 3394 BCE_PRINTF("%s(%d): Could not allocate RX descriptor " 3395 "chain DMA memory!\n", __FILE__, __LINE__); 3396 rc = ENOMEM; 3397 goto bce_dma_alloc_exit; 3398 } 3399 3400 error = bus_dmamap_load(sc->rx_bd_chain_tag, 3401 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], 3402 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3403 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3404 3405 if (error) { 3406 BCE_PRINTF("%s(%d): Could not map RX descriptor " 3407 "chain DMA memory!\n", __FILE__, __LINE__); 3408 rc = ENOMEM; 3409 goto bce_dma_alloc_exit; 3410 } 3411 3412 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " 3413 "0x%jX\n", __FUNCTION__, i, 3414 (uintmax_t) sc->rx_bd_chain_paddr[i]); 3415 } 3416 3417 /* 3418 * Create a DMA tag for RX mbufs. 3419 */ 3420#ifdef BCE_JUMBO_HDRSPLIT 3421 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 3422 MCLBYTES : sc->rx_bd_mbuf_alloc_size); 3423#else 3424 max_size = max_seg_size = MJUM9BYTES; 3425#endif 3426 max_segments = 1; 3427 3428 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " 3429 "(max size = 0x%jX max segments = %d, max segment " 3430 "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size, 3431 max_segments, (uintmax_t) max_seg_size); 3432 3433 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, 3434 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3435 max_size, max_segments, max_seg_size, 0, NULL, NULL, 3436 &sc->rx_mbuf_tag)) { 3437 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", 3438 __FILE__, __LINE__); 3439 rc = ENOMEM; 3440 goto bce_dma_alloc_exit; 3441 } 3442 3443 /* Create DMA maps for the RX mbuf clusters. */ 3444 for (i = 0; i < TOTAL_RX_BD; i++) { 3445 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, 3446 &sc->rx_mbuf_map[i])) { 3447 BCE_PRINTF("%s(%d): Unable to create RX mbuf " 3448 "DMA map!\n", __FILE__, __LINE__); 3449 rc = ENOMEM; 3450 goto bce_dma_alloc_exit; 3451 } 3452 } 3453 3454#ifdef BCE_JUMBO_HDRSPLIT 3455 /* 3456 * Create a DMA tag for the page buffer descriptor chain, 3457 * allocate and clear the memory, and fetch the physical 3458 * address of the blocks. 3459 */ 3460 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3461 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, 3462 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 3463 0, NULL, NULL, &sc->pg_bd_chain_tag)) { 3464 BCE_PRINTF("%s(%d): Could not allocate page descriptor " 3465 "chain DMA tag!\n", __FILE__, __LINE__); 3466 rc = ENOMEM; 3467 goto bce_dma_alloc_exit; 3468 } 3469 3470 for (i = 0; i < PG_PAGES; i++) { 3471 3472 if (bus_dmamem_alloc(sc->pg_bd_chain_tag, 3473 (void **)&sc->pg_bd_chain[i], 3474 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3475 &sc->pg_bd_chain_map[i])) { 3476 BCE_PRINTF("%s(%d): Could not allocate page " 3477 "descriptor chain DMA memory!\n", 3478 __FILE__, __LINE__); 3479 rc = ENOMEM; 3480 goto bce_dma_alloc_exit; 3481 } 3482 3483 error = bus_dmamap_load(sc->pg_bd_chain_tag, 3484 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], 3485 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, 3486 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3487 3488 if (error) { 3489 BCE_PRINTF("%s(%d): Could not map page descriptor " 3490 "chain DMA memory!\n", __FILE__, __LINE__); 3491 rc = ENOMEM; 3492 goto bce_dma_alloc_exit; 3493 } 3494 3495 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " 3496 "0x%jX\n", __FUNCTION__, i, 3497 (uintmax_t) sc->pg_bd_chain_paddr[i]); 3498 } 3499 3500 /* 3501 * Create a DMA tag for page mbufs. 3502 */ 3503 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ? 3504 MCLBYTES : sc->pg_bd_mbuf_alloc_size); 3505 3506 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3507 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3508 max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) { 3509 BCE_PRINTF("%s(%d): Could not allocate page mbuf " 3510 "DMA tag!\n", __FILE__, __LINE__); 3511 rc = ENOMEM; 3512 goto bce_dma_alloc_exit; 3513 } 3514 3515 /* Create DMA maps for the page mbuf clusters. */ 3516 for (i = 0; i < TOTAL_PG_BD; i++) { 3517 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, 3518 &sc->pg_mbuf_map[i])) { 3519 BCE_PRINTF("%s(%d): Unable to create page mbuf " 3520 "DMA map!\n", __FILE__, __LINE__); 3521 rc = ENOMEM; 3522 goto bce_dma_alloc_exit; 3523 } 3524 } 3525#endif 3526 3527bce_dma_alloc_exit: 3528 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3529 return(rc); 3530} 3531 3532 3533/****************************************************************************/ 3534/* Release all resources used by the driver. */ 3535/* */ 3536/* Releases all resources acquired by the driver including interrupts, */ 3537/* interrupt handler, interfaces, mutexes, and DMA memory. */ 3538/* */ 3539/* Returns: */ 3540/* Nothing. */ 3541/****************************************************************************/ 3542static void 3543bce_release_resources(struct bce_softc *sc) 3544{ 3545 device_t dev; 3546 3547 DBENTER(BCE_VERBOSE_RESET); 3548 3549 dev = sc->bce_dev; 3550 3551 bce_dma_free(sc); 3552 3553 if (sc->bce_intrhand != NULL) { 3554 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); 3555 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 3556 } 3557 3558 if (sc->bce_res_irq != NULL) { 3559 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); 3560 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 3561 sc->bce_res_irq); 3562 } 3563 3564 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { 3565 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); 3566 pci_release_msi(dev); 3567 } 3568 3569 if (sc->bce_res_mem != NULL) { 3570 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); 3571 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 3572 sc->bce_res_mem); 3573 } 3574 3575 if (sc->bce_ifp != NULL) { 3576 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); 3577 if_free(sc->bce_ifp); 3578 } 3579 3580 if (mtx_initialized(&sc->bce_mtx)) 3581 BCE_LOCK_DESTROY(sc); 3582 3583 DBEXIT(BCE_VERBOSE_RESET); 3584} 3585 3586 3587/****************************************************************************/ 3588/* Firmware synchronization. */ 3589/* */ 3590/* Before performing certain events such as a chip reset, synchronize with */ 3591/* the firmware first. */ 3592/* */ 3593/* Returns: */ 3594/* 0 for success, positive value for failure. */ 3595/****************************************************************************/ 3596static int 3597bce_fw_sync(struct bce_softc *sc, u32 msg_data) 3598{ 3599 int i, rc = 0; 3600 u32 val; 3601 3602 DBENTER(BCE_VERBOSE_RESET); 3603 3604 /* Don't waste any time if we've timed out before. */ 3605 if (sc->bce_fw_timed_out == TRUE) { 3606 rc = EBUSY; 3607 goto bce_fw_sync_exit; 3608 } 3609 3610 /* Increment the message sequence number. */ 3611 sc->bce_fw_wr_seq++; 3612 msg_data |= sc->bce_fw_wr_seq; 3613 3614 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " 3615 "0x%08X\n", msg_data); 3616 3617 /* Send the message to the bootcode driver mailbox. */ 3618 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3619 3620 /* Wait for the bootcode to acknowledge the message. */ 3621 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 3622 /* Check for a response in the bootcode firmware mailbox. */ 3623 val = bce_shmem_rd(sc, BCE_FW_MB); 3624 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 3625 break; 3626 DELAY(1000); 3627 } 3628 3629 /* If we've timed out, tell bootcode that we've stopped waiting. */ 3630 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && 3631 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { 3632 3633 BCE_PRINTF("%s(%d): Firmware synchronization timeout! " 3634 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 3635 3636 msg_data &= ~BCE_DRV_MSG_CODE; 3637 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 3638 3639 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3640 3641 sc->bce_fw_timed_out = TRUE; 3642 rc = EBUSY; 3643 } 3644 3645bce_fw_sync_exit: 3646 DBEXIT(BCE_VERBOSE_RESET); 3647 return (rc); 3648} 3649 3650 3651/****************************************************************************/ 3652/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 3653/* */ 3654/* Returns: */ 3655/* Nothing. */ 3656/****************************************************************************/ 3657static void 3658bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 3659 u32 rv2p_code_len, u32 rv2p_proc) 3660{ 3661 int i; 3662 u32 val; 3663 3664 DBENTER(BCE_VERBOSE_RESET); 3665 3666 /* Set the page size used by RV2P. */ 3667 if (rv2p_proc == RV2P_PROC2) { 3668 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); 3669 } 3670 3671 for (i = 0; i < rv2p_code_len; i += 8) { 3672 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 3673 rv2p_code++; 3674 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 3675 rv2p_code++; 3676 3677 if (rv2p_proc == RV2P_PROC1) { 3678 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 3679 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 3680 } 3681 else { 3682 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 3683 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 3684 } 3685 } 3686 3687 /* Reset the processor, un-stall is done later. */ 3688 if (rv2p_proc == RV2P_PROC1) { 3689 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 3690 } 3691 else { 3692 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 3693 } 3694 3695 DBEXIT(BCE_VERBOSE_RESET); 3696} 3697 3698 3699/****************************************************************************/ 3700/* Load RISC processor firmware. */ 3701/* */ 3702/* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 3703/* associated with a particular processor. */ 3704/* */ 3705/* Returns: */ 3706/* Nothing. */ 3707/****************************************************************************/ 3708static void 3709bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 3710 struct fw_info *fw) 3711{ 3712 u32 offset; 3713 3714 DBENTER(BCE_VERBOSE_RESET); 3715 3716 bce_halt_cpu(sc, cpu_reg); 3717 3718 /* Load the Text area. */ 3719 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 3720 if (fw->text) { 3721 int j; 3722 3723 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { 3724 REG_WR_IND(sc, offset, fw->text[j]); 3725 } 3726 } 3727 3728 /* Load the Data area. */ 3729 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 3730 if (fw->data) { 3731 int j; 3732 3733 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { 3734 REG_WR_IND(sc, offset, fw->data[j]); 3735 } 3736 } 3737 3738 /* Load the SBSS area. */ 3739 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 3740 if (fw->sbss) { 3741 int j; 3742 3743 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { 3744 REG_WR_IND(sc, offset, fw->sbss[j]); 3745 } 3746 } 3747 3748 /* Load the BSS area. */ 3749 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 3750 if (fw->bss) { 3751 int j; 3752 3753 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { 3754 REG_WR_IND(sc, offset, fw->bss[j]); 3755 } 3756 } 3757 3758 /* Load the Read-Only area. */ 3759 offset = cpu_reg->spad_base + 3760 (fw->rodata_addr - cpu_reg->mips_view_base); 3761 if (fw->rodata) { 3762 int j; 3763 3764 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { 3765 REG_WR_IND(sc, offset, fw->rodata[j]); 3766 } 3767 } 3768 3769 /* Clear the pre-fetch instruction and set the FW start address. */ 3770 REG_WR_IND(sc, cpu_reg->inst, 0); 3771 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 3772 3773 DBEXIT(BCE_VERBOSE_RESET); 3774} 3775 3776 3777/****************************************************************************/ 3778/* Starts the RISC processor. */ 3779/* */ 3780/* Assumes the CPU starting address has already been set. */ 3781/* */ 3782/* Returns: */ 3783/* Nothing. */ 3784/****************************************************************************/ 3785static void 3786bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3787{ 3788 u32 val; 3789 3790 DBENTER(BCE_VERBOSE_RESET); 3791 3792 /* Start the CPU. */ 3793 val = REG_RD_IND(sc, cpu_reg->mode); 3794 val &= ~cpu_reg->mode_value_halt; 3795 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3796 REG_WR_IND(sc, cpu_reg->mode, val); 3797 3798 DBEXIT(BCE_VERBOSE_RESET); 3799} 3800 3801 3802/****************************************************************************/ 3803/* Halts the RISC processor. */ 3804/* */ 3805/* Returns: */ 3806/* Nothing. */ 3807/****************************************************************************/ 3808static void 3809bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3810{ 3811 u32 val; 3812 3813 DBENTER(BCE_VERBOSE_RESET); 3814 3815 /* Halt the CPU. */ 3816 val = REG_RD_IND(sc, cpu_reg->mode); 3817 val |= cpu_reg->mode_value_halt; 3818 REG_WR_IND(sc, cpu_reg->mode, val); 3819 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3820 3821 DBEXIT(BCE_VERBOSE_RESET); 3822} 3823 3824 3825/****************************************************************************/ 3826/* Initialize the RX CPU. */ 3827/* */ 3828/* Returns: */ 3829/* Nothing. */ 3830/****************************************************************************/ 3831static void 3832bce_start_rxp_cpu(struct bce_softc *sc) 3833{ 3834 struct cpu_reg cpu_reg; 3835 3836 DBENTER(BCE_VERBOSE_RESET); 3837 3838 cpu_reg.mode = BCE_RXP_CPU_MODE; 3839 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3840 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3841 cpu_reg.state = BCE_RXP_CPU_STATE; 3842 cpu_reg.state_value_clear = 0xffffff; 3843 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3844 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3845 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3846 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3847 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3848 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3849 cpu_reg.mips_view_base = 0x8000000; 3850 3851 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); 3852 bce_start_cpu(sc, &cpu_reg); 3853 3854 DBEXIT(BCE_VERBOSE_RESET); 3855} 3856 3857 3858/****************************************************************************/ 3859/* Initialize the RX CPU. */ 3860/* */ 3861/* Returns: */ 3862/* Nothing. */ 3863/****************************************************************************/ 3864static void 3865bce_init_rxp_cpu(struct bce_softc *sc) 3866{ 3867 struct cpu_reg cpu_reg; 3868 struct fw_info fw; 3869 3870 DBENTER(BCE_VERBOSE_RESET); 3871 3872 cpu_reg.mode = BCE_RXP_CPU_MODE; 3873 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3874 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3875 cpu_reg.state = BCE_RXP_CPU_STATE; 3876 cpu_reg.state_value_clear = 0xffffff; 3877 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3878 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3879 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3880 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3881 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3882 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3883 cpu_reg.mips_view_base = 0x8000000; 3884 3885 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3886 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3887 fw.ver_major = bce_RXP_b09FwReleaseMajor; 3888 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 3889 fw.ver_fix = bce_RXP_b09FwReleaseFix; 3890 fw.start_addr = bce_RXP_b09FwStartAddr; 3891 3892 fw.text_addr = bce_RXP_b09FwTextAddr; 3893 fw.text_len = bce_RXP_b09FwTextLen; 3894 fw.text_index = 0; 3895 fw.text = bce_RXP_b09FwText; 3896 3897 fw.data_addr = bce_RXP_b09FwDataAddr; 3898 fw.data_len = bce_RXP_b09FwDataLen; 3899 fw.data_index = 0; 3900 fw.data = bce_RXP_b09FwData; 3901 3902 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 3903 fw.sbss_len = bce_RXP_b09FwSbssLen; 3904 fw.sbss_index = 0; 3905 fw.sbss = bce_RXP_b09FwSbss; 3906 3907 fw.bss_addr = bce_RXP_b09FwBssAddr; 3908 fw.bss_len = bce_RXP_b09FwBssLen; 3909 fw.bss_index = 0; 3910 fw.bss = bce_RXP_b09FwBss; 3911 3912 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 3913 fw.rodata_len = bce_RXP_b09FwRodataLen; 3914 fw.rodata_index = 0; 3915 fw.rodata = bce_RXP_b09FwRodata; 3916 } else { 3917 fw.ver_major = bce_RXP_b06FwReleaseMajor; 3918 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 3919 fw.ver_fix = bce_RXP_b06FwReleaseFix; 3920 fw.start_addr = bce_RXP_b06FwStartAddr; 3921 3922 fw.text_addr = bce_RXP_b06FwTextAddr; 3923 fw.text_len = bce_RXP_b06FwTextLen; 3924 fw.text_index = 0; 3925 fw.text = bce_RXP_b06FwText; 3926 3927 fw.data_addr = bce_RXP_b06FwDataAddr; 3928 fw.data_len = bce_RXP_b06FwDataLen; 3929 fw.data_index = 0; 3930 fw.data = bce_RXP_b06FwData; 3931 3932 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 3933 fw.sbss_len = bce_RXP_b06FwSbssLen; 3934 fw.sbss_index = 0; 3935 fw.sbss = bce_RXP_b06FwSbss; 3936 3937 fw.bss_addr = bce_RXP_b06FwBssAddr; 3938 fw.bss_len = bce_RXP_b06FwBssLen; 3939 fw.bss_index = 0; 3940 fw.bss = bce_RXP_b06FwBss; 3941 3942 fw.rodata_addr = bce_RXP_b06FwRodataAddr; 3943 fw.rodata_len = bce_RXP_b06FwRodataLen; 3944 fw.rodata_index = 0; 3945 fw.rodata = bce_RXP_b06FwRodata; 3946 } 3947 3948 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); 3949 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3950 3951 /* Delay RXP start until initialization is complete. */ 3952 3953 DBEXIT(BCE_VERBOSE_RESET); 3954} 3955 3956 3957/****************************************************************************/ 3958/* Initialize the TX CPU. */ 3959/* */ 3960/* Returns: */ 3961/* Nothing. */ 3962/****************************************************************************/ 3963static void 3964bce_init_txp_cpu(struct bce_softc *sc) 3965{ 3966 struct cpu_reg cpu_reg; 3967 struct fw_info fw; 3968 3969 DBENTER(BCE_VERBOSE_RESET); 3970 3971 cpu_reg.mode = BCE_TXP_CPU_MODE; 3972 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 3973 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 3974 cpu_reg.state = BCE_TXP_CPU_STATE; 3975 cpu_reg.state_value_clear = 0xffffff; 3976 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 3977 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 3978 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 3979 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 3980 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 3981 cpu_reg.spad_base = BCE_TXP_SCRATCH; 3982 cpu_reg.mips_view_base = 0x8000000; 3983 3984 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3985 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3986 fw.ver_major = bce_TXP_b09FwReleaseMajor; 3987 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 3988 fw.ver_fix = bce_TXP_b09FwReleaseFix; 3989 fw.start_addr = bce_TXP_b09FwStartAddr; 3990 3991 fw.text_addr = bce_TXP_b09FwTextAddr; 3992 fw.text_len = bce_TXP_b09FwTextLen; 3993 fw.text_index = 0; 3994 fw.text = bce_TXP_b09FwText; 3995 3996 fw.data_addr = bce_TXP_b09FwDataAddr; 3997 fw.data_len = bce_TXP_b09FwDataLen; 3998 fw.data_index = 0; 3999 fw.data = bce_TXP_b09FwData; 4000 4001 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 4002 fw.sbss_len = bce_TXP_b09FwSbssLen; 4003 fw.sbss_index = 0; 4004 fw.sbss = bce_TXP_b09FwSbss; 4005 4006 fw.bss_addr = bce_TXP_b09FwBssAddr; 4007 fw.bss_len = bce_TXP_b09FwBssLen; 4008 fw.bss_index = 0; 4009 fw.bss = bce_TXP_b09FwBss; 4010 4011 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 4012 fw.rodata_len = bce_TXP_b09FwRodataLen; 4013 fw.rodata_index = 0; 4014 fw.rodata = bce_TXP_b09FwRodata; 4015 } else { 4016 fw.ver_major = bce_TXP_b06FwReleaseMajor; 4017 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 4018 fw.ver_fix = bce_TXP_b06FwReleaseFix; 4019 fw.start_addr = bce_TXP_b06FwStartAddr; 4020 4021 fw.text_addr = bce_TXP_b06FwTextAddr; 4022 fw.text_len = bce_TXP_b06FwTextLen; 4023 fw.text_index = 0; 4024 fw.text = bce_TXP_b06FwText; 4025 4026 fw.data_addr = bce_TXP_b06FwDataAddr; 4027 fw.data_len = bce_TXP_b06FwDataLen; 4028 fw.data_index = 0; 4029 fw.data = bce_TXP_b06FwData; 4030 4031 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 4032 fw.sbss_len = bce_TXP_b06FwSbssLen; 4033 fw.sbss_index = 0; 4034 fw.sbss = bce_TXP_b06FwSbss; 4035 4036 fw.bss_addr = bce_TXP_b06FwBssAddr; 4037 fw.bss_len = bce_TXP_b06FwBssLen; 4038 fw.bss_index = 0; 4039 fw.bss = bce_TXP_b06FwBss; 4040 4041 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 4042 fw.rodata_len = bce_TXP_b06FwRodataLen; 4043 fw.rodata_index = 0; 4044 fw.rodata = bce_TXP_b06FwRodata; 4045 } 4046 4047 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); 4048 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4049 bce_start_cpu(sc, &cpu_reg); 4050 4051 DBEXIT(BCE_VERBOSE_RESET); 4052} 4053 4054 4055/****************************************************************************/ 4056/* Initialize the TPAT CPU. */ 4057/* */ 4058/* Returns: */ 4059/* Nothing. */ 4060/****************************************************************************/ 4061static void 4062bce_init_tpat_cpu(struct bce_softc *sc) 4063{ 4064 struct cpu_reg cpu_reg; 4065 struct fw_info fw; 4066 4067 DBENTER(BCE_VERBOSE_RESET); 4068 4069 cpu_reg.mode = BCE_TPAT_CPU_MODE; 4070 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 4071 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 4072 cpu_reg.state = BCE_TPAT_CPU_STATE; 4073 cpu_reg.state_value_clear = 0xffffff; 4074 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 4075 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 4076 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 4077 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 4078 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 4079 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 4080 cpu_reg.mips_view_base = 0x8000000; 4081 4082 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4083 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4084 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 4085 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 4086 fw.ver_fix = bce_TPAT_b09FwReleaseFix; 4087 fw.start_addr = bce_TPAT_b09FwStartAddr; 4088 4089 fw.text_addr = bce_TPAT_b09FwTextAddr; 4090 fw.text_len = bce_TPAT_b09FwTextLen; 4091 fw.text_index = 0; 4092 fw.text = bce_TPAT_b09FwText; 4093 4094 fw.data_addr = bce_TPAT_b09FwDataAddr; 4095 fw.data_len = bce_TPAT_b09FwDataLen; 4096 fw.data_index = 0; 4097 fw.data = bce_TPAT_b09FwData; 4098 4099 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 4100 fw.sbss_len = bce_TPAT_b09FwSbssLen; 4101 fw.sbss_index = 0; 4102 fw.sbss = bce_TPAT_b09FwSbss; 4103 4104 fw.bss_addr = bce_TPAT_b09FwBssAddr; 4105 fw.bss_len = bce_TPAT_b09FwBssLen; 4106 fw.bss_index = 0; 4107 fw.bss = bce_TPAT_b09FwBss; 4108 4109 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 4110 fw.rodata_len = bce_TPAT_b09FwRodataLen; 4111 fw.rodata_index = 0; 4112 fw.rodata = bce_TPAT_b09FwRodata; 4113 } else { 4114 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 4115 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 4116 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 4117 fw.start_addr = bce_TPAT_b06FwStartAddr; 4118 4119 fw.text_addr = bce_TPAT_b06FwTextAddr; 4120 fw.text_len = bce_TPAT_b06FwTextLen; 4121 fw.text_index = 0; 4122 fw.text = bce_TPAT_b06FwText; 4123 4124 fw.data_addr = bce_TPAT_b06FwDataAddr; 4125 fw.data_len = bce_TPAT_b06FwDataLen; 4126 fw.data_index = 0; 4127 fw.data = bce_TPAT_b06FwData; 4128 4129 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 4130 fw.sbss_len = bce_TPAT_b06FwSbssLen; 4131 fw.sbss_index = 0; 4132 fw.sbss = bce_TPAT_b06FwSbss; 4133 4134 fw.bss_addr = bce_TPAT_b06FwBssAddr; 4135 fw.bss_len = bce_TPAT_b06FwBssLen; 4136 fw.bss_index = 0; 4137 fw.bss = bce_TPAT_b06FwBss; 4138 4139 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 4140 fw.rodata_len = bce_TPAT_b06FwRodataLen; 4141 fw.rodata_index = 0; 4142 fw.rodata = bce_TPAT_b06FwRodata; 4143 } 4144 4145 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n"); 4146 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4147 bce_start_cpu(sc, &cpu_reg); 4148 4149 DBEXIT(BCE_VERBOSE_RESET); 4150} 4151 4152 4153/****************************************************************************/ 4154/* Initialize the CP CPU. */ 4155/* */ 4156/* Returns: */ 4157/* Nothing. */ 4158/****************************************************************************/ 4159static void 4160bce_init_cp_cpu(struct bce_softc *sc) 4161{ 4162 struct cpu_reg cpu_reg; 4163 struct fw_info fw; 4164 4165 DBENTER(BCE_VERBOSE_RESET); 4166 4167 cpu_reg.mode = BCE_CP_CPU_MODE; 4168 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; 4169 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; 4170 cpu_reg.state = BCE_CP_CPU_STATE; 4171 cpu_reg.state_value_clear = 0xffffff; 4172 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; 4173 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; 4174 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; 4175 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; 4176 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; 4177 cpu_reg.spad_base = BCE_CP_SCRATCH; 4178 cpu_reg.mips_view_base = 0x8000000; 4179 4180 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4181 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4182 fw.ver_major = bce_CP_b09FwReleaseMajor; 4183 fw.ver_minor = bce_CP_b09FwReleaseMinor; 4184 fw.ver_fix = bce_CP_b09FwReleaseFix; 4185 fw.start_addr = bce_CP_b09FwStartAddr; 4186 4187 fw.text_addr = bce_CP_b09FwTextAddr; 4188 fw.text_len = bce_CP_b09FwTextLen; 4189 fw.text_index = 0; 4190 fw.text = bce_CP_b09FwText; 4191 4192 fw.data_addr = bce_CP_b09FwDataAddr; 4193 fw.data_len = bce_CP_b09FwDataLen; 4194 fw.data_index = 0; 4195 fw.data = bce_CP_b09FwData; 4196 4197 fw.sbss_addr = bce_CP_b09FwSbssAddr; 4198 fw.sbss_len = bce_CP_b09FwSbssLen; 4199 fw.sbss_index = 0; 4200 fw.sbss = bce_CP_b09FwSbss; 4201 4202 fw.bss_addr = bce_CP_b09FwBssAddr; 4203 fw.bss_len = bce_CP_b09FwBssLen; 4204 fw.bss_index = 0; 4205 fw.bss = bce_CP_b09FwBss; 4206 4207 fw.rodata_addr = bce_CP_b09FwRodataAddr; 4208 fw.rodata_len = bce_CP_b09FwRodataLen; 4209 fw.rodata_index = 0; 4210 fw.rodata = bce_CP_b09FwRodata; 4211 } else { 4212 fw.ver_major = bce_CP_b06FwReleaseMajor; 4213 fw.ver_minor = bce_CP_b06FwReleaseMinor; 4214 fw.ver_fix = bce_CP_b06FwReleaseFix; 4215 fw.start_addr = bce_CP_b06FwStartAddr; 4216 4217 fw.text_addr = bce_CP_b06FwTextAddr; 4218 fw.text_len = bce_CP_b06FwTextLen; 4219 fw.text_index = 0; 4220 fw.text = bce_CP_b06FwText; 4221 4222 fw.data_addr = bce_CP_b06FwDataAddr; 4223 fw.data_len = bce_CP_b06FwDataLen; 4224 fw.data_index = 0; 4225 fw.data = bce_CP_b06FwData; 4226 4227 fw.sbss_addr = bce_CP_b06FwSbssAddr; 4228 fw.sbss_len = bce_CP_b06FwSbssLen; 4229 fw.sbss_index = 0; 4230 fw.sbss = bce_CP_b06FwSbss; 4231 4232 fw.bss_addr = bce_CP_b06FwBssAddr; 4233 fw.bss_len = bce_CP_b06FwBssLen; 4234 fw.bss_index = 0; 4235 fw.bss = bce_CP_b06FwBss; 4236 4237 fw.rodata_addr = bce_CP_b06FwRodataAddr; 4238 fw.rodata_len = bce_CP_b06FwRodataLen; 4239 fw.rodata_index = 0; 4240 fw.rodata = bce_CP_b06FwRodata; 4241 } 4242 4243 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n"); 4244 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4245 bce_start_cpu(sc, &cpu_reg); 4246 4247 DBEXIT(BCE_VERBOSE_RESET); 4248} 4249 4250 4251/****************************************************************************/ 4252/* Initialize the COM CPU. */ 4253/* */ 4254/* Returns: */ 4255/* Nothing. */ 4256/****************************************************************************/ 4257static void 4258bce_init_com_cpu(struct bce_softc *sc) 4259{ 4260 struct cpu_reg cpu_reg; 4261 struct fw_info fw; 4262 4263 DBENTER(BCE_VERBOSE_RESET); 4264 4265 cpu_reg.mode = BCE_COM_CPU_MODE; 4266 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; 4267 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; 4268 cpu_reg.state = BCE_COM_CPU_STATE; 4269 cpu_reg.state_value_clear = 0xffffff; 4270 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; 4271 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; 4272 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; 4273 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; 4274 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; 4275 cpu_reg.spad_base = BCE_COM_SCRATCH; 4276 cpu_reg.mips_view_base = 0x8000000; 4277 4278 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4279 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4280 fw.ver_major = bce_COM_b09FwReleaseMajor; 4281 fw.ver_minor = bce_COM_b09FwReleaseMinor; 4282 fw.ver_fix = bce_COM_b09FwReleaseFix; 4283 fw.start_addr = bce_COM_b09FwStartAddr; 4284 4285 fw.text_addr = bce_COM_b09FwTextAddr; 4286 fw.text_len = bce_COM_b09FwTextLen; 4287 fw.text_index = 0; 4288 fw.text = bce_COM_b09FwText; 4289 4290 fw.data_addr = bce_COM_b09FwDataAddr; 4291 fw.data_len = bce_COM_b09FwDataLen; 4292 fw.data_index = 0; 4293 fw.data = bce_COM_b09FwData; 4294 4295 fw.sbss_addr = bce_COM_b09FwSbssAddr; 4296 fw.sbss_len = bce_COM_b09FwSbssLen; 4297 fw.sbss_index = 0; 4298 fw.sbss = bce_COM_b09FwSbss; 4299 4300 fw.bss_addr = bce_COM_b09FwBssAddr; 4301 fw.bss_len = bce_COM_b09FwBssLen; 4302 fw.bss_index = 0; 4303 fw.bss = bce_COM_b09FwBss; 4304 4305 fw.rodata_addr = bce_COM_b09FwRodataAddr; 4306 fw.rodata_len = bce_COM_b09FwRodataLen; 4307 fw.rodata_index = 0; 4308 fw.rodata = bce_COM_b09FwRodata; 4309 } else { 4310 fw.ver_major = bce_COM_b06FwReleaseMajor; 4311 fw.ver_minor = bce_COM_b06FwReleaseMinor; 4312 fw.ver_fix = bce_COM_b06FwReleaseFix; 4313 fw.start_addr = bce_COM_b06FwStartAddr; 4314 4315 fw.text_addr = bce_COM_b06FwTextAddr; 4316 fw.text_len = bce_COM_b06FwTextLen; 4317 fw.text_index = 0; 4318 fw.text = bce_COM_b06FwText; 4319 4320 fw.data_addr = bce_COM_b06FwDataAddr; 4321 fw.data_len = bce_COM_b06FwDataLen; 4322 fw.data_index = 0; 4323 fw.data = bce_COM_b06FwData; 4324 4325 fw.sbss_addr = bce_COM_b06FwSbssAddr; 4326 fw.sbss_len = bce_COM_b06FwSbssLen; 4327 fw.sbss_index = 0; 4328 fw.sbss = bce_COM_b06FwSbss; 4329 4330 fw.bss_addr = bce_COM_b06FwBssAddr; 4331 fw.bss_len = bce_COM_b06FwBssLen; 4332 fw.bss_index = 0; 4333 fw.bss = bce_COM_b06FwBss; 4334 4335 fw.rodata_addr = bce_COM_b06FwRodataAddr; 4336 fw.rodata_len = bce_COM_b06FwRodataLen; 4337 fw.rodata_index = 0; 4338 fw.rodata = bce_COM_b06FwRodata; 4339 } 4340 4341 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n"); 4342 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4343 bce_start_cpu(sc, &cpu_reg); 4344 4345 DBEXIT(BCE_VERBOSE_RESET); 4346} 4347 4348 4349/****************************************************************************/ 4350/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ 4351/* */ 4352/* Loads the firmware for each CPU and starts the CPU. */ 4353/* */ 4354/* Returns: */ 4355/* Nothing. */ 4356/****************************************************************************/ 4357static void 4358bce_init_cpus(struct bce_softc *sc) 4359{ 4360 DBENTER(BCE_VERBOSE_RESET); 4361 4362 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4363 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4364 4365 if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) { 4366 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1, 4367 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1); 4368 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2, 4369 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2); 4370 } else { 4371 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, 4372 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1); 4373 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, 4374 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2); 4375 } 4376 4377 } else { 4378 bce_load_rv2p_fw(sc, bce_rv2p_proc1, 4379 sizeof(bce_rv2p_proc1), RV2P_PROC1); 4380 bce_load_rv2p_fw(sc, bce_rv2p_proc2, 4381 sizeof(bce_rv2p_proc2), RV2P_PROC2); 4382 } 4383 4384 bce_init_rxp_cpu(sc); 4385 bce_init_txp_cpu(sc); 4386 bce_init_tpat_cpu(sc); 4387 bce_init_com_cpu(sc); 4388 bce_init_cp_cpu(sc); 4389 4390 DBEXIT(BCE_VERBOSE_RESET); 4391} 4392 4393 4394/****************************************************************************/ 4395/* Initialize context memory. */ 4396/* */ 4397/* Clears the memory associated with each Context ID (CID). */ 4398/* */ 4399/* Returns: */ 4400/* Nothing. */ 4401/****************************************************************************/ 4402static int 4403bce_init_ctx(struct bce_softc *sc) 4404{ 4405 u32 offset, val, vcid_addr; 4406 int i, j, rc, retry_cnt; 4407 4408 rc = 0; 4409 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4410 4411 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4412 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4413 retry_cnt = CTX_INIT_RETRY_COUNT; 4414 4415 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n"); 4416 4417 /* 4418 * BCM5709 context memory may be cached 4419 * in host memory so prepare the host memory 4420 * for access. 4421 */ 4422 val = BCE_CTX_COMMAND_ENABLED | 4423 BCE_CTX_COMMAND_MEM_INIT | (1 << 12); 4424 val |= (BCM_PAGE_BITS - 8) << 16; 4425 REG_WR(sc, BCE_CTX_COMMAND, val); 4426 4427 /* Wait for mem init command to complete. */ 4428 for (i = 0; i < retry_cnt; i++) { 4429 val = REG_RD(sc, BCE_CTX_COMMAND); 4430 if (!(val & BCE_CTX_COMMAND_MEM_INIT)) 4431 break; 4432 DELAY(2); 4433 } 4434 if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) { 4435 BCE_PRINTF("%s(): Context memory initialization failed!\n", 4436 __FUNCTION__); 4437 rc = EBUSY; 4438 goto init_ctx_fail; 4439 } 4440 4441 for (i = 0; i < sc->ctx_pages; i++) { 4442 /* Set the physical address of the context memory. */ 4443 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0, 4444 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) | 4445 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID); 4446 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1, 4447 BCE_ADDR_HI(sc->ctx_paddr[i])); 4448 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i | 4449 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4450 4451 /* Verify the context memory write was successful. */ 4452 for (j = 0; j < retry_cnt; j++) { 4453 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL); 4454 if ((val & 4455 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 4456 break; 4457 DELAY(5); 4458 } 4459 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) { 4460 BCE_PRINTF("%s(): Failed to initialize " 4461 "context page %d!\n", __FUNCTION__, i); 4462 rc = EBUSY; 4463 goto init_ctx_fail; 4464 } 4465 } 4466 } else { 4467 4468 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n"); 4469 4470 /* 4471 * For the 5706/5708, context memory is local to 4472 * the controller, so initialize the controller 4473 * context memory. 4474 */ 4475 4476 vcid_addr = GET_CID_ADDR(96); 4477 while (vcid_addr) { 4478 4479 vcid_addr -= PHY_CTX_SIZE; 4480 4481 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 4482 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4483 4484 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { 4485 CTX_WR(sc, 0x00, offset, 0); 4486 } 4487 4488 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 4489 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4490 } 4491 4492 } 4493init_ctx_fail: 4494 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4495 return (rc); 4496} 4497 4498 4499/****************************************************************************/ 4500/* Fetch the permanent MAC address of the controller. */ 4501/* */ 4502/* Returns: */ 4503/* Nothing. */ 4504/****************************************************************************/ 4505static void 4506bce_get_mac_addr(struct bce_softc *sc) 4507{ 4508 u32 mac_lo = 0, mac_hi = 0; 4509 4510 DBENTER(BCE_VERBOSE_RESET); 4511 4512 /* 4513 * The NetXtreme II bootcode populates various NIC 4514 * power-on and runtime configuration items in a 4515 * shared memory area. The factory configured MAC 4516 * address is available from both NVRAM and the 4517 * shared memory area so we'll read the value from 4518 * shared memory for speed. 4519 */ 4520 4521 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 4522 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 4523 4524 if ((mac_lo == 0) && (mac_hi == 0)) { 4525 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", 4526 __FILE__, __LINE__); 4527 } else { 4528 sc->eaddr[0] = (u_char)(mac_hi >> 8); 4529 sc->eaddr[1] = (u_char)(mac_hi >> 0); 4530 sc->eaddr[2] = (u_char)(mac_lo >> 24); 4531 sc->eaddr[3] = (u_char)(mac_lo >> 16); 4532 sc->eaddr[4] = (u_char)(mac_lo >> 8); 4533 sc->eaddr[5] = (u_char)(mac_lo >> 0); 4534 } 4535 4536 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " 4537 "address = %6D\n", sc->eaddr, ":"); 4538 DBEXIT(BCE_VERBOSE_RESET); 4539} 4540 4541 4542/****************************************************************************/ 4543/* Program the MAC address. */ 4544/* */ 4545/* Returns: */ 4546/* Nothing. */ 4547/****************************************************************************/ 4548static void 4549bce_set_mac_addr(struct bce_softc *sc) 4550{ 4551 u32 val; 4552 u8 *mac_addr = sc->eaddr; 4553 4554 /* ToDo: Add support for setting multiple MAC addresses. */ 4555 4556 DBENTER(BCE_VERBOSE_RESET); 4557 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " 4558 "%6D\n", sc->eaddr, ":"); 4559 4560 val = (mac_addr[0] << 8) | mac_addr[1]; 4561 4562 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 4563 4564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4565 (mac_addr[4] << 8) | mac_addr[5]; 4566 4567 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 4568 4569 DBEXIT(BCE_VERBOSE_RESET); 4570} 4571 4572 4573/****************************************************************************/ 4574/* Stop the controller. */ 4575/* */ 4576/* Returns: */ 4577/* Nothing. */ 4578/****************************************************************************/ 4579static void 4580bce_stop(struct bce_softc *sc) 4581{ 4582 struct ifnet *ifp; 4583 4584 DBENTER(BCE_VERBOSE_RESET); 4585 4586 BCE_LOCK_ASSERT(sc); 4587 4588 ifp = sc->bce_ifp; 4589 4590 callout_stop(&sc->bce_tick_callout); 4591 4592 /* Disable the transmit/receive blocks. */ 4593 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 4594 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4595 DELAY(20); 4596 4597 bce_disable_intr(sc); 4598 4599 /* Free RX buffers. */ 4600#ifdef BCE_JUMBO_HDRSPLIT 4601 bce_free_pg_chain(sc); 4602#endif 4603 bce_free_rx_chain(sc); 4604 4605 /* Free TX buffers. */ 4606 bce_free_tx_chain(sc); 4607 4608 sc->watchdog_timer = 0; 4609 4610 sc->bce_link_up = FALSE; 4611 4612 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4613 4614 DBEXIT(BCE_VERBOSE_RESET); 4615} 4616 4617 4618static int 4619bce_reset(struct bce_softc *sc, u32 reset_code) 4620{ 4621 u32 val; 4622 int i, rc = 0; 4623 4624 DBENTER(BCE_VERBOSE_RESET); 4625 4626 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", 4627 __FUNCTION__, reset_code); 4628 4629 /* Wait for pending PCI transactions to complete. */ 4630 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 4631 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4632 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4633 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4634 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4635 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4636 DELAY(5); 4637 4638 /* Disable DMA */ 4639 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4640 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4641 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4642 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4643 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4644 } 4645 4646 /* Assume bootcode is running. */ 4647 sc->bce_fw_timed_out = FALSE; 4648 sc->bce_drv_cardiac_arrest = FALSE; 4649 4650 /* Give the firmware a chance to prepare for the reset. */ 4651 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 4652 if (rc) 4653 goto bce_reset_exit; 4654 4655 /* Set a firmware reminder that this is a soft reset. */ 4656 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC); 4657 4658 /* Dummy read to force the chip to complete all current transactions. */ 4659 val = REG_RD(sc, BCE_MISC_ID); 4660 4661 /* Chip reset. */ 4662 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4663 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4664 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 4665 REG_RD(sc, BCE_MISC_COMMAND); 4666 DELAY(5); 4667 4668 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4669 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4670 4671 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 4672 } else { 4673 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4674 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4675 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4676 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 4677 4678 /* Allow up to 30us for reset to complete. */ 4679 for (i = 0; i < 10; i++) { 4680 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 4681 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4682 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 4683 break; 4684 } 4685 DELAY(10); 4686 } 4687 4688 /* Check that reset completed successfully. */ 4689 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4690 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 4691 BCE_PRINTF("%s(%d): Reset failed!\n", 4692 __FILE__, __LINE__); 4693 rc = EBUSY; 4694 goto bce_reset_exit; 4695 } 4696 } 4697 4698 /* Make sure byte swapping is properly configured. */ 4699 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 4700 if (val != 0x01020304) { 4701 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", 4702 __FILE__, __LINE__); 4703 rc = ENODEV; 4704 goto bce_reset_exit; 4705 } 4706 4707 /* Just completed a reset, assume that firmware is running again. */ 4708 sc->bce_fw_timed_out = FALSE; 4709 sc->bce_drv_cardiac_arrest = FALSE; 4710 4711 /* Wait for the firmware to finish its initialization. */ 4712 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 4713 if (rc) 4714 BCE_PRINTF("%s(%d): Firmware did not complete " 4715 "initialization!\n", __FILE__, __LINE__); 4716 4717bce_reset_exit: 4718 DBEXIT(BCE_VERBOSE_RESET); 4719 return (rc); 4720} 4721 4722 4723static int 4724bce_chipinit(struct bce_softc *sc) 4725{ 4726 u32 val; 4727 int rc = 0; 4728 4729 DBENTER(BCE_VERBOSE_RESET); 4730 4731 bce_disable_intr(sc); 4732 4733 /* 4734 * Initialize DMA byte/word swapping, configure the number of DMA 4735 * channels and PCI clock compensation delay. 4736 */ 4737 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 4738 BCE_DMA_CONFIG_DATA_WORD_SWAP | 4739#if BYTE_ORDER == BIG_ENDIAN 4740 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 4741#endif 4742 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 4743 DMA_READ_CHANS << 12 | 4744 DMA_WRITE_CHANS << 16; 4745 4746 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 4747 4748 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 4749 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 4750 4751 /* 4752 * This setting resolves a problem observed on certain Intel PCI 4753 * chipsets that cannot handle multiple outstanding DMA operations. 4754 * See errata E9_5706A1_65. 4755 */ 4756 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 4757 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && 4758 !(sc->bce_flags & BCE_PCIX_FLAG)) 4759 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 4760 4761 REG_WR(sc, BCE_DMA_CONFIG, val); 4762 4763 /* Enable the RX_V2P and Context state machines before access. */ 4764 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4765 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 4766 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 4767 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 4768 4769 /* Initialize context mapping and zero out the quick contexts. */ 4770 if ((rc = bce_init_ctx(sc)) != 0) 4771 goto bce_chipinit_exit; 4772 4773 /* Initialize the on-boards CPUs */ 4774 bce_init_cpus(sc); 4775 4776 /* Enable management frames (NC-SI) to flow to the MCP. */ 4777 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 4778 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 4779 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 4780 } 4781 4782 /* Prepare NVRAM for access. */ 4783 if ((rc = bce_init_nvram(sc)) != 0) 4784 goto bce_chipinit_exit; 4785 4786 /* Set the kernel bypass block size */ 4787 val = REG_RD(sc, BCE_MQ_CONFIG); 4788 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4789 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4790 4791 /* Enable bins used on the 5709. */ 4792 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4793 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4794 val |= BCE_MQ_CONFIG_BIN_MQ_MODE; 4795 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) 4796 val |= BCE_MQ_CONFIG_HALT_DIS; 4797 } 4798 4799 REG_WR(sc, BCE_MQ_CONFIG, val); 4800 4801 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 4802 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); 4803 REG_WR(sc, BCE_MQ_KNL_WIND_END, val); 4804 4805 /* Set the page size and clear the RV2P processor stall bits. */ 4806 val = (BCM_PAGE_BITS - 8) << 24; 4807 REG_WR(sc, BCE_RV2P_CONFIG, val); 4808 4809 /* Configure page size. */ 4810 val = REG_RD(sc, BCE_TBDR_CONFIG); 4811 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 4812 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 4813 REG_WR(sc, BCE_TBDR_CONFIG, val); 4814 4815 /* Set the perfect match control register to default. */ 4816 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 4817 4818bce_chipinit_exit: 4819 DBEXIT(BCE_VERBOSE_RESET); 4820 4821 return(rc); 4822} 4823 4824 4825/****************************************************************************/ 4826/* Initialize the controller in preparation to send/receive traffic. */ 4827/* */ 4828/* Returns: */ 4829/* 0 for success, positive value for failure. */ 4830/****************************************************************************/ 4831static int 4832bce_blockinit(struct bce_softc *sc) 4833{ 4834 u32 reg, val; 4835 int rc = 0; 4836 4837 DBENTER(BCE_VERBOSE_RESET); 4838 4839 /* Load the hardware default MAC address. */ 4840 bce_set_mac_addr(sc); 4841 4842 /* Set the Ethernet backoff seed value */ 4843 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + 4844 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + 4845 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 4846 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 4847 4848 sc->last_status_idx = 0; 4849 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 4850 4851 /* Set up link change interrupt generation. */ 4852 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 4853 4854 /* Program the physical address of the status block. */ 4855 REG_WR(sc, BCE_HC_STATUS_ADDR_L, 4856 BCE_ADDR_LO(sc->status_block_paddr)); 4857 REG_WR(sc, BCE_HC_STATUS_ADDR_H, 4858 BCE_ADDR_HI(sc->status_block_paddr)); 4859 4860 /* Program the physical address of the statistics block. */ 4861 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 4862 BCE_ADDR_LO(sc->stats_block_paddr)); 4863 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 4864 BCE_ADDR_HI(sc->stats_block_paddr)); 4865 4866 /* Program various host coalescing parameters. */ 4867 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 4868 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); 4869 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 4870 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); 4871 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 4872 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 4873 REG_WR(sc, BCE_HC_TX_TICKS, 4874 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 4875 REG_WR(sc, BCE_HC_RX_TICKS, 4876 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 4877 REG_WR(sc, BCE_HC_COM_TICKS, 4878 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 4879 REG_WR(sc, BCE_HC_CMD_TICKS, 4880 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 4881 REG_WR(sc, BCE_HC_STATS_TICKS, 4882 (sc->bce_stats_ticks & 0xffff00)); 4883 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 4884 4885 /* Configure the Host Coalescing block. */ 4886 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | 4887 BCE_HC_CONFIG_COLLECT_STATS; 4888 4889#if 0 4890 /* ToDo: Add MSI-X support. */ 4891 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 4892 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + 4893 BCE_HC_SB_CONFIG_1; 4894 4895 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 4896 4897 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | 4898 BCE_HC_SB_CONFIG_1_ONE_SHOT); 4899 4900 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 4901 (sc->tx_quick_cons_trip_int << 16) | 4902 sc->tx_quick_cons_trip); 4903 4904 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 4905 (sc->tx_ticks_int << 16) | sc->tx_ticks); 4906 4907 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4908 } 4909 4910 /* 4911 * Tell the HC block to automatically set the 4912 * INT_MASK bit after an MSI/MSI-X interrupt 4913 * is generated so the driver doesn't have to. 4914 */ 4915 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) 4916 val |= BCE_HC_CONFIG_ONE_SHOT; 4917 4918 /* Set the MSI-X status blocks to 128 byte boundaries. */ 4919 if (sc->bce_flags & BCE_USING_MSIX_FLAG) 4920 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4921#endif 4922 4923 REG_WR(sc, BCE_HC_CONFIG, val); 4924 4925 /* Clear the internal statistics counters. */ 4926 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 4927 4928 /* Verify that bootcode is running. */ 4929 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 4930 4931 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), 4932 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", 4933 __FILE__, __LINE__); 4934 reg = 0); 4935 4936 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 4937 BCE_DEV_INFO_SIGNATURE_MAGIC) { 4938 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " 4939 "Expected: 08%08X\n", __FILE__, __LINE__, 4940 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), 4941 BCE_DEV_INFO_SIGNATURE_MAGIC); 4942 rc = ENODEV; 4943 goto bce_blockinit_exit; 4944 } 4945 4946 /* Enable DMA */ 4947 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4948 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4949 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4950 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4951 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4952 } 4953 4954 /* Allow bootcode to apply additional fixes before enabling MAC. */ 4955 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | 4956 BCE_DRV_MSG_CODE_RESET); 4957 4958 /* Enable link state change interrupt generation. */ 4959 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 4960 4961 /* Enable the RXP. */ 4962 bce_start_rxp_cpu(sc); 4963 4964 /* Disable management frames (NC-SI) from flowing to the MCP. */ 4965 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 4966 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 4967 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 4968 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 4969 } 4970 4971 /* Enable all remaining blocks in the MAC. */ 4972 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4973 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 4974 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4975 BCE_MISC_ENABLE_DEFAULT_XI); 4976 else 4977 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4978 BCE_MISC_ENABLE_DEFAULT); 4979 4980 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 4981 DELAY(20); 4982 4983 /* Save the current host coalescing block settings. */ 4984 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 4985 4986bce_blockinit_exit: 4987 DBEXIT(BCE_VERBOSE_RESET); 4988 4989 return (rc); 4990} 4991 4992 4993/****************************************************************************/ 4994/* Encapsulate an mbuf into the rx_bd chain. */ 4995/* */ 4996/* Returns: */ 4997/* 0 for success, positive value for failure. */ 4998/****************************************************************************/ 4999static int 5000bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, 5001 u16 *chain_prod, u32 *prod_bseq) 5002{ 5003 bus_dmamap_t map; 5004 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 5005 struct mbuf *m_new = NULL; 5006 struct rx_bd *rxbd; 5007 int nsegs, error, rc = 0; 5008#ifdef BCE_DEBUG 5009 u16 debug_chain_prod = *chain_prod; 5010#endif 5011 5012 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5013 5014 /* Make sure the inputs are valid. */ 5015 DBRUNIF((*chain_prod > MAX_RX_BD), 5016 BCE_PRINTF("%s(%d): RX producer out of range: " 5017 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5018 *chain_prod, (u16) MAX_RX_BD)); 5019 5020 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5021 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, 5022 *prod, *chain_prod, *prod_bseq); 5023 5024 /* Update some debug statistic counters */ 5025 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 5026 sc->rx_low_watermark = sc->free_rx_bd); 5027 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 5028 sc->rx_empty_count++); 5029 5030 /* Check whether this is a new mbuf allocation. */ 5031 if (m == NULL) { 5032 5033 /* Simulate an mbuf allocation failure. */ 5034 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), 5035 sc->mbuf_alloc_failed_count++; 5036 sc->mbuf_alloc_failed_sim_count++; 5037 rc = ENOBUFS; 5038 goto bce_get_rx_buf_exit); 5039 5040 /* This is a new mbuf allocation. */ 5041#ifdef BCE_JUMBO_HDRSPLIT 5042 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 5043#else 5044 m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, 5045 sc->rx_bd_mbuf_alloc_size); 5046#endif 5047 5048 if (m_new == NULL) { 5049 sc->mbuf_alloc_failed_count++; 5050 rc = ENOBUFS; 5051 goto bce_get_rx_buf_exit; 5052 } 5053 5054 DBRUN(sc->debug_rx_mbuf_alloc++); 5055 } else { 5056 /* Reuse an existing mbuf. */ 5057 m_new = m; 5058 } 5059 5060 /* Make sure we have a valid packet header. */ 5061 M_ASSERTPKTHDR(m_new); 5062 5063 /* Initialize the mbuf size and pad if necessary for alignment. */ 5064 m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size; 5065 m_adj(m_new, sc->rx_bd_mbuf_align_pad); 5066 5067 /* ToDo: Consider calling m_fragment() to test error handling. */ 5068 5069 /* Map the mbuf cluster into device memory. */ 5070 map = sc->rx_mbuf_map[*chain_prod]; 5071 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new, 5072 segs, &nsegs, BUS_DMA_NOWAIT); 5073 5074 /* Handle any mapping errors. */ 5075 if (error) { 5076 BCE_PRINTF("%s(%d): Error mapping mbuf into RX " 5077 "chain (%d)!\n", __FILE__, __LINE__, error); 5078 5079 sc->dma_map_addr_rx_failed_count++; 5080 m_freem(m_new); 5081 5082 DBRUN(sc->debug_rx_mbuf_alloc--); 5083 5084 rc = ENOBUFS; 5085 goto bce_get_rx_buf_exit; 5086 } 5087 5088 /* All mbufs must map to a single segment. */ 5089 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", 5090 __FUNCTION__, nsegs)); 5091 5092 /* Setup the rx_bd for the segment. */ 5093 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 5094 5095 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); 5096 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); 5097 rxbd->rx_bd_len = htole32(segs[0].ds_len); 5098 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); 5099 *prod_bseq += segs[0].ds_len; 5100 5101 /* Save the mbuf and update our counter. */ 5102 sc->rx_mbuf_ptr[*chain_prod] = m_new; 5103 sc->free_rx_bd -= nsegs; 5104 5105 DBRUNMSG(BCE_INSANE_RECV, 5106 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs)); 5107 5108 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " 5109 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", 5110 __FUNCTION__, *prod, *chain_prod, *prod_bseq); 5111 5112bce_get_rx_buf_exit: 5113 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5114 5115 return(rc); 5116} 5117 5118 5119#ifdef BCE_JUMBO_HDRSPLIT 5120/****************************************************************************/ 5121/* Encapsulate an mbuf cluster into the page chain. */ 5122/* */ 5123/* Returns: */ 5124/* 0 for success, positive value for failure. */ 5125/****************************************************************************/ 5126static int 5127bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, 5128 u16 *prod_idx) 5129{ 5130 bus_dmamap_t map; 5131 bus_addr_t busaddr; 5132 struct mbuf *m_new = NULL; 5133 struct rx_bd *pgbd; 5134 int error, rc = 0; 5135#ifdef BCE_DEBUG 5136 u16 debug_prod_idx = *prod_idx; 5137#endif 5138 5139 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5140 5141 /* Make sure the inputs are valid. */ 5142 DBRUNIF((*prod_idx > MAX_PG_BD), 5143 BCE_PRINTF("%s(%d): page producer out of range: " 5144 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5145 *prod_idx, (u16) MAX_PG_BD)); 5146 5147 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5148 "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx); 5149 5150 /* Update counters if we've hit a new low or run out of pages. */ 5151 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark), 5152 sc->pg_low_watermark = sc->free_pg_bd); 5153 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++); 5154 5155 /* Check whether this is a new mbuf allocation. */ 5156 if (m == NULL) { 5157 5158 /* Simulate an mbuf allocation failure. */ 5159 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), 5160 sc->mbuf_alloc_failed_count++; 5161 sc->mbuf_alloc_failed_sim_count++; 5162 rc = ENOBUFS; 5163 goto bce_get_pg_buf_exit); 5164 5165 /* This is a new mbuf allocation. */ 5166 m_new = m_getcl(M_DONTWAIT, MT_DATA, 0); 5167 if (m_new == NULL) { 5168 sc->mbuf_alloc_failed_count++; 5169 rc = ENOBUFS; 5170 goto bce_get_pg_buf_exit; 5171 } 5172 5173 DBRUN(sc->debug_pg_mbuf_alloc++); 5174 } else { 5175 /* Reuse an existing mbuf. */ 5176 m_new = m; 5177 m_new->m_data = m_new->m_ext.ext_buf; 5178 } 5179 5180 m_new->m_len = sc->pg_bd_mbuf_alloc_size; 5181 5182 /* ToDo: Consider calling m_fragment() to test error handling. */ 5183 5184 /* Map the mbuf cluster into device memory. */ 5185 map = sc->pg_mbuf_map[*prod_idx]; 5186 error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *), 5187 sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, 5188 &busaddr, BUS_DMA_NOWAIT); 5189 5190 /* Handle any mapping errors. */ 5191 if (error) { 5192 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", 5193 __FILE__, __LINE__); 5194 5195 m_freem(m_new); 5196 DBRUN(sc->debug_pg_mbuf_alloc--); 5197 5198 rc = ENOBUFS; 5199 goto bce_get_pg_buf_exit; 5200 } 5201 5202 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */ 5203 5204 /* 5205 * The page chain uses the same rx_bd data structure 5206 * as the receive chain but doesn't require a byte sequence (bseq). 5207 */ 5208 pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)]; 5209 5210 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr)); 5211 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr)); 5212 pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size); 5213 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); 5214 5215 /* Save the mbuf and update our counter. */ 5216 sc->pg_mbuf_ptr[*prod_idx] = m_new; 5217 sc->free_pg_bd--; 5218 5219 DBRUNMSG(BCE_INSANE_RECV, 5220 bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1)); 5221 5222 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " 5223 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx); 5224 5225bce_get_pg_buf_exit: 5226 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5227 5228 return(rc); 5229} 5230#endif /* BCE_JUMBO_HDRSPLIT */ 5231 5232 5233/****************************************************************************/ 5234/* Initialize the TX context memory. */ 5235/* */ 5236/* Returns: */ 5237/* Nothing */ 5238/****************************************************************************/ 5239static void 5240bce_init_tx_context(struct bce_softc *sc) 5241{ 5242 u32 val; 5243 5244 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 5245 5246 /* Initialize the context ID for an L2 TX chain. */ 5247 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5248 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5249 /* Set the CID type to support an L2 connection. */ 5250 val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | 5251 BCE_L2CTX_TX_TYPE_SIZE_L2_XI; 5252 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); 5253 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16); 5254 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5255 BCE_L2CTX_TX_CMD_TYPE_XI, val); 5256 5257 /* Point the hardware to the first page in the chain. */ 5258 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 5259 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5260 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 5261 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 5262 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5263 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 5264 } else { 5265 /* Set the CID type to support an L2 connection. */ 5266 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 5267 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); 5268 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 5269 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); 5270 5271 /* Point the hardware to the first page in the chain. */ 5272 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 5273 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5274 BCE_L2CTX_TX_TBDR_BHADDR_HI, val); 5275 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 5276 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5277 BCE_L2CTX_TX_TBDR_BHADDR_LO, val); 5278 } 5279 5280 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 5281} 5282 5283 5284/****************************************************************************/ 5285/* Allocate memory and initialize the TX data structures. */ 5286/* */ 5287/* Returns: */ 5288/* 0 for success, positive value for failure. */ 5289/****************************************************************************/ 5290static int 5291bce_init_tx_chain(struct bce_softc *sc) 5292{ 5293 struct tx_bd *txbd; 5294 int i, rc = 0; 5295 5296 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); 5297 5298 /* Set the initial TX producer/consumer indices. */ 5299 sc->tx_prod = 0; 5300 sc->tx_cons = 0; 5301 sc->tx_prod_bseq = 0; 5302 sc->used_tx_bd = 0; 5303 sc->max_tx_bd = USABLE_TX_BD; 5304 DBRUN(sc->tx_hi_watermark = 0); 5305 DBRUN(sc->tx_full_count = 0); 5306 5307 /* 5308 * The NetXtreme II supports a linked-list structre called 5309 * a Buffer Descriptor Chain (or BD chain). A BD chain 5310 * consists of a series of 1 or more chain pages, each of which 5311 * consists of a fixed number of BD entries. 5312 * The last BD entry on each page is a pointer to the next page 5313 * in the chain, and the last pointer in the BD chain 5314 * points back to the beginning of the chain. 5315 */ 5316 5317 /* Set the TX next pointer chain entries. */ 5318 for (i = 0; i < TX_PAGES; i++) { 5319 int j; 5320 5321 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 5322 5323 /* Check if we've reached the last page. */ 5324 if (i == (TX_PAGES - 1)) 5325 j = 0; 5326 else 5327 j = i + 1; 5328 5329 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); 5330 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); 5331 } 5332 5333 bce_init_tx_context(sc); 5334 5335 DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD)); 5336 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); 5337 5338 return(rc); 5339} 5340 5341 5342/****************************************************************************/ 5343/* Free memory and clear the TX data structures. */ 5344/* */ 5345/* Returns: */ 5346/* Nothing. */ 5347/****************************************************************************/ 5348static void 5349bce_free_tx_chain(struct bce_softc *sc) 5350{ 5351 int i; 5352 5353 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); 5354 5355 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 5356 for (i = 0; i < TOTAL_TX_BD; i++) { 5357 if (sc->tx_mbuf_ptr[i] != NULL) { 5358 if (sc->tx_mbuf_map[i] != NULL) 5359 bus_dmamap_sync(sc->tx_mbuf_tag, 5360 sc->tx_mbuf_map[i], 5361 BUS_DMASYNC_POSTWRITE); 5362 m_freem(sc->tx_mbuf_ptr[i]); 5363 sc->tx_mbuf_ptr[i] = NULL; 5364 DBRUN(sc->debug_tx_mbuf_alloc--); 5365 } 5366 } 5367 5368 /* Clear each TX chain page. */ 5369 for (i = 0; i < TX_PAGES; i++) 5370 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); 5371 5372 sc->used_tx_bd = 0; 5373 5374 /* Check if we lost any mbufs in the process. */ 5375 DBRUNIF((sc->debug_tx_mbuf_alloc), 5376 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs " 5377 "from tx chain!\n", __FILE__, __LINE__, 5378 sc->debug_tx_mbuf_alloc)); 5379 5380 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); 5381} 5382 5383 5384/****************************************************************************/ 5385/* Initialize the RX context memory. */ 5386/* */ 5387/* Returns: */ 5388/* Nothing */ 5389/****************************************************************************/ 5390static void 5391bce_init_rx_context(struct bce_softc *sc) 5392{ 5393 u32 val; 5394 5395 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); 5396 5397 /* Init the type, size, and BD cache levels for the RX context. */ 5398 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 5399 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | 5400 (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT); 5401 5402 /* 5403 * Set the level for generating pause frames 5404 * when the number of available rx_bd's gets 5405 * too low (the low watermark) and the level 5406 * when pause frames can be stopped (the high 5407 * watermark). 5408 */ 5409 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5410 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5411 u32 lo_water, hi_water; 5412 5413 if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) { 5414 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; 5415 } else { 5416 lo_water = 0; 5417 } 5418 5419 if (lo_water >= USABLE_RX_BD) { 5420 lo_water = 0; 5421 } 5422 5423 hi_water = USABLE_RX_BD / 4; 5424 5425 if (hi_water <= lo_water) { 5426 lo_water = 0; 5427 } 5428 5429 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; 5430 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; 5431 5432 if (hi_water > 0xf) 5433 hi_water = 0xf; 5434 else if (hi_water == 0) 5435 lo_water = 0; 5436 5437 val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) | 5438 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); 5439 } 5440 5441 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val); 5442 5443 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 5444 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5445 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5446 val = REG_RD(sc, BCE_MQ_MAP_L2_5); 5447 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); 5448 } 5449 5450 /* Point the hardware to the first page in the chain. */ 5451 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); 5452 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val); 5453 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); 5454 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val); 5455 5456 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); 5457} 5458 5459 5460/****************************************************************************/ 5461/* Allocate memory and initialize the RX data structures. */ 5462/* */ 5463/* Returns: */ 5464/* 0 for success, positive value for failure. */ 5465/****************************************************************************/ 5466static int 5467bce_init_rx_chain(struct bce_softc *sc) 5468{ 5469 struct rx_bd *rxbd; 5470 int i, rc = 0; 5471 5472 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5473 BCE_VERBOSE_CTX); 5474 5475 /* Initialize the RX producer and consumer indices. */ 5476 sc->rx_prod = 0; 5477 sc->rx_cons = 0; 5478 sc->rx_prod_bseq = 0; 5479 sc->free_rx_bd = USABLE_RX_BD; 5480 sc->max_rx_bd = USABLE_RX_BD; 5481 5482 /* Initialize the RX next pointer chain entries. */ 5483 for (i = 0; i < RX_PAGES; i++) { 5484 int j; 5485 5486 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 5487 5488 /* Check if we've reached the last page. */ 5489 if (i == (RX_PAGES - 1)) 5490 j = 0; 5491 else 5492 j = i + 1; 5493 5494 /* Setup the chain page pointers. */ 5495 rxbd->rx_bd_haddr_hi = 5496 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); 5497 rxbd->rx_bd_haddr_lo = 5498 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); 5499 } 5500 5501 /* Fill up the RX chain. */ 5502 bce_fill_rx_chain(sc); 5503 5504 DBRUN(sc->rx_low_watermark = USABLE_RX_BD); 5505 DBRUN(sc->rx_empty_count = 0); 5506 for (i = 0; i < RX_PAGES; i++) { 5507 bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], 5508 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 5509 } 5510 5511 bce_init_rx_context(sc); 5512 5513 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD)); 5514 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5515 BCE_VERBOSE_CTX); 5516 5517 /* ToDo: Are there possible failure modes here? */ 5518 5519 return(rc); 5520} 5521 5522 5523/****************************************************************************/ 5524/* Add mbufs to the RX chain until its full or an mbuf allocation error */ 5525/* occurs. */ 5526/* */ 5527/* Returns: */ 5528/* Nothing */ 5529/****************************************************************************/ 5530static void 5531bce_fill_rx_chain(struct bce_softc *sc) 5532{ 5533 u16 prod, prod_idx; 5534 u32 prod_bseq; 5535 5536 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5537 BCE_VERBOSE_CTX); 5538 5539 /* Get the RX chain producer indices. */ 5540 prod = sc->rx_prod; 5541 prod_bseq = sc->rx_prod_bseq; 5542 5543 /* Keep filling the RX chain until it's full. */ 5544 while (sc->free_rx_bd > 0) { 5545 prod_idx = RX_CHAIN_IDX(prod); 5546 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) { 5547 /* Bail out if we can't add an mbuf to the chain. */ 5548 break; 5549 } 5550 prod = NEXT_RX_BD(prod); 5551 } 5552 5553 /* Save the RX chain producer indices. */ 5554 sc->rx_prod = prod; 5555 sc->rx_prod_bseq = prod_bseq; 5556 5557 /* We should never end up pointing to a next page pointer. */ 5558 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 5559 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", 5560 __FUNCTION__, sc->rx_prod)); 5561 5562 /* Write the mailbox and tell the chip about the waiting rx_bd's. */ 5563 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + 5564 BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod); 5565 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + 5566 BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq); 5567 5568 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5569 BCE_VERBOSE_CTX); 5570} 5571 5572 5573/****************************************************************************/ 5574/* Free memory and clear the RX data structures. */ 5575/* */ 5576/* Returns: */ 5577/* Nothing. */ 5578/****************************************************************************/ 5579static void 5580bce_free_rx_chain(struct bce_softc *sc) 5581{ 5582 int i; 5583 5584 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5585 5586 /* Free any mbufs still in the RX mbuf chain. */ 5587 for (i = 0; i < TOTAL_RX_BD; i++) { 5588 if (sc->rx_mbuf_ptr[i] != NULL) { 5589 if (sc->rx_mbuf_map[i] != NULL) 5590 bus_dmamap_sync(sc->rx_mbuf_tag, 5591 sc->rx_mbuf_map[i], 5592 BUS_DMASYNC_POSTREAD); 5593 m_freem(sc->rx_mbuf_ptr[i]); 5594 sc->rx_mbuf_ptr[i] = NULL; 5595 DBRUN(sc->debug_rx_mbuf_alloc--); 5596 } 5597 } 5598 5599 /* Clear each RX chain page. */ 5600 for (i = 0; i < RX_PAGES; i++) 5601 if (sc->rx_bd_chain[i] != NULL) { 5602 bzero((char *)sc->rx_bd_chain[i], 5603 BCE_RX_CHAIN_PAGE_SZ); 5604 } 5605 5606 sc->free_rx_bd = sc->max_rx_bd; 5607 5608 /* Check if we lost any mbufs in the process. */ 5609 DBRUNIF((sc->debug_rx_mbuf_alloc), 5610 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n", 5611 __FUNCTION__, sc->debug_rx_mbuf_alloc)); 5612 5613 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5614} 5615 5616 5617#ifdef BCE_JUMBO_HDRSPLIT 5618/****************************************************************************/ 5619/* Allocate memory and initialize the page data structures. */ 5620/* Assumes that bce_init_rx_chain() has not already been called. */ 5621/* */ 5622/* Returns: */ 5623/* 0 for success, positive value for failure. */ 5624/****************************************************************************/ 5625static int 5626bce_init_pg_chain(struct bce_softc *sc) 5627{ 5628 struct rx_bd *pgbd; 5629 int i, rc = 0; 5630 u32 val; 5631 5632 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5633 BCE_VERBOSE_CTX); 5634 5635 /* Initialize the page producer and consumer indices. */ 5636 sc->pg_prod = 0; 5637 sc->pg_cons = 0; 5638 sc->free_pg_bd = USABLE_PG_BD; 5639 sc->max_pg_bd = USABLE_PG_BD; 5640 DBRUN(sc->pg_low_watermark = sc->max_pg_bd); 5641 DBRUN(sc->pg_empty_count = 0); 5642 5643 /* Initialize the page next pointer chain entries. */ 5644 for (i = 0; i < PG_PAGES; i++) { 5645 int j; 5646 5647 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE]; 5648 5649 /* Check if we've reached the last page. */ 5650 if (i == (PG_PAGES - 1)) 5651 j = 0; 5652 else 5653 j = i + 1; 5654 5655 /* Setup the chain page pointers. */ 5656 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j])); 5657 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j])); 5658 } 5659 5660 /* Setup the MQ BIN mapping for host_pg_bidx. */ 5661 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5662 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 5663 REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT); 5664 5665 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0); 5666 5667 /* Configure the rx_bd and page chain mbuf cluster size. */ 5668 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size; 5669 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val); 5670 5671 /* Configure the context reserved for jumbo support. */ 5672 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY, 5673 BCE_L2CTX_RX_RBDC_JUMBO_KEY); 5674 5675 /* Point the hardware to the first page in the page chain. */ 5676 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]); 5677 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val); 5678 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]); 5679 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val); 5680 5681 /* Fill up the page chain. */ 5682 bce_fill_pg_chain(sc); 5683 5684 for (i = 0; i < PG_PAGES; i++) { 5685 bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], 5686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 5687 } 5688 5689 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD)); 5690 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5691 BCE_VERBOSE_CTX); 5692 return(rc); 5693} 5694 5695 5696/****************************************************************************/ 5697/* Add mbufs to the page chain until its full or an mbuf allocation error */ 5698/* occurs. */ 5699/* */ 5700/* Returns: */ 5701/* Nothing */ 5702/****************************************************************************/ 5703static void 5704bce_fill_pg_chain(struct bce_softc *sc) 5705{ 5706 u16 prod, prod_idx; 5707 5708 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5709 BCE_VERBOSE_CTX); 5710 5711 /* Get the page chain prodcuer index. */ 5712 prod = sc->pg_prod; 5713 5714 /* Keep filling the page chain until it's full. */ 5715 while (sc->free_pg_bd > 0) { 5716 prod_idx = PG_CHAIN_IDX(prod); 5717 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) { 5718 /* Bail out if we can't add an mbuf to the chain. */ 5719 break; 5720 } 5721 prod = NEXT_PG_BD(prod); 5722 } 5723 5724 /* Save the page chain producer index. */ 5725 sc->pg_prod = prod; 5726 5727 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 5728 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n", 5729 __FUNCTION__, sc->pg_prod)); 5730 5731 /* 5732 * Write the mailbox and tell the chip about 5733 * the new rx_bd's in the page chain. 5734 */ 5735 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + 5736 BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod); 5737 5738 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5739 BCE_VERBOSE_CTX); 5740} 5741 5742 5743/****************************************************************************/ 5744/* Free memory and clear the RX data structures. */ 5745/* */ 5746/* Returns: */ 5747/* Nothing. */ 5748/****************************************************************************/ 5749static void 5750bce_free_pg_chain(struct bce_softc *sc) 5751{ 5752 int i; 5753 5754 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5755 5756 /* Free any mbufs still in the mbuf page chain. */ 5757 for (i = 0; i < TOTAL_PG_BD; i++) { 5758 if (sc->pg_mbuf_ptr[i] != NULL) { 5759 if (sc->pg_mbuf_map[i] != NULL) 5760 bus_dmamap_sync(sc->pg_mbuf_tag, 5761 sc->pg_mbuf_map[i], 5762 BUS_DMASYNC_POSTREAD); 5763 m_freem(sc->pg_mbuf_ptr[i]); 5764 sc->pg_mbuf_ptr[i] = NULL; 5765 DBRUN(sc->debug_pg_mbuf_alloc--); 5766 } 5767 } 5768 5769 /* Clear each page chain pages. */ 5770 for (i = 0; i < PG_PAGES; i++) 5771 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); 5772 5773 sc->free_pg_bd = sc->max_pg_bd; 5774 5775 /* Check if we lost any mbufs in the process. */ 5776 DBRUNIF((sc->debug_pg_mbuf_alloc), 5777 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n", 5778 __FUNCTION__, sc->debug_pg_mbuf_alloc)); 5779 5780 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5781} 5782#endif /* BCE_JUMBO_HDRSPLIT */ 5783 5784 5785/****************************************************************************/ 5786/* Set media options. */ 5787/* */ 5788/* Returns: */ 5789/* 0 for success, positive value for failure. */ 5790/****************************************************************************/ 5791static int 5792bce_ifmedia_upd(struct ifnet *ifp) 5793{ 5794 struct bce_softc *sc = ifp->if_softc; 5795 int error; 5796 5797 DBENTER(BCE_VERBOSE); 5798 5799 BCE_LOCK(sc); 5800 error = bce_ifmedia_upd_locked(ifp); 5801 BCE_UNLOCK(sc); 5802 5803 DBEXIT(BCE_VERBOSE); 5804 return (error); 5805} 5806 5807 5808/****************************************************************************/ 5809/* Set media options. */ 5810/* */ 5811/* Returns: */ 5812/* Nothing. */ 5813/****************************************************************************/ 5814static int 5815bce_ifmedia_upd_locked(struct ifnet *ifp) 5816{ 5817 struct bce_softc *sc = ifp->if_softc; 5818 struct mii_data *mii; 5819 int error; 5820 5821 DBENTER(BCE_VERBOSE_PHY); 5822 5823 error = 0; 5824 BCE_LOCK_ASSERT(sc); 5825 5826 mii = device_get_softc(sc->bce_miibus); 5827 5828 /* Make sure the MII bus has been enumerated. */ 5829 if (mii) { 5830 sc->bce_link_up = FALSE; 5831 if (mii->mii_instance) { 5832 struct mii_softc *miisc; 5833 5834 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 5835 mii_phy_reset(miisc); 5836 } 5837 error = mii_mediachg(mii); 5838 } 5839 5840 DBEXIT(BCE_VERBOSE_PHY); 5841 return (error); 5842} 5843 5844 5845/****************************************************************************/ 5846/* Reports current media status. */ 5847/* */ 5848/* Returns: */ 5849/* Nothing. */ 5850/****************************************************************************/ 5851static void 5852bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5853{ 5854 struct bce_softc *sc = ifp->if_softc; 5855 struct mii_data *mii; 5856 5857 DBENTER(BCE_VERBOSE_PHY); 5858 5859 BCE_LOCK(sc); 5860 5861 if ((ifp->if_flags & IFF_UP) == 0) { 5862 BCE_UNLOCK(sc); 5863 return; 5864 } 5865 mii = device_get_softc(sc->bce_miibus); 5866 5867 mii_pollstat(mii); 5868 ifmr->ifm_active = mii->mii_media_active; 5869 ifmr->ifm_status = mii->mii_media_status; 5870 5871 BCE_UNLOCK(sc); 5872 5873 DBEXIT(BCE_VERBOSE_PHY); 5874} 5875 5876 5877/****************************************************************************/ 5878/* Handles PHY generated interrupt events. */ 5879/* */ 5880/* Returns: */ 5881/* Nothing. */ 5882/****************************************************************************/ 5883static void 5884bce_phy_intr(struct bce_softc *sc) 5885{ 5886 u32 new_link_state, old_link_state; 5887 5888 DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); 5889 5890 DBRUN(sc->phy_interrupts++); 5891 5892 new_link_state = sc->status_block->status_attn_bits & 5893 STATUS_ATTN_BITS_LINK_STATE; 5894 old_link_state = sc->status_block->status_attn_bits_ack & 5895 STATUS_ATTN_BITS_LINK_STATE; 5896 5897 /* Handle any changes if the link state has changed. */ 5898 if (new_link_state != old_link_state) { 5899 5900 /* Update the status_attn_bits_ack field. */ 5901 if (new_link_state) { 5902 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 5903 STATUS_ATTN_BITS_LINK_STATE); 5904 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n", 5905 __FUNCTION__); 5906 } 5907 else { 5908 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 5909 STATUS_ATTN_BITS_LINK_STATE); 5910 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n", 5911 __FUNCTION__); 5912 } 5913 5914 /* 5915 * Assume link is down and allow 5916 * tick routine to update the state 5917 * based on the actual media state. 5918 */ 5919 sc->bce_link_up = FALSE; 5920 callout_stop(&sc->bce_tick_callout); 5921 bce_tick(sc); 5922 } 5923 5924 /* Acknowledge the link change interrupt. */ 5925 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 5926 5927 DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); 5928} 5929 5930 5931/****************************************************************************/ 5932/* Reads the receive consumer value from the status block (skipping over */ 5933/* chain page pointer if necessary). */ 5934/* */ 5935/* Returns: */ 5936/* hw_cons */ 5937/****************************************************************************/ 5938static inline u16 5939bce_get_hw_rx_cons(struct bce_softc *sc) 5940{ 5941 u16 hw_cons; 5942 5943 rmb(); 5944 hw_cons = sc->status_block->status_rx_quick_consumer_index0; 5945 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 5946 hw_cons++; 5947 5948 return hw_cons; 5949} 5950 5951/****************************************************************************/ 5952/* Handles received frame interrupt events. */ 5953/* */ 5954/* Returns: */ 5955/* Nothing. */ 5956/****************************************************************************/ 5957static void 5958bce_rx_intr(struct bce_softc *sc) 5959{ 5960 struct ifnet *ifp = sc->bce_ifp; 5961 struct l2_fhdr *l2fhdr; 5962 struct ether_vlan_header *vh; 5963 unsigned int pkt_len; 5964 u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons; 5965 u32 status; 5966#ifdef BCE_JUMBO_HDRSPLIT 5967 unsigned int rem_len; 5968 u16 sw_pg_cons, sw_pg_cons_idx; 5969#endif 5970 5971 DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); 5972 DBRUN(sc->interrupts_rx++); 5973 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, " 5974 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 5975 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 5976 5977 /* Prepare the RX chain pages to be accessed by the host CPU. */ 5978 for (int i = 0; i < RX_PAGES; i++) 5979 bus_dmamap_sync(sc->rx_bd_chain_tag, 5980 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD); 5981 5982#ifdef BCE_JUMBO_HDRSPLIT 5983 /* Prepare the page chain pages to be accessed by the host CPU. */ 5984 for (int i = 0; i < PG_PAGES; i++) 5985 bus_dmamap_sync(sc->pg_bd_chain_tag, 5986 sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD); 5987#endif 5988 5989 /* Get the hardware's view of the RX consumer index. */ 5990 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 5991 5992 /* Get working copies of the driver's view of the consumer indices. */ 5993 sw_rx_cons = sc->rx_cons; 5994 5995#ifdef BCE_JUMBO_HDRSPLIT 5996 sw_pg_cons = sc->pg_cons; 5997#endif 5998 5999 /* Update some debug statistics counters */ 6000 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 6001 sc->rx_low_watermark = sc->free_rx_bd); 6002 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 6003 sc->rx_empty_count++); 6004 6005 /* Scan through the receive chain as long as there is work to do */ 6006 /* ToDo: Consider setting a limit on the number of packets processed. */ 6007 rmb(); 6008 while (sw_rx_cons != hw_rx_cons) { 6009 struct mbuf *m0; 6010 6011 /* Convert the producer/consumer indices to an actual rx_bd index. */ 6012 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); 6013 6014 /* Unmap the mbuf from DMA space. */ 6015 bus_dmamap_sync(sc->rx_mbuf_tag, 6016 sc->rx_mbuf_map[sw_rx_cons_idx], 6017 BUS_DMASYNC_POSTREAD); 6018 bus_dmamap_unload(sc->rx_mbuf_tag, 6019 sc->rx_mbuf_map[sw_rx_cons_idx]); 6020 6021 /* Remove the mbuf from the RX chain. */ 6022 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; 6023 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; 6024 DBRUN(sc->debug_rx_mbuf_alloc--); 6025 sc->free_rx_bd++; 6026 6027 if(m0 == NULL) { 6028 DBPRINT(sc, BCE_EXTREME_RECV, 6029 "%s(): Oops! Empty mbuf pointer " 6030 "found in sc->rx_mbuf_ptr[0x%04X]!\n", 6031 __FUNCTION__, sw_rx_cons_idx); 6032 goto bce_rx_int_next_rx; 6033 } 6034 6035 /* 6036 * Frames received on the NetXteme II are prepended 6037 * with an l2_fhdr structure which provides status 6038 * information about the received frame (including 6039 * VLAN tags and checksum info). The frames are 6040 * also automatically adjusted to align the IP 6041 * header (i.e. two null bytes are inserted before 6042 * the Ethernet header). As a result the data 6043 * DMA'd by the controller into the mbuf looks 6044 * like this: 6045 * 6046 * +---------+-----+---------------------+-----+ 6047 * | l2_fhdr | pad | packet data | FCS | 6048 * +---------+-----+---------------------+-----+ 6049 * 6050 * The l2_fhdr needs to be checked and skipped and 6051 * the FCS needs to be stripped before sending the 6052 * packet up the stack. 6053 */ 6054 l2fhdr = mtod(m0, struct l2_fhdr *); 6055 6056 /* Get the packet data + FCS length and the status. */ 6057 pkt_len = l2fhdr->l2_fhdr_pkt_len; 6058 status = l2fhdr->l2_fhdr_status; 6059 6060 /* 6061 * Skip over the l2_fhdr and pad, resulting in the 6062 * following data in the mbuf: 6063 * +---------------------+-----+ 6064 * | packet data | FCS | 6065 * +---------------------+-----+ 6066 */ 6067 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); 6068 6069#ifdef BCE_JUMBO_HDRSPLIT 6070 /* 6071 * Check whether the received frame fits in a single 6072 * mbuf or not (i.e. packet data + FCS <= 6073 * sc->rx_bd_mbuf_data_len bytes). 6074 */ 6075 if (pkt_len > m0->m_len) { 6076 /* 6077 * The received frame is larger than a single mbuf. 6078 * If the frame was a TCP frame then only the TCP 6079 * header is placed in the mbuf, the remaining 6080 * payload (including FCS) is placed in the page 6081 * chain, the SPLIT flag is set, and the header 6082 * length is placed in the IP checksum field. 6083 * If the frame is not a TCP frame then the mbuf 6084 * is filled and the remaining bytes are placed 6085 * in the page chain. 6086 */ 6087 6088 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large " 6089 "packet.\n", __FUNCTION__); 6090 6091 /* 6092 * When the page chain is enabled and the TCP 6093 * header has been split from the TCP payload, 6094 * the ip_xsum structure will reflect the length 6095 * of the TCP header, not the IP checksum. Set 6096 * the packet length of the mbuf accordingly. 6097 */ 6098 if (status & L2_FHDR_STATUS_SPLIT) 6099 m0->m_len = l2fhdr->l2_fhdr_ip_xsum; 6100 6101 rem_len = pkt_len - m0->m_len; 6102 6103 /* Pull mbufs off the page chain for the remaining data. */ 6104 while (rem_len > 0) { 6105 struct mbuf *m_pg; 6106 6107 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); 6108 6109 /* Remove the mbuf from the page chain. */ 6110 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; 6111 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; 6112 DBRUN(sc->debug_pg_mbuf_alloc--); 6113 sc->free_pg_bd++; 6114 6115 /* Unmap the page chain mbuf from DMA space. */ 6116 bus_dmamap_sync(sc->pg_mbuf_tag, 6117 sc->pg_mbuf_map[sw_pg_cons_idx], 6118 BUS_DMASYNC_POSTREAD); 6119 bus_dmamap_unload(sc->pg_mbuf_tag, 6120 sc->pg_mbuf_map[sw_pg_cons_idx]); 6121 6122 /* Adjust the mbuf length. */ 6123 if (rem_len < m_pg->m_len) { 6124 /* The mbuf chain is complete. */ 6125 m_pg->m_len = rem_len; 6126 rem_len = 0; 6127 } else { 6128 /* More packet data is waiting. */ 6129 rem_len -= m_pg->m_len; 6130 } 6131 6132 /* Concatenate the mbuf cluster to the mbuf. */ 6133 m_cat(m0, m_pg); 6134 6135 sw_pg_cons = NEXT_PG_BD(sw_pg_cons); 6136 } 6137 6138 /* Set the total packet length. */ 6139 m0->m_pkthdr.len = pkt_len; 6140 6141 } else { 6142 /* 6143 * The received packet is small and fits in a 6144 * single mbuf (i.e. the l2_fhdr + pad + packet + 6145 * FCS <= MHLEN). In other words, the packet is 6146 * 154 bytes or less in size. 6147 */ 6148 6149 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small " 6150 "packet.\n", __FUNCTION__); 6151 6152 /* Set the total packet length. */ 6153 m0->m_pkthdr.len = m0->m_len = pkt_len; 6154 } 6155#else 6156 /* Set the total packet length. */ 6157 m0->m_pkthdr.len = m0->m_len = pkt_len; 6158#endif 6159 6160 /* Remove the trailing Ethernet FCS. */ 6161 m_adj(m0, -ETHER_CRC_LEN); 6162 6163 /* Check that the resulting mbuf chain is valid. */ 6164 DBRUN(m_sanity(m0, FALSE)); 6165 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) | 6166 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)), 6167 BCE_PRINTF("Invalid Ethernet frame size!\n"); 6168 m_print(m0, 128)); 6169 6170 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control), 6171 sc->l2fhdr_error_sim_count++; 6172 status = status | L2_FHDR_ERRORS_PHY_DECODE); 6173 6174 /* Check the received frame for errors. */ 6175 if (status & (L2_FHDR_ERRORS_BAD_CRC | 6176 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 6177 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { 6178 6179 /* Log the error and release the mbuf. */ 6180 ifp->if_ierrors++; 6181 sc->l2fhdr_error_count++; 6182 6183 m_freem(m0); 6184 m0 = NULL; 6185 goto bce_rx_int_next_rx; 6186 } 6187 6188 /* Send the packet to the appropriate interface. */ 6189 m0->m_pkthdr.rcvif = ifp; 6190 6191 /* Assume no hardware checksum. */ 6192 m0->m_pkthdr.csum_flags = 0; 6193 6194 /* Validate the checksum if offload enabled. */ 6195 if (ifp->if_capenable & IFCAP_RXCSUM) { 6196 6197 /* Check for an IP datagram. */ 6198 if (!(status & L2_FHDR_STATUS_SPLIT) && 6199 (status & L2_FHDR_STATUS_IP_DATAGRAM)) { 6200 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 6201 DBRUN(sc->csum_offload_ip++); 6202 /* Check if the IP checksum is valid. */ 6203 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) 6204 m0->m_pkthdr.csum_flags |= 6205 CSUM_IP_VALID; 6206 } 6207 6208 /* Check for a valid TCP/UDP frame. */ 6209 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 6210 L2_FHDR_STATUS_UDP_DATAGRAM)) { 6211 6212 /* Check for a good TCP/UDP checksum. */ 6213 if ((status & (L2_FHDR_ERRORS_TCP_XSUM | 6214 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 6215 DBRUN(sc->csum_offload_tcp_udp++); 6216 m0->m_pkthdr.csum_data = 6217 l2fhdr->l2_fhdr_tcp_udp_xsum; 6218 m0->m_pkthdr.csum_flags |= 6219 (CSUM_DATA_VALID 6220 | CSUM_PSEUDO_HDR); 6221 } 6222 } 6223 } 6224 6225 /* Attach the VLAN tag. */ 6226 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 6227 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 6228#if __FreeBSD_version < 700000 6229 VLAN_INPUT_TAG(ifp, m0, 6230 l2fhdr->l2_fhdr_vlan_tag, continue); 6231#else 6232 m0->m_pkthdr.ether_vtag = 6233 l2fhdr->l2_fhdr_vlan_tag; 6234 m0->m_flags |= M_VLANTAG; 6235#endif 6236 } else { 6237 /* 6238 * bce(4) controllers can't disable VLAN 6239 * tag stripping if management firmware 6240 * (ASF/IPMI/UMP) is running. So we always 6241 * strip VLAN tag and manually reconstruct 6242 * the VLAN frame by appending stripped 6243 * VLAN tag in driver if VLAN tag stripping 6244 * was disabled. 6245 * 6246 * TODO: LLC SNAP handling. 6247 */ 6248 bcopy(mtod(m0, uint8_t *), 6249 mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN, 6250 ETHER_ADDR_LEN * 2); 6251 m0->m_data -= ETHER_VLAN_ENCAP_LEN; 6252 vh = mtod(m0, struct ether_vlan_header *); 6253 vh->evl_encap_proto = htons(ETHERTYPE_VLAN); 6254 vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag); 6255 m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN; 6256 m0->m_len += ETHER_VLAN_ENCAP_LEN; 6257 } 6258 } 6259 6260 /* Increment received packet statistics. */ 6261 ifp->if_ipackets++; 6262 6263bce_rx_int_next_rx: 6264 sw_rx_cons = NEXT_RX_BD(sw_rx_cons); 6265 6266 /* If we have a packet, pass it up the stack */ 6267 if (m0) { 6268 /* Make sure we don't lose our place when we release the lock. */ 6269 sc->rx_cons = sw_rx_cons; 6270#ifdef BCE_JUMBO_HDRSPLIT 6271 sc->pg_cons = sw_pg_cons; 6272#endif 6273 6274 BCE_UNLOCK(sc); 6275 (*ifp->if_input)(ifp, m0); 6276 BCE_LOCK(sc); 6277 6278 /* Recover our place. */ 6279 sw_rx_cons = sc->rx_cons; 6280#ifdef BCE_JUMBO_HDRSPLIT 6281 sw_pg_cons = sc->pg_cons; 6282#endif 6283 } 6284 6285 /* Refresh hw_cons to see if there's new work */ 6286 if (sw_rx_cons == hw_rx_cons) 6287 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 6288 } 6289 6290#ifdef BCE_JUMBO_HDRSPLIT 6291 /* No new packets. Refill the page chain. */ 6292 sc->pg_cons = sw_pg_cons; 6293 bce_fill_pg_chain(sc); 6294#endif 6295 6296 /* No new packets. Refill the RX chain. */ 6297 sc->rx_cons = sw_rx_cons; 6298 bce_fill_rx_chain(sc); 6299 6300 /* Prepare the page chain pages to be accessed by the NIC. */ 6301 for (int i = 0; i < RX_PAGES; i++) 6302 bus_dmamap_sync(sc->rx_bd_chain_tag, 6303 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE); 6304 6305#ifdef BCE_JUMBO_HDRSPLIT 6306 for (int i = 0; i < PG_PAGES; i++) 6307 bus_dmamap_sync(sc->pg_bd_chain_tag, 6308 sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); 6309#endif 6310 6311 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, " 6312 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 6313 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 6314 DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); 6315} 6316 6317 6318/****************************************************************************/ 6319/* Reads the transmit consumer value from the status block (skipping over */ 6320/* chain page pointer if necessary). */ 6321/* */ 6322/* Returns: */ 6323/* hw_cons */ 6324/****************************************************************************/ 6325static inline u16 6326bce_get_hw_tx_cons(struct bce_softc *sc) 6327{ 6328 u16 hw_cons; 6329 6330 mb(); 6331 hw_cons = sc->status_block->status_tx_quick_consumer_index0; 6332 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 6333 hw_cons++; 6334 6335 return hw_cons; 6336} 6337 6338 6339/****************************************************************************/ 6340/* Handles transmit completion interrupt events. */ 6341/* */ 6342/* Returns: */ 6343/* Nothing. */ 6344/****************************************************************************/ 6345static void 6346bce_tx_intr(struct bce_softc *sc) 6347{ 6348 struct ifnet *ifp = sc->bce_ifp; 6349 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 6350 6351 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); 6352 DBRUN(sc->interrupts_tx++); 6353 DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, " 6354 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", 6355 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); 6356 6357 BCE_LOCK_ASSERT(sc); 6358 6359 /* Get the hardware's view of the TX consumer index. */ 6360 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); 6361 sw_tx_cons = sc->tx_cons; 6362 6363 /* Prevent speculative reads of the status block. */ 6364 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 6365 BUS_SPACE_BARRIER_READ); 6366 6367 /* Cycle through any completed TX chain page entries. */ 6368 while (sw_tx_cons != hw_tx_cons) { 6369#ifdef BCE_DEBUG 6370 struct tx_bd *txbd = NULL; 6371#endif 6372 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 6373 6374 DBPRINT(sc, BCE_INFO_SEND, 6375 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, " 6376 "sw_tx_chain_cons = 0x%04X\n", 6377 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 6378 6379 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 6380 BCE_PRINTF("%s(%d): TX chain consumer out of range! " 6381 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons, 6382 (int) MAX_TX_BD); 6383 bce_breakpoint(sc)); 6384 6385 DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)] 6386 [TX_IDX(sw_tx_chain_cons)]); 6387 6388 DBRUNIF((txbd == NULL), 6389 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", 6390 __FILE__, __LINE__, sw_tx_chain_cons); 6391 bce_breakpoint(sc)); 6392 6393 DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__); 6394 bce_dump_txbd(sc, sw_tx_chain_cons, txbd)); 6395 6396 /* 6397 * Free the associated mbuf. Remember 6398 * that only the last tx_bd of a packet 6399 * has an mbuf pointer and DMA map. 6400 */ 6401 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { 6402 6403 /* Validate that this is the last tx_bd. */ 6404 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)), 6405 BCE_PRINTF("%s(%d): tx_bd END flag not set but " 6406 "txmbuf == NULL!\n", __FILE__, __LINE__); 6407 bce_breakpoint(sc)); 6408 6409 DBRUNMSG(BCE_INFO_SEND, 6410 BCE_PRINTF("%s(): Unloading map/freeing mbuf " 6411 "from tx_bd[0x%04X]\n", __FUNCTION__, 6412 sw_tx_chain_cons)); 6413 6414 /* Unmap the mbuf. */ 6415 bus_dmamap_unload(sc->tx_mbuf_tag, 6416 sc->tx_mbuf_map[sw_tx_chain_cons]); 6417 6418 /* Free the mbuf. */ 6419 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); 6420 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; 6421 DBRUN(sc->debug_tx_mbuf_alloc--); 6422 6423 ifp->if_opackets++; 6424 } 6425 6426 sc->used_tx_bd--; 6427 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 6428 6429 /* Refresh hw_cons to see if there's new work. */ 6430 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); 6431 6432 /* Prevent speculative reads of the status block. */ 6433 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 6434 BUS_SPACE_BARRIER_READ); 6435 } 6436 6437 /* Clear the TX timeout timer. */ 6438 sc->watchdog_timer = 0; 6439 6440 /* Clear the tx hardware queue full flag. */ 6441 if (sc->used_tx_bd < sc->max_tx_bd) { 6442 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE), 6443 DBPRINT(sc, BCE_INFO_SEND, 6444 "%s(): Open TX chain! %d/%d (used/total)\n", 6445 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd)); 6446 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 6447 } 6448 6449 sc->tx_cons = sw_tx_cons; 6450 6451 DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, " 6452 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", 6453 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); 6454 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); 6455} 6456 6457 6458/****************************************************************************/ 6459/* Disables interrupt generation. */ 6460/* */ 6461/* Returns: */ 6462/* Nothing. */ 6463/****************************************************************************/ 6464static void 6465bce_disable_intr(struct bce_softc *sc) 6466{ 6467 DBENTER(BCE_VERBOSE_INTR); 6468 6469 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); 6470 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 6471 6472 DBEXIT(BCE_VERBOSE_INTR); 6473} 6474 6475 6476/****************************************************************************/ 6477/* Enables interrupt generation. */ 6478/* */ 6479/* Returns: */ 6480/* Nothing. */ 6481/****************************************************************************/ 6482static void 6483bce_enable_intr(struct bce_softc *sc, int coal_now) 6484{ 6485 DBENTER(BCE_VERBOSE_INTR); 6486 6487 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 6488 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 6489 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 6490 6491 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 6492 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 6493 6494 /* Force an immediate interrupt (whether there is new data or not). */ 6495 if (coal_now) 6496 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); 6497 6498 DBEXIT(BCE_VERBOSE_INTR); 6499} 6500 6501 6502/****************************************************************************/ 6503/* Handles controller initialization. */ 6504/* */ 6505/* Returns: */ 6506/* Nothing. */ 6507/****************************************************************************/ 6508static void 6509bce_init_locked(struct bce_softc *sc) 6510{ 6511 struct ifnet *ifp; 6512 u32 ether_mtu = 0; 6513 6514 DBENTER(BCE_VERBOSE_RESET); 6515 6516 BCE_LOCK_ASSERT(sc); 6517 6518 ifp = sc->bce_ifp; 6519 6520 /* Check if the driver is still running and bail out if it is. */ 6521 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6522 goto bce_init_locked_exit; 6523 6524 bce_stop(sc); 6525 6526 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { 6527 BCE_PRINTF("%s(%d): Controller reset failed!\n", 6528 __FILE__, __LINE__); 6529 goto bce_init_locked_exit; 6530 } 6531 6532 if (bce_chipinit(sc)) { 6533 BCE_PRINTF("%s(%d): Controller initialization failed!\n", 6534 __FILE__, __LINE__); 6535 goto bce_init_locked_exit; 6536 } 6537 6538 if (bce_blockinit(sc)) { 6539 BCE_PRINTF("%s(%d): Block initialization failed!\n", 6540 __FILE__, __LINE__); 6541 goto bce_init_locked_exit; 6542 } 6543 6544 /* Load our MAC address. */ 6545 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN); 6546 bce_set_mac_addr(sc); 6547 6548 /* 6549 * Calculate and program the hardware Ethernet MTU 6550 * size. Be generous on the receive if we have room. 6551 */ 6552#ifdef BCE_JUMBO_HDRSPLIT 6553 if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + 6554 sc->pg_bd_mbuf_alloc_size)) 6555 ether_mtu = sc->rx_bd_mbuf_data_len + 6556 sc->pg_bd_mbuf_alloc_size; 6557#else 6558 if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len) 6559 ether_mtu = sc->rx_bd_mbuf_data_len; 6560#endif 6561 else 6562 ether_mtu = ifp->if_mtu; 6563 6564 ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; 6565 6566 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", 6567 __FUNCTION__, ether_mtu); 6568 6569 /* Program the mtu, enabling jumbo frame support if necessary. */ 6570 if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)) 6571 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, 6572 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | 6573 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); 6574 else 6575 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); 6576 6577 DBPRINT(sc, BCE_INFO_LOAD, 6578 "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, " 6579 "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__, 6580 sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len, 6581 sc->rx_bd_mbuf_align_pad); 6582 6583 /* Program appropriate promiscuous/multicast filtering. */ 6584 bce_set_rx_mode(sc); 6585 6586#ifdef BCE_JUMBO_HDRSPLIT 6587 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n", 6588 __FUNCTION__, sc->pg_bd_mbuf_alloc_size); 6589 6590 /* Init page buffer descriptor chain. */ 6591 bce_init_pg_chain(sc); 6592#endif 6593 6594 /* Init RX buffer descriptor chain. */ 6595 bce_init_rx_chain(sc); 6596 6597 /* Init TX buffer descriptor chain. */ 6598 bce_init_tx_chain(sc); 6599 6600 /* Enable host interrupts. */ 6601 bce_enable_intr(sc, 1); 6602 6603 bce_ifmedia_upd_locked(ifp); 6604 6605 /* Let the OS know the driver is up and running. */ 6606 ifp->if_drv_flags |= IFF_DRV_RUNNING; 6607 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 6608 6609 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); 6610 6611bce_init_locked_exit: 6612 DBEXIT(BCE_VERBOSE_RESET); 6613} 6614 6615 6616/****************************************************************************/ 6617/* Initialize the controller just enough so that any management firmware */ 6618/* running on the device will continue to operate correctly. */ 6619/* */ 6620/* Returns: */ 6621/* Nothing. */ 6622/****************************************************************************/ 6623static void 6624bce_mgmt_init_locked(struct bce_softc *sc) 6625{ 6626 struct ifnet *ifp; 6627 6628 DBENTER(BCE_VERBOSE_RESET); 6629 6630 BCE_LOCK_ASSERT(sc); 6631 6632 /* Bail out if management firmware is not running. */ 6633 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) { 6634 DBPRINT(sc, BCE_VERBOSE_SPECIAL, 6635 "No management firmware running...\n"); 6636 goto bce_mgmt_init_locked_exit; 6637 } 6638 6639 ifp = sc->bce_ifp; 6640 6641 /* Enable all critical blocks in the MAC. */ 6642 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 6643 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 6644 DELAY(20); 6645 6646 bce_ifmedia_upd_locked(ifp); 6647 6648bce_mgmt_init_locked_exit: 6649 DBEXIT(BCE_VERBOSE_RESET); 6650} 6651 6652 6653/****************************************************************************/ 6654/* Handles controller initialization when called from an unlocked routine. */ 6655/* */ 6656/* Returns: */ 6657/* Nothing. */ 6658/****************************************************************************/ 6659static void 6660bce_init(void *xsc) 6661{ 6662 struct bce_softc *sc = xsc; 6663 6664 DBENTER(BCE_VERBOSE_RESET); 6665 6666 BCE_LOCK(sc); 6667 bce_init_locked(sc); 6668 BCE_UNLOCK(sc); 6669 6670 DBEXIT(BCE_VERBOSE_RESET); 6671} 6672 6673 6674/****************************************************************************/ 6675/* Modifies an mbuf for TSO on the hardware. */ 6676/* */ 6677/* Returns: */ 6678/* Pointer to a modified mbuf. */ 6679/****************************************************************************/ 6680static struct mbuf * 6681bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags) 6682{ 6683 struct mbuf *m; 6684 struct ether_header *eh; 6685 struct ip *ip; 6686 struct tcphdr *th; 6687 u16 etype; 6688 int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0; 6689 6690 DBRUN(sc->tso_frames_requested++); 6691 6692 /* Controller may modify mbuf chains. */ 6693 if (M_WRITABLE(*m_head) == 0) { 6694 m = m_dup(*m_head, M_DONTWAIT); 6695 m_freem(*m_head); 6696 if (m == NULL) { 6697 sc->mbuf_alloc_failed_count++; 6698 *m_head = NULL; 6699 return (NULL); 6700 } 6701 *m_head = m; 6702 } 6703 6704 /* 6705 * For TSO the controller needs two pieces of info, 6706 * the MSS and the IP+TCP options length. 6707 */ 6708 m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip)); 6709 if (m == NULL) { 6710 *m_head = NULL; 6711 return (NULL); 6712 } 6713 eh = mtod(m, struct ether_header *); 6714 etype = ntohs(eh->ether_type); 6715 6716 /* Check for supported TSO Ethernet types (only IPv4 for now) */ 6717 switch (etype) { 6718 case ETHERTYPE_IP: 6719 ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); 6720 /* TSO only supported for TCP protocol. */ 6721 if (ip->ip_p != IPPROTO_TCP) { 6722 BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n", 6723 __FILE__, __LINE__); 6724 m_freem(*m_head); 6725 *m_head = NULL; 6726 return (NULL); 6727 } 6728 6729 /* Get IP header length in bytes (min 20) */ 6730 ip_hlen = ip->ip_hl << 2; 6731 m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen + 6732 sizeof(struct tcphdr)); 6733 if (m == NULL) { 6734 *m_head = NULL; 6735 return (NULL); 6736 } 6737 6738 /* Get the TCP header length in bytes (min 20) */ 6739 ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); 6740 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 6741 tcp_hlen = (th->th_off << 2); 6742 6743 /* Make sure all IP/TCP options live in the same buffer. */ 6744 m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen + 6745 tcp_hlen); 6746 if (m == NULL) { 6747 *m_head = NULL; 6748 return (NULL); 6749 } 6750 6751 /* IP header length and checksum will be calc'd by hardware */ 6752 ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); 6753 ip_len = ip->ip_len; 6754 ip->ip_len = 0; 6755 ip->ip_sum = 0; 6756 break; 6757 case ETHERTYPE_IPV6: 6758 BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n", 6759 __FILE__, __LINE__); 6760 m_freem(*m_head); 6761 *m_head = NULL; 6762 return (NULL); 6763 /* NOT REACHED */ 6764 default: 6765 BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n", 6766 __FILE__, __LINE__); 6767 m_freem(*m_head); 6768 *m_head = NULL; 6769 return (NULL); 6770 } 6771 6772 hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen; 6773 6774 DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, " 6775 "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n", 6776 __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen, 6777 tcp_hlen, ip_len); 6778 6779 /* Set the LSO flag in the TX BD */ 6780 *flags |= TX_BD_FLAGS_SW_LSO; 6781 6782 /* Set the length of IP + TCP options (in 32 bit words) */ 6783 *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) - 6784 sizeof(struct tcphdr)) >> 2) << 8); 6785 6786 DBRUN(sc->tso_frames_completed++); 6787 return (*m_head); 6788} 6789 6790 6791/****************************************************************************/ 6792/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ 6793/* memory visible to the controller. */ 6794/* */ 6795/* Returns: */ 6796/* 0 for success, positive value for failure. */ 6797/* Modified: */ 6798/* m_head: May be set to NULL if MBUF is excessively fragmented. */ 6799/****************************************************************************/ 6800static int 6801bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head) 6802{ 6803 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 6804 bus_dmamap_t map; 6805 struct tx_bd *txbd = NULL; 6806 struct mbuf *m0; 6807 u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0; 6808 u32 prod_bseq; 6809 6810#ifdef BCE_DEBUG 6811 u16 debug_prod; 6812#endif 6813 6814 int i, error, nsegs, rc = 0; 6815 6816 DBENTER(BCE_VERBOSE_SEND); 6817 6818 /* Make sure we have room in the TX chain. */ 6819 if (sc->used_tx_bd >= sc->max_tx_bd) 6820 goto bce_tx_encap_exit; 6821 6822 /* Transfer any checksum offload flags to the bd. */ 6823 m0 = *m_head; 6824 if (m0->m_pkthdr.csum_flags) { 6825 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 6826 m0 = bce_tso_setup(sc, m_head, &flags); 6827 if (m0 == NULL) { 6828 DBRUN(sc->tso_frames_failed++); 6829 goto bce_tx_encap_exit; 6830 } 6831 mss = htole16(m0->m_pkthdr.tso_segsz); 6832 } else { 6833 if (m0->m_pkthdr.csum_flags & CSUM_IP) 6834 flags |= TX_BD_FLAGS_IP_CKSUM; 6835 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 6836 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 6837 } 6838 } 6839 6840 /* Transfer any VLAN tags to the bd. */ 6841 if (m0->m_flags & M_VLANTAG) { 6842 flags |= TX_BD_FLAGS_VLAN_TAG; 6843 vlan_tag = m0->m_pkthdr.ether_vtag; 6844 } 6845 6846 /* Map the mbuf into DMAable memory. */ 6847 prod = sc->tx_prod; 6848 chain_prod = TX_CHAIN_IDX(prod); 6849 map = sc->tx_mbuf_map[chain_prod]; 6850 6851 /* Map the mbuf into our DMA address space. */ 6852 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0, 6853 segs, &nsegs, BUS_DMA_NOWAIT); 6854 6855 /* Check if the DMA mapping was successful */ 6856 if (error == EFBIG) { 6857 sc->mbuf_frag_count++; 6858 6859 /* Try to defrag the mbuf. */ 6860 m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS); 6861 if (m0 == NULL) { 6862 /* Defrag was unsuccessful */ 6863 m_freem(*m_head); 6864 *m_head = NULL; 6865 sc->mbuf_alloc_failed_count++; 6866 rc = ENOBUFS; 6867 goto bce_tx_encap_exit; 6868 } 6869 6870 /* Defrag was successful, try mapping again */ 6871 *m_head = m0; 6872 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, 6873 map, m0, segs, &nsegs, BUS_DMA_NOWAIT); 6874 6875 /* Still getting an error after a defrag. */ 6876 if (error == ENOMEM) { 6877 /* Insufficient DMA buffers available. */ 6878 sc->dma_map_addr_tx_failed_count++; 6879 rc = error; 6880 goto bce_tx_encap_exit; 6881 } else if (error != 0) { 6882 /* Release it and return an error. */ 6883 BCE_PRINTF("%s(%d): Unknown error mapping mbuf into " 6884 "TX chain!\n", __FILE__, __LINE__); 6885 m_freem(m0); 6886 *m_head = NULL; 6887 sc->dma_map_addr_tx_failed_count++; 6888 rc = ENOBUFS; 6889 goto bce_tx_encap_exit; 6890 } 6891 } else if (error == ENOMEM) { 6892 /* Insufficient DMA buffers available. */ 6893 sc->dma_map_addr_tx_failed_count++; 6894 rc = error; 6895 goto bce_tx_encap_exit; 6896 } else if (error != 0) { 6897 m_freem(m0); 6898 *m_head = NULL; 6899 sc->dma_map_addr_tx_failed_count++; 6900 rc = error; 6901 goto bce_tx_encap_exit; 6902 } 6903 6904 /* Make sure there's room in the chain */ 6905 if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) { 6906 bus_dmamap_unload(sc->tx_mbuf_tag, map); 6907 rc = ENOBUFS; 6908 goto bce_tx_encap_exit; 6909 } 6910 6911 /* prod points to an empty tx_bd at this point. */ 6912 prod_bseq = sc->tx_prod_bseq; 6913 6914#ifdef BCE_DEBUG 6915 debug_prod = chain_prod; 6916#endif 6917 6918 DBPRINT(sc, BCE_INFO_SEND, 6919 "%s(start): prod = 0x%04X, chain_prod = 0x%04X, " 6920 "prod_bseq = 0x%08X\n", 6921 __FUNCTION__, prod, chain_prod, prod_bseq); 6922 6923 /* 6924 * Cycle through each mbuf segment that makes up 6925 * the outgoing frame, gathering the mapping info 6926 * for that segment and creating a tx_bd for 6927 * the mbuf. 6928 */ 6929 for (i = 0; i < nsegs ; i++) { 6930 6931 chain_prod = TX_CHAIN_IDX(prod); 6932 txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)] 6933 [TX_IDX(chain_prod)]; 6934 6935 txbd->tx_bd_haddr_lo = 6936 htole32(BCE_ADDR_LO(segs[i].ds_addr)); 6937 txbd->tx_bd_haddr_hi = 6938 htole32(BCE_ADDR_HI(segs[i].ds_addr)); 6939 txbd->tx_bd_mss_nbytes = htole32(mss << 16) | 6940 htole16(segs[i].ds_len); 6941 txbd->tx_bd_vlan_tag = htole16(vlan_tag); 6942 txbd->tx_bd_flags = htole16(flags); 6943 prod_bseq += segs[i].ds_len; 6944 if (i == 0) 6945 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START); 6946 prod = NEXT_TX_BD(prod); 6947 } 6948 6949 /* Set the END flag on the last TX buffer descriptor. */ 6950 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); 6951 6952 DBRUNMSG(BCE_EXTREME_SEND, 6953 bce_dump_tx_chain(sc, debug_prod, nsegs)); 6954 6955 /* 6956 * Ensure that the mbuf pointer for this transmission 6957 * is placed at the array index of the last 6958 * descriptor in this chain. This is done 6959 * because a single map is used for all 6960 * segments of the mbuf and we don't want to 6961 * unload the map before all of the segments 6962 * have been freed. 6963 */ 6964 sc->tx_mbuf_ptr[chain_prod] = m0; 6965 sc->used_tx_bd += nsegs; 6966 6967 /* Update some debug statistic counters */ 6968 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 6969 sc->tx_hi_watermark = sc->used_tx_bd); 6970 DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++); 6971 DBRUNIF(sc->debug_tx_mbuf_alloc++); 6972 6973 DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1)); 6974 6975 /* prod points to the next free tx_bd at this point. */ 6976 sc->tx_prod = prod; 6977 sc->tx_prod_bseq = prod_bseq; 6978 6979 /* Tell the chip about the waiting TX frames. */ 6980 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + 6981 BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod); 6982 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + 6983 BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq); 6984 6985bce_tx_encap_exit: 6986 DBEXIT(BCE_VERBOSE_SEND); 6987 return(rc); 6988} 6989 6990 6991/****************************************************************************/ 6992/* Main transmit routine when called from another routine with a lock. */ 6993/* */ 6994/* Returns: */ 6995/* Nothing. */ 6996/****************************************************************************/ 6997static void 6998bce_start_locked(struct ifnet *ifp) 6999{ 7000 struct bce_softc *sc = ifp->if_softc; 7001 struct mbuf *m_head = NULL; 7002 int count = 0; 7003 u16 tx_prod, tx_chain_prod; 7004 7005 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 7006 7007 BCE_LOCK_ASSERT(sc); 7008 7009 /* prod points to the next free tx_bd. */ 7010 tx_prod = sc->tx_prod; 7011 tx_chain_prod = TX_CHAIN_IDX(tx_prod); 7012 7013 DBPRINT(sc, BCE_INFO_SEND, 7014 "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, " 7015 "tx_prod_bseq = 0x%08X\n", 7016 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq); 7017 7018 /* If there's no link or the transmit queue is empty then just exit. */ 7019 if (sc->bce_link_up == FALSE) { 7020 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n", 7021 __FUNCTION__); 7022 goto bce_start_locked_exit; 7023 } 7024 7025 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 7026 DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n", 7027 __FUNCTION__); 7028 goto bce_start_locked_exit; 7029 } 7030 7031 /* 7032 * Keep adding entries while there is space in the ring. 7033 */ 7034 while (sc->used_tx_bd < sc->max_tx_bd) { 7035 7036 /* Check for any frames to send. */ 7037 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 7038 7039 /* Stop when the transmit queue is empty. */ 7040 if (m_head == NULL) 7041 break; 7042 7043 /* 7044 * Pack the data into the transmit ring. If we 7045 * don't have room, place the mbuf back at the 7046 * head of the queue and set the OACTIVE flag 7047 * to wait for the NIC to drain the chain. 7048 */ 7049 if (bce_tx_encap(sc, &m_head)) { 7050 if (m_head != NULL) 7051 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 7052 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 7053 DBPRINT(sc, BCE_INFO_SEND, 7054 "TX chain is closed for business! Total " 7055 "tx_bd used = %d\n", sc->used_tx_bd); 7056 break; 7057 } 7058 7059 count++; 7060 7061 /* Send a copy of the frame to any BPF listeners. */ 7062 ETHER_BPF_MTAP(ifp, m_head); 7063 } 7064 7065 /* Exit if no packets were dequeued. */ 7066 if (count == 0) { 7067 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were " 7068 "dequeued\n", __FUNCTION__); 7069 goto bce_start_locked_exit; 7070 } 7071 7072 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into " 7073 "send queue.\n", __FUNCTION__, count); 7074 7075 /* Set the tx timeout. */ 7076 sc->watchdog_timer = BCE_TX_TIMEOUT; 7077 7078 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID)); 7079 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc)); 7080 7081bce_start_locked_exit: 7082 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 7083 return; 7084} 7085 7086 7087/****************************************************************************/ 7088/* Main transmit routine when called from another routine without a lock. */ 7089/* */ 7090/* Returns: */ 7091/* Nothing. */ 7092/****************************************************************************/ 7093static void 7094bce_start(struct ifnet *ifp) 7095{ 7096 struct bce_softc *sc = ifp->if_softc; 7097 7098 DBENTER(BCE_VERBOSE_SEND); 7099 7100 BCE_LOCK(sc); 7101 bce_start_locked(ifp); 7102 BCE_UNLOCK(sc); 7103 7104 DBEXIT(BCE_VERBOSE_SEND); 7105} 7106 7107 7108/****************************************************************************/ 7109/* Handles any IOCTL calls from the operating system. */ 7110/* */ 7111/* Returns: */ 7112/* 0 for success, positive value for failure. */ 7113/****************************************************************************/ 7114static int 7115bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 7116{ 7117 struct bce_softc *sc = ifp->if_softc; 7118 struct ifreq *ifr = (struct ifreq *) data; 7119 struct mii_data *mii; 7120 int mask, error = 0, reinit; 7121 7122 DBENTER(BCE_VERBOSE_MISC); 7123 7124 switch(command) { 7125 7126 /* Set the interface MTU. */ 7127 case SIOCSIFMTU: 7128 /* Check that the MTU setting is supported. */ 7129 if ((ifr->ifr_mtu < BCE_MIN_MTU) || 7130 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) { 7131 error = EINVAL; 7132 break; 7133 } 7134 7135 DBPRINT(sc, BCE_INFO_MISC, 7136 "SIOCSIFMTU: Changing MTU from %d to %d\n", 7137 (int) ifp->if_mtu, (int) ifr->ifr_mtu); 7138 7139 BCE_LOCK(sc); 7140 ifp->if_mtu = ifr->ifr_mtu; 7141 reinit = 0; 7142 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 7143 /* 7144 * Because allocation size is used in RX 7145 * buffer allocation, stop controller if 7146 * it is already running. 7147 */ 7148 bce_stop(sc); 7149 reinit = 1; 7150 } 7151#ifdef BCE_JUMBO_HDRSPLIT 7152 /* No buffer allocation size changes are necessary. */ 7153#else 7154 /* Recalculate our buffer allocation sizes. */ 7155 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 7156 ETHER_CRC_LEN) > MCLBYTES) { 7157 sc->rx_bd_mbuf_alloc_size = MJUM9BYTES; 7158 sc->rx_bd_mbuf_align_pad = 7159 roundup2(MJUM9BYTES, 16) - MJUM9BYTES; 7160 sc->rx_bd_mbuf_data_len = 7161 sc->rx_bd_mbuf_alloc_size - 7162 sc->rx_bd_mbuf_align_pad; 7163 } else { 7164 sc->rx_bd_mbuf_alloc_size = MCLBYTES; 7165 sc->rx_bd_mbuf_align_pad = 7166 roundup2(MCLBYTES, 16) - MCLBYTES; 7167 sc->rx_bd_mbuf_data_len = 7168 sc->rx_bd_mbuf_alloc_size - 7169 sc->rx_bd_mbuf_align_pad; 7170 } 7171#endif 7172 7173 if (reinit != 0) 7174 bce_init_locked(sc); 7175 BCE_UNLOCK(sc); 7176 break; 7177 7178 /* Set interface flags. */ 7179 case SIOCSIFFLAGS: 7180 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n"); 7181 7182 BCE_LOCK(sc); 7183 7184 /* Check if the interface is up. */ 7185 if (ifp->if_flags & IFF_UP) { 7186 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 7187 /* Change promiscuous/multicast flags as necessary. */ 7188 bce_set_rx_mode(sc); 7189 } else { 7190 /* Start the HW */ 7191 bce_init_locked(sc); 7192 } 7193 } else { 7194 /* The interface is down, check if driver is running. */ 7195 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 7196 bce_stop(sc); 7197 7198 /* If MFW is running, restart the controller a bit. */ 7199 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 7200 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 7201 bce_chipinit(sc); 7202 bce_mgmt_init_locked(sc); 7203 } 7204 } 7205 } 7206 7207 BCE_UNLOCK(sc); 7208 break; 7209 7210 /* Add/Delete multicast address */ 7211 case SIOCADDMULTI: 7212 case SIOCDELMULTI: 7213 DBPRINT(sc, BCE_VERBOSE_MISC, 7214 "Received SIOCADDMULTI/SIOCDELMULTI\n"); 7215 7216 BCE_LOCK(sc); 7217 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 7218 bce_set_rx_mode(sc); 7219 BCE_UNLOCK(sc); 7220 7221 break; 7222 7223 /* Set/Get Interface media */ 7224 case SIOCSIFMEDIA: 7225 case SIOCGIFMEDIA: 7226 DBPRINT(sc, BCE_VERBOSE_MISC, 7227 "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n"); 7228 7229 mii = device_get_softc(sc->bce_miibus); 7230 error = ifmedia_ioctl(ifp, ifr, 7231 &mii->mii_media, command); 7232 break; 7233 7234 /* Set interface capability */ 7235 case SIOCSIFCAP: 7236 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 7237 DBPRINT(sc, BCE_INFO_MISC, 7238 "Received SIOCSIFCAP = 0x%08X\n", (u32) mask); 7239 7240 /* Toggle the TX checksum capabilities enable flag. */ 7241 if (mask & IFCAP_TXCSUM && 7242 ifp->if_capabilities & IFCAP_TXCSUM) { 7243 ifp->if_capenable ^= IFCAP_TXCSUM; 7244 if (IFCAP_TXCSUM & ifp->if_capenable) 7245 ifp->if_hwassist |= BCE_IF_HWASSIST; 7246 else 7247 ifp->if_hwassist &= ~BCE_IF_HWASSIST; 7248 } 7249 7250 /* Toggle the RX checksum capabilities enable flag. */ 7251 if (mask & IFCAP_RXCSUM && 7252 ifp->if_capabilities & IFCAP_RXCSUM) 7253 ifp->if_capenable ^= IFCAP_RXCSUM; 7254 7255 /* Toggle the TSO capabilities enable flag. */ 7256 if (bce_tso_enable && (mask & IFCAP_TSO4) && 7257 ifp->if_capabilities & IFCAP_TSO4) { 7258 ifp->if_capenable ^= IFCAP_TSO4; 7259 if (IFCAP_TSO4 & ifp->if_capenable) 7260 ifp->if_hwassist |= CSUM_TSO; 7261 else 7262 ifp->if_hwassist &= ~CSUM_TSO; 7263 } 7264 7265 if (mask & IFCAP_VLAN_HWCSUM && 7266 ifp->if_capabilities & IFCAP_VLAN_HWCSUM) 7267 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 7268 7269 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 7270 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 7271 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 7272 /* 7273 * Don't actually disable VLAN tag stripping as 7274 * management firmware (ASF/IPMI/UMP) requires the 7275 * feature. If VLAN tag stripping is disabled driver 7276 * will manually reconstruct the VLAN frame by 7277 * appending stripped VLAN tag. 7278 */ 7279 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 7280 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) { 7281 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 7282 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 7283 == 0) 7284 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 7285 } 7286 VLAN_CAPABILITIES(ifp); 7287 break; 7288 default: 7289 /* We don't know how to handle the IOCTL, pass it on. */ 7290 error = ether_ioctl(ifp, command, data); 7291 break; 7292 } 7293 7294 DBEXIT(BCE_VERBOSE_MISC); 7295 return(error); 7296} 7297 7298 7299/****************************************************************************/ 7300/* Transmit timeout handler. */ 7301/* */ 7302/* Returns: */ 7303/* Nothing. */ 7304/****************************************************************************/ 7305static void 7306bce_watchdog(struct bce_softc *sc) 7307{ 7308 DBENTER(BCE_EXTREME_SEND); 7309 7310 BCE_LOCK_ASSERT(sc); 7311 7312 /* If the watchdog timer hasn't expired then just exit. */ 7313 if (sc->watchdog_timer == 0 || --sc->watchdog_timer) 7314 goto bce_watchdog_exit; 7315 7316 /* If pause frames are active then don't reset the hardware. */ 7317 /* ToDo: Should we reset the timer here? */ 7318 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 7319 goto bce_watchdog_exit; 7320 7321 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n", 7322 __FILE__, __LINE__); 7323 7324 DBRUNMSG(BCE_INFO, 7325 bce_dump_driver_state(sc); 7326 bce_dump_status_block(sc); 7327 bce_dump_stats_block(sc); 7328 bce_dump_ftqs(sc); 7329 bce_dump_txp_state(sc, 0); 7330 bce_dump_rxp_state(sc, 0); 7331 bce_dump_tpat_state(sc, 0); 7332 bce_dump_cp_state(sc, 0); 7333 bce_dump_com_state(sc, 0)); 7334 7335 DBRUN(bce_breakpoint(sc)); 7336 7337 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 7338 7339 bce_init_locked(sc); 7340 sc->bce_ifp->if_oerrors++; 7341 7342bce_watchdog_exit: 7343 DBEXIT(BCE_EXTREME_SEND); 7344} 7345 7346 7347/* 7348 * Interrupt handler. 7349 */ 7350/****************************************************************************/ 7351/* Main interrupt entry point. Verifies that the controller generated the */ 7352/* interrupt and then calls a separate routine for handle the various */ 7353/* interrupt causes (PHY, TX, RX). */ 7354/* */ 7355/* Returns: */ 7356/* 0 for success, positive value for failure. */ 7357/****************************************************************************/ 7358static void 7359bce_intr(void *xsc) 7360{ 7361 struct bce_softc *sc; 7362 struct ifnet *ifp; 7363 u32 status_attn_bits; 7364 u16 hw_rx_cons, hw_tx_cons; 7365 7366 sc = xsc; 7367 ifp = sc->bce_ifp; 7368 7369 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); 7370 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); 7371 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc)); 7372 7373 BCE_LOCK(sc); 7374 7375 DBRUN(sc->interrupts_generated++); 7376 7377 /* Synchnorize before we read from interface's status block */ 7378 bus_dmamap_sync(sc->status_tag, sc->status_map, 7379 BUS_DMASYNC_POSTREAD); 7380 7381 /* 7382 * If the hardware status block index 7383 * matches the last value read by the 7384 * driver and we haven't asserted our 7385 * interrupt then there's nothing to do. 7386 */ 7387 if ((sc->status_block->status_idx == sc->last_status_idx) && 7388 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 7389 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { 7390 DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n", 7391 __FUNCTION__); 7392 goto bce_intr_exit; 7393 } 7394 7395 /* Ack the interrupt and stop others from occuring. */ 7396 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 7397 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 7398 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 7399 7400 /* Check if the hardware has finished any work. */ 7401 hw_rx_cons = bce_get_hw_rx_cons(sc); 7402 hw_tx_cons = bce_get_hw_tx_cons(sc); 7403 7404 /* Keep processing data as long as there is work to do. */ 7405 for (;;) { 7406 7407 status_attn_bits = sc->status_block->status_attn_bits; 7408 7409 DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), 7410 BCE_PRINTF("Simulating unexpected status attention " 7411 "bit set."); 7412 sc->unexpected_attention_sim_count++; 7413 status_attn_bits = status_attn_bits | 7414 STATUS_ATTN_BITS_PARITY_ERROR); 7415 7416 /* Was it a link change interrupt? */ 7417 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 7418 (sc->status_block->status_attn_bits_ack & 7419 STATUS_ATTN_BITS_LINK_STATE)) { 7420 bce_phy_intr(sc); 7421 7422 /* Clear transient updates during link state change. */ 7423 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | 7424 BCE_HC_COMMAND_COAL_NOW_WO_INT); 7425 REG_RD(sc, BCE_HC_COMMAND); 7426 } 7427 7428 /* If any other attention is asserted, the chip is toast. */ 7429 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 7430 (sc->status_block->status_attn_bits_ack & 7431 ~STATUS_ATTN_BITS_LINK_STATE))) { 7432 7433 sc->unexpected_attention_count++; 7434 7435 BCE_PRINTF("%s(%d): Fatal attention detected: " 7436 "0x%08X\n", __FILE__, __LINE__, 7437 sc->status_block->status_attn_bits); 7438 7439 DBRUNMSG(BCE_FATAL, 7440 if (unexpected_attention_sim_control == 0) 7441 bce_breakpoint(sc)); 7442 7443 bce_init_locked(sc); 7444 goto bce_intr_exit; 7445 } 7446 7447 /* Check for any completed RX frames. */ 7448 if (hw_rx_cons != sc->hw_rx_cons) 7449 bce_rx_intr(sc); 7450 7451 /* Check for any completed TX frames. */ 7452 if (hw_tx_cons != sc->hw_tx_cons) 7453 bce_tx_intr(sc); 7454 7455 /* Save status block index value for the next interrupt. */ 7456 sc->last_status_idx = sc->status_block->status_idx; 7457 7458 /* 7459 * Prevent speculative reads from getting 7460 * ahead of the status block. 7461 */ 7462 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 7463 BUS_SPACE_BARRIER_READ); 7464 7465 /* 7466 * If there's no work left then exit the 7467 * interrupt service routine. 7468 */ 7469 hw_rx_cons = bce_get_hw_rx_cons(sc); 7470 hw_tx_cons = bce_get_hw_tx_cons(sc); 7471 7472 if ((hw_rx_cons == sc->hw_rx_cons) && 7473 (hw_tx_cons == sc->hw_tx_cons)) 7474 break; 7475 7476 } 7477 7478 bus_dmamap_sync(sc->status_tag, sc->status_map, 7479 BUS_DMASYNC_PREREAD); 7480 7481 /* Re-enable interrupts. */ 7482 bce_enable_intr(sc, 0); 7483 7484 /* Handle any frames that arrived while handling the interrupt. */ 7485 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 7486 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 7487 bce_start_locked(ifp); 7488 7489bce_intr_exit: 7490 BCE_UNLOCK(sc); 7491 7492 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); 7493} 7494 7495 7496/****************************************************************************/ 7497/* Programs the various packet receive modes (broadcast and multicast). */ 7498/* */ 7499/* Returns: */ 7500/* Nothing. */ 7501/****************************************************************************/ 7502static void 7503bce_set_rx_mode(struct bce_softc *sc) 7504{ 7505 struct ifnet *ifp; 7506 struct ifmultiaddr *ifma; 7507 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 7508 u32 rx_mode, sort_mode; 7509 int h, i; 7510 7511 DBENTER(BCE_VERBOSE_MISC); 7512 7513 BCE_LOCK_ASSERT(sc); 7514 7515 ifp = sc->bce_ifp; 7516 7517 /* Initialize receive mode default settings. */ 7518 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 7519 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 7520 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 7521 7522 /* 7523 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 7524 * be enbled. 7525 */ 7526 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 7527 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))) 7528 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 7529 7530 /* 7531 * Check for promiscuous, all multicast, or selected 7532 * multicast address filtering. 7533 */ 7534 if (ifp->if_flags & IFF_PROMISC) { 7535 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n"); 7536 7537 /* Enable promiscuous mode. */ 7538 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 7539 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 7540 } else if (ifp->if_flags & IFF_ALLMULTI) { 7541 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n"); 7542 7543 /* Enable all multicast addresses. */ 7544 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 7545 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff); 7546 } 7547 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 7548 } else { 7549 /* Accept one or more multicast(s). */ 7550 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n"); 7551 7552 if_maddr_rlock(ifp); 7553 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 7554 if (ifma->ifma_addr->sa_family != AF_LINK) 7555 continue; 7556 h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 7557 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF; 7558 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 7559 } 7560 if_maddr_runlock(ifp); 7561 7562 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 7563 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]); 7564 7565 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 7566 } 7567 7568 /* Only make changes if the recive mode has actually changed. */ 7569 if (rx_mode != sc->rx_mode) { 7570 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: " 7571 "0x%08X\n", rx_mode); 7572 7573 sc->rx_mode = rx_mode; 7574 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 7575 } 7576 7577 /* Disable and clear the exisitng sort before enabling a new sort. */ 7578 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 7579 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 7580 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 7581 7582 DBEXIT(BCE_VERBOSE_MISC); 7583} 7584 7585 7586/****************************************************************************/ 7587/* Called periodically to updates statistics from the controllers */ 7588/* statistics block. */ 7589/* */ 7590/* Returns: */ 7591/* Nothing. */ 7592/****************************************************************************/ 7593static void 7594bce_stats_update(struct bce_softc *sc) 7595{ 7596 struct ifnet *ifp; 7597 struct statistics_block *stats; 7598 7599 DBENTER(BCE_EXTREME_MISC); 7600 7601 ifp = sc->bce_ifp; 7602 7603 stats = (struct statistics_block *) sc->stats_block; 7604 7605 /* 7606 * Certain controllers don't report 7607 * carrier sense errors correctly. 7608 * See errata E11_5708CA0_1165. 7609 */ 7610 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 7611 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) 7612 ifp->if_oerrors += 7613 (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 7614 7615 /* 7616 * Update the sysctl statistics from the 7617 * hardware statistics. 7618 */ 7619 sc->stat_IfHCInOctets = 7620 ((u64) stats->stat_IfHCInOctets_hi << 32) + 7621 (u64) stats->stat_IfHCInOctets_lo; 7622 7623 sc->stat_IfHCInBadOctets = 7624 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 7625 (u64) stats->stat_IfHCInBadOctets_lo; 7626 7627 sc->stat_IfHCOutOctets = 7628 ((u64) stats->stat_IfHCOutOctets_hi << 32) + 7629 (u64) stats->stat_IfHCOutOctets_lo; 7630 7631 sc->stat_IfHCOutBadOctets = 7632 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + 7633 (u64) stats->stat_IfHCOutBadOctets_lo; 7634 7635 sc->stat_IfHCInUcastPkts = 7636 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + 7637 (u64) stats->stat_IfHCInUcastPkts_lo; 7638 7639 sc->stat_IfHCInMulticastPkts = 7640 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + 7641 (u64) stats->stat_IfHCInMulticastPkts_lo; 7642 7643 sc->stat_IfHCInBroadcastPkts = 7644 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + 7645 (u64) stats->stat_IfHCInBroadcastPkts_lo; 7646 7647 sc->stat_IfHCOutUcastPkts = 7648 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + 7649 (u64) stats->stat_IfHCOutUcastPkts_lo; 7650 7651 sc->stat_IfHCOutMulticastPkts = 7652 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) + 7653 (u64) stats->stat_IfHCOutMulticastPkts_lo; 7654 7655 sc->stat_IfHCOutBroadcastPkts = 7656 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 7657 (u64) stats->stat_IfHCOutBroadcastPkts_lo; 7658 7659 /* ToDo: Preserve counters beyond 32 bits? */ 7660 /* ToDo: Read the statistics from auto-clear regs? */ 7661 7662 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 7663 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 7664 7665 sc->stat_Dot3StatsCarrierSenseErrors = 7666 stats->stat_Dot3StatsCarrierSenseErrors; 7667 7668 sc->stat_Dot3StatsFCSErrors = 7669 stats->stat_Dot3StatsFCSErrors; 7670 7671 sc->stat_Dot3StatsAlignmentErrors = 7672 stats->stat_Dot3StatsAlignmentErrors; 7673 7674 sc->stat_Dot3StatsSingleCollisionFrames = 7675 stats->stat_Dot3StatsSingleCollisionFrames; 7676 7677 sc->stat_Dot3StatsMultipleCollisionFrames = 7678 stats->stat_Dot3StatsMultipleCollisionFrames; 7679 7680 sc->stat_Dot3StatsDeferredTransmissions = 7681 stats->stat_Dot3StatsDeferredTransmissions; 7682 7683 sc->stat_Dot3StatsExcessiveCollisions = 7684 stats->stat_Dot3StatsExcessiveCollisions; 7685 7686 sc->stat_Dot3StatsLateCollisions = 7687 stats->stat_Dot3StatsLateCollisions; 7688 7689 sc->stat_EtherStatsCollisions = 7690 stats->stat_EtherStatsCollisions; 7691 7692 sc->stat_EtherStatsFragments = 7693 stats->stat_EtherStatsFragments; 7694 7695 sc->stat_EtherStatsJabbers = 7696 stats->stat_EtherStatsJabbers; 7697 7698 sc->stat_EtherStatsUndersizePkts = 7699 stats->stat_EtherStatsUndersizePkts; 7700 7701 sc->stat_EtherStatsOversizePkts = 7702 stats->stat_EtherStatsOversizePkts; 7703 7704 sc->stat_EtherStatsPktsRx64Octets = 7705 stats->stat_EtherStatsPktsRx64Octets; 7706 7707 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 7708 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 7709 7710 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 7711 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 7712 7713 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 7714 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 7715 7716 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 7717 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 7718 7719 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 7720 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 7721 7722 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 7723 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 7724 7725 sc->stat_EtherStatsPktsTx64Octets = 7726 stats->stat_EtherStatsPktsTx64Octets; 7727 7728 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 7729 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 7730 7731 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 7732 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 7733 7734 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 7735 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 7736 7737 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 7738 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 7739 7740 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 7741 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 7742 7743 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 7744 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 7745 7746 sc->stat_XonPauseFramesReceived = 7747 stats->stat_XonPauseFramesReceived; 7748 7749 sc->stat_XoffPauseFramesReceived = 7750 stats->stat_XoffPauseFramesReceived; 7751 7752 sc->stat_OutXonSent = 7753 stats->stat_OutXonSent; 7754 7755 sc->stat_OutXoffSent = 7756 stats->stat_OutXoffSent; 7757 7758 sc->stat_FlowControlDone = 7759 stats->stat_FlowControlDone; 7760 7761 sc->stat_MacControlFramesReceived = 7762 stats->stat_MacControlFramesReceived; 7763 7764 sc->stat_XoffStateEntered = 7765 stats->stat_XoffStateEntered; 7766 7767 sc->stat_IfInFramesL2FilterDiscards = 7768 stats->stat_IfInFramesL2FilterDiscards; 7769 7770 sc->stat_IfInRuleCheckerDiscards = 7771 stats->stat_IfInRuleCheckerDiscards; 7772 7773 sc->stat_IfInFTQDiscards = 7774 stats->stat_IfInFTQDiscards; 7775 7776 sc->stat_IfInMBUFDiscards = 7777 stats->stat_IfInMBUFDiscards; 7778 7779 sc->stat_IfInRuleCheckerP4Hit = 7780 stats->stat_IfInRuleCheckerP4Hit; 7781 7782 sc->stat_CatchupInRuleCheckerDiscards = 7783 stats->stat_CatchupInRuleCheckerDiscards; 7784 7785 sc->stat_CatchupInFTQDiscards = 7786 stats->stat_CatchupInFTQDiscards; 7787 7788 sc->stat_CatchupInMBUFDiscards = 7789 stats->stat_CatchupInMBUFDiscards; 7790 7791 sc->stat_CatchupInRuleCheckerP4Hit = 7792 stats->stat_CatchupInRuleCheckerP4Hit; 7793 7794 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 7795 7796 /* 7797 * Update the interface statistics from the 7798 * hardware statistics. 7799 */ 7800 ifp->if_collisions = 7801 (u_long) sc->stat_EtherStatsCollisions; 7802 7803 /* ToDo: This method loses soft errors. */ 7804 ifp->if_ierrors = 7805 (u_long) sc->stat_EtherStatsUndersizePkts + 7806 (u_long) sc->stat_EtherStatsOversizePkts + 7807 (u_long) sc->stat_IfInMBUFDiscards + 7808 (u_long) sc->stat_Dot3StatsAlignmentErrors + 7809 (u_long) sc->stat_Dot3StatsFCSErrors + 7810 (u_long) sc->stat_IfInRuleCheckerDiscards + 7811 (u_long) sc->stat_IfInFTQDiscards + 7812 (u_long) sc->com_no_buffers; 7813 7814 /* ToDo: This method loses soft errors. */ 7815 ifp->if_oerrors = 7816 (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 7817 (u_long) sc->stat_Dot3StatsExcessiveCollisions + 7818 (u_long) sc->stat_Dot3StatsLateCollisions; 7819 7820 /* ToDo: Add additional statistics? */ 7821 7822 DBEXIT(BCE_EXTREME_MISC); 7823} 7824 7825 7826/****************************************************************************/ 7827/* Periodic function to notify the bootcode that the driver is still */ 7828/* present. */ 7829/* */ 7830/* Returns: */ 7831/* Nothing. */ 7832/****************************************************************************/ 7833static void 7834bce_pulse(void *xsc) 7835{ 7836 struct bce_softc *sc = xsc; 7837 u32 msg; 7838 7839 DBENTER(BCE_EXTREME_MISC); 7840 7841 BCE_LOCK_ASSERT(sc); 7842 7843 /* Tell the firmware that the driver is still running. */ 7844 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq; 7845 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 7846 7847 /* Update the bootcode condition. */ 7848 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 7849 7850 /* Report whether the bootcode still knows the driver is running. */ 7851 if (bootverbose) { 7852 if (sc->bce_drv_cardiac_arrest == FALSE) { 7853 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 7854 sc->bce_drv_cardiac_arrest = TRUE; 7855 BCE_PRINTF("%s(): Warning: bootcode " 7856 "thinks driver is absent! " 7857 "(bc_state = 0x%08X)\n", 7858 __FUNCTION__, sc->bc_state); 7859 } 7860 } else { 7861 /* 7862 * Not supported by all bootcode versions. 7863 * (v5.0.11+ and v5.2.1+) Older bootcode 7864 * will require the driver to reset the 7865 * controller to clear this condition. 7866 */ 7867 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 7868 sc->bce_drv_cardiac_arrest = FALSE; 7869 BCE_PRINTF("%s(): Bootcode found the " 7870 "driver pulse! (bc_state = 0x%08X)\n", 7871 __FUNCTION__, sc->bc_state); 7872 } 7873 } 7874 } 7875 7876 7877 /* Schedule the next pulse. */ 7878 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc); 7879 7880 DBEXIT(BCE_EXTREME_MISC); 7881} 7882 7883 7884/****************************************************************************/ 7885/* Periodic function to perform maintenance tasks. */ 7886/* */ 7887/* Returns: */ 7888/* Nothing. */ 7889/****************************************************************************/ 7890static void 7891bce_tick(void *xsc) 7892{ 7893 struct bce_softc *sc = xsc; 7894 struct mii_data *mii; 7895 struct ifnet *ifp; 7896 7897 ifp = sc->bce_ifp; 7898 7899 DBENTER(BCE_EXTREME_MISC); 7900 7901 BCE_LOCK_ASSERT(sc); 7902 7903 /* Schedule the next tick. */ 7904 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); 7905 7906 /* Update the statistics from the hardware statistics block. */ 7907 bce_stats_update(sc); 7908 7909 /* Top off the receive and page chains. */ 7910#ifdef BCE_JUMBO_HDRSPLIT 7911 bce_fill_pg_chain(sc); 7912#endif 7913 bce_fill_rx_chain(sc); 7914 7915 /* Check that chip hasn't hung. */ 7916 bce_watchdog(sc); 7917 7918 /* If link is up already up then we're done. */ 7919 if (sc->bce_link_up == TRUE) 7920 goto bce_tick_exit; 7921 7922 /* Link is down. Check what the PHY's doing. */ 7923 mii = device_get_softc(sc->bce_miibus); 7924 mii_tick(mii); 7925 7926 /* Check if the link has come up. */ 7927 if ((mii->mii_media_status & IFM_ACTIVE) && 7928 (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) { 7929 DBPRINT(sc, BCE_VERBOSE_MISC, 7930 "%s(): Link up!\n", __FUNCTION__); 7931 sc->bce_link_up = TRUE; 7932 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 7933 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX || 7934 IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) && 7935 bootverbose) 7936 BCE_PRINTF("Gigabit link up!\n"); 7937 7938 /* Now that link is up, handle any outstanding TX traffic. */ 7939 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 7940 DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found " 7941 "pending TX traffic.\n", __FUNCTION__); 7942 bce_start_locked(ifp); 7943 } 7944 } 7945 7946bce_tick_exit: 7947 DBEXIT(BCE_EXTREME_MISC); 7948 return; 7949} 7950 7951 7952#ifdef BCE_DEBUG 7953/****************************************************************************/ 7954/* Allows the driver state to be dumped through the sysctl interface. */ 7955/* */ 7956/* Returns: */ 7957/* 0 for success, positive value for failure. */ 7958/****************************************************************************/ 7959static int 7960bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 7961{ 7962 int error; 7963 int result; 7964 struct bce_softc *sc; 7965 7966 result = -1; 7967 error = sysctl_handle_int(oidp, &result, 0, req); 7968 7969 if (error || !req->newptr) 7970 return (error); 7971 7972 if (result == 1) { 7973 sc = (struct bce_softc *)arg1; 7974 bce_dump_driver_state(sc); 7975 } 7976 7977 return error; 7978} 7979 7980 7981/****************************************************************************/ 7982/* Allows the hardware state to be dumped through the sysctl interface. */ 7983/* */ 7984/* Returns: */ 7985/* 0 for success, positive value for failure. */ 7986/****************************************************************************/ 7987static int 7988bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 7989{ 7990 int error; 7991 int result; 7992 struct bce_softc *sc; 7993 7994 result = -1; 7995 error = sysctl_handle_int(oidp, &result, 0, req); 7996 7997 if (error || !req->newptr) 7998 return (error); 7999 8000 if (result == 1) { 8001 sc = (struct bce_softc *)arg1; 8002 bce_dump_hw_state(sc); 8003 } 8004 8005 return error; 8006} 8007 8008 8009/****************************************************************************/ 8010/* Allows the status block to be dumped through the sysctl interface. */ 8011/* */ 8012/* Returns: */ 8013/* 0 for success, positive value for failure. */ 8014/****************************************************************************/ 8015static int 8016bce_sysctl_status_block(SYSCTL_HANDLER_ARGS) 8017{ 8018 int error; 8019 int result; 8020 struct bce_softc *sc; 8021 8022 result = -1; 8023 error = sysctl_handle_int(oidp, &result, 0, req); 8024 8025 if (error || !req->newptr) 8026 return (error); 8027 8028 if (result == 1) { 8029 sc = (struct bce_softc *)arg1; 8030 bce_dump_status_block(sc); 8031 } 8032 8033 return error; 8034} 8035 8036 8037/****************************************************************************/ 8038/* Allows the stats block to be dumped through the sysctl interface. */ 8039/* */ 8040/* Returns: */ 8041/* 0 for success, positive value for failure. */ 8042/****************************************************************************/ 8043static int 8044bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS) 8045{ 8046 int error; 8047 int result; 8048 struct bce_softc *sc; 8049 8050 result = -1; 8051 error = sysctl_handle_int(oidp, &result, 0, req); 8052 8053 if (error || !req->newptr) 8054 return (error); 8055 8056 if (result == 1) { 8057 sc = (struct bce_softc *)arg1; 8058 bce_dump_stats_block(sc); 8059 } 8060 8061 return error; 8062} 8063 8064 8065/****************************************************************************/ 8066/* Allows the stat counters to be cleared without unloading/reloading the */ 8067/* driver. */ 8068/* */ 8069/* Returns: */ 8070/* 0 for success, positive value for failure. */ 8071/****************************************************************************/ 8072static int 8073bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS) 8074{ 8075 int error; 8076 int result; 8077 struct bce_softc *sc; 8078 8079 result = -1; 8080 error = sysctl_handle_int(oidp, &result, 0, req); 8081 8082 if (error || !req->newptr) 8083 return (error); 8084 8085 if (result == 1) { 8086 sc = (struct bce_softc *)arg1; 8087 8088 /* Clear the internal H/W statistics counters. */ 8089 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 8090 8091 /* Reset the driver maintained statistics. */ 8092 sc->interrupts_rx = 8093 sc->interrupts_tx = 0; 8094 sc->tso_frames_requested = 8095 sc->tso_frames_completed = 8096 sc->tso_frames_failed = 0; 8097 sc->rx_empty_count = 8098 sc->tx_full_count = 0; 8099 sc->rx_low_watermark = USABLE_RX_BD; 8100 sc->tx_hi_watermark = 0; 8101 sc->l2fhdr_error_count = 8102 sc->l2fhdr_error_sim_count = 0; 8103 sc->mbuf_alloc_failed_count = 8104 sc->mbuf_alloc_failed_sim_count = 0; 8105 sc->dma_map_addr_rx_failed_count = 8106 sc->dma_map_addr_tx_failed_count = 0; 8107 sc->mbuf_frag_count = 0; 8108 sc->csum_offload_tcp_udp = 8109 sc->csum_offload_ip = 0; 8110 sc->vlan_tagged_frames_rcvd = 8111 sc->vlan_tagged_frames_stripped = 0; 8112 8113 /* Clear firmware maintained statistics. */ 8114 REG_WR_IND(sc, 0x120084, 0); 8115 } 8116 8117 return error; 8118} 8119 8120 8121/****************************************************************************/ 8122/* Allows the bootcode state to be dumped through the sysctl interface. */ 8123/* */ 8124/* Returns: */ 8125/* 0 for success, positive value for failure. */ 8126/****************************************************************************/ 8127static int 8128bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS) 8129{ 8130 int error; 8131 int result; 8132 struct bce_softc *sc; 8133 8134 result = -1; 8135 error = sysctl_handle_int(oidp, &result, 0, req); 8136 8137 if (error || !req->newptr) 8138 return (error); 8139 8140 if (result == 1) { 8141 sc = (struct bce_softc *)arg1; 8142 bce_dump_bc_state(sc); 8143 } 8144 8145 return error; 8146} 8147 8148 8149/****************************************************************************/ 8150/* Provides a sysctl interface to allow dumping the RX BD chain. */ 8151/* */ 8152/* Returns: */ 8153/* 0 for success, positive value for failure. */ 8154/****************************************************************************/ 8155static int 8156bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS) 8157{ 8158 int error; 8159 int result; 8160 struct bce_softc *sc; 8161 8162 result = -1; 8163 error = sysctl_handle_int(oidp, &result, 0, req); 8164 8165 if (error || !req->newptr) 8166 return (error); 8167 8168 if (result == 1) { 8169 sc = (struct bce_softc *)arg1; 8170 bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD); 8171 } 8172 8173 return error; 8174} 8175 8176 8177/****************************************************************************/ 8178/* Provides a sysctl interface to allow dumping the RX MBUF chain. */ 8179/* */ 8180/* Returns: */ 8181/* 0 for success, positive value for failure. */ 8182/****************************************************************************/ 8183static int 8184bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS) 8185{ 8186 int error; 8187 int result; 8188 struct bce_softc *sc; 8189 8190 result = -1; 8191 error = sysctl_handle_int(oidp, &result, 0, req); 8192 8193 if (error || !req->newptr) 8194 return (error); 8195 8196 if (result == 1) { 8197 sc = (struct bce_softc *)arg1; 8198 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD); 8199 } 8200 8201 return error; 8202} 8203 8204 8205/****************************************************************************/ 8206/* Provides a sysctl interface to allow dumping the TX chain. */ 8207/* */ 8208/* Returns: */ 8209/* 0 for success, positive value for failure. */ 8210/****************************************************************************/ 8211static int 8212bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 8213{ 8214 int error; 8215 int result; 8216 struct bce_softc *sc; 8217 8218 result = -1; 8219 error = sysctl_handle_int(oidp, &result, 0, req); 8220 8221 if (error || !req->newptr) 8222 return (error); 8223 8224 if (result == 1) { 8225 sc = (struct bce_softc *)arg1; 8226 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD); 8227 } 8228 8229 return error; 8230} 8231 8232 8233#ifdef BCE_JUMBO_HDRSPLIT 8234/****************************************************************************/ 8235/* Provides a sysctl interface to allow dumping the page chain. */ 8236/* */ 8237/* Returns: */ 8238/* 0 for success, positive value for failure. */ 8239/****************************************************************************/ 8240static int 8241bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS) 8242{ 8243 int error; 8244 int result; 8245 struct bce_softc *sc; 8246 8247 result = -1; 8248 error = sysctl_handle_int(oidp, &result, 0, req); 8249 8250 if (error || !req->newptr) 8251 return (error); 8252 8253 if (result == 1) { 8254 sc = (struct bce_softc *)arg1; 8255 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD); 8256 } 8257 8258 return error; 8259} 8260#endif 8261 8262/****************************************************************************/ 8263/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */ 8264/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8265/* */ 8266/* Returns: */ 8267/* 0 for success, positive value for failure. */ 8268/****************************************************************************/ 8269static int 8270bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS) 8271{ 8272 struct bce_softc *sc = (struct bce_softc *)arg1; 8273 int error; 8274 u32 result; 8275 u32 val[1]; 8276 u8 *data = (u8 *) val; 8277 8278 result = -1; 8279 error = sysctl_handle_int(oidp, &result, 0, req); 8280 if (error || (req->newptr == NULL)) 8281 return (error); 8282 8283 bce_nvram_read(sc, result, data, 4); 8284 BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0])); 8285 8286 return (error); 8287} 8288 8289 8290/****************************************************************************/ 8291/* Provides a sysctl interface to allow reading arbitrary registers in the */ 8292/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8293/* */ 8294/* Returns: */ 8295/* 0 for success, positive value for failure. */ 8296/****************************************************************************/ 8297static int 8298bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 8299{ 8300 struct bce_softc *sc = (struct bce_softc *)arg1; 8301 int error; 8302 u32 val, result; 8303 8304 result = -1; 8305 error = sysctl_handle_int(oidp, &result, 0, req); 8306 if (error || (req->newptr == NULL)) 8307 return (error); 8308 8309 /* Make sure the register is accessible. */ 8310 if (result < 0x8000) { 8311 val = REG_RD(sc, result); 8312 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 8313 } else if (result < 0x0280000) { 8314 val = REG_RD_IND(sc, result); 8315 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 8316 } 8317 8318 return (error); 8319} 8320 8321 8322/****************************************************************************/ 8323/* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 8324/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8325/* */ 8326/* Returns: */ 8327/* 0 for success, positive value for failure. */ 8328/****************************************************************************/ 8329static int 8330bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 8331{ 8332 struct bce_softc *sc; 8333 device_t dev; 8334 int error, result; 8335 u16 val; 8336 8337 result = -1; 8338 error = sysctl_handle_int(oidp, &result, 0, req); 8339 if (error || (req->newptr == NULL)) 8340 return (error); 8341 8342 /* Make sure the register is accessible. */ 8343 if (result < 0x20) { 8344 sc = (struct bce_softc *)arg1; 8345 dev = sc->bce_dev; 8346 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 8347 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val); 8348 } 8349 return (error); 8350} 8351 8352 8353static int 8354sysctl_nvram_dump(SYSCTL_HANDLER_ARGS) 8355{ 8356 struct bce_softc *sc = (struct bce_softc *)arg1; 8357 int error, i; 8358 8359 if (sc->nvram_buf == NULL) { 8360 sc->nvram_buf = malloc(sc->bce_flash_size, 8361 M_TEMP, M_ZERO | M_WAITOK); 8362 } 8363 if (sc->nvram_buf == NULL) { 8364 return(ENOMEM); 8365 } 8366 if (req->oldlen == sc->bce_flash_size) { 8367 for (i = 0; i < sc->bce_flash_size; i++) { 8368 bce_nvram_read(sc, i, &sc->nvram_buf[i], 1); 8369 } 8370 } 8371 8372 error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size); 8373 8374 return error; 8375} 8376 8377#ifdef BCE_NVRAM_WRITE_SUPPORT 8378static int 8379sysctl_nvram_write(SYSCTL_HANDLER_ARGS) 8380{ 8381 struct bce_softc *sc = (struct bce_softc *)arg1; 8382 int error; 8383 8384 if (sc->nvram_buf == NULL) { 8385 sc->nvram_buf = malloc(sc->bce_flash_size, 8386 M_TEMP, M_ZERO | M_WAITOK); 8387 } 8388 if (sc->nvram_buf == NULL) { 8389 return(ENOMEM); 8390 } 8391 bzero(sc->nvram_buf, sc->bce_flash_size); 8392 error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size); 8393 8394 if (req->newlen == sc->bce_flash_size) { 8395 bce_nvram_write(sc, 0, sc->nvram_buf , sc->bce_flash_size); 8396 } 8397 8398 8399 return error; 8400} 8401#endif 8402 8403 8404/****************************************************************************/ 8405/* Provides a sysctl interface to allow reading a CID. */ 8406/* */ 8407/* Returns: */ 8408/* 0 for success, positive value for failure. */ 8409/****************************************************************************/ 8410static int 8411bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS) 8412{ 8413 struct bce_softc *sc; 8414 int error, result; 8415 8416 result = -1; 8417 error = sysctl_handle_int(oidp, &result, 0, req); 8418 if (error || (req->newptr == NULL)) 8419 return (error); 8420 8421 /* Make sure the register is accessible. */ 8422 if (result <= TX_CID) { 8423 sc = (struct bce_softc *)arg1; 8424 bce_dump_ctx(sc, result); 8425 } 8426 8427 return (error); 8428} 8429 8430 8431 /****************************************************************************/ 8432/* Provides a sysctl interface to forcing the driver to dump state and */ 8433/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8434/* */ 8435/* Returns: */ 8436/* 0 for success, positive value for failure. */ 8437/****************************************************************************/ 8438static int 8439bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 8440{ 8441 int error; 8442 int result; 8443 struct bce_softc *sc; 8444 8445 result = -1; 8446 error = sysctl_handle_int(oidp, &result, 0, req); 8447 8448 if (error || !req->newptr) 8449 return (error); 8450 8451 if (result == 1) { 8452 sc = (struct bce_softc *)arg1; 8453 bce_breakpoint(sc); 8454 } 8455 8456 return error; 8457} 8458#endif 8459 8460 8461/****************************************************************************/ 8462/* Adds any sysctl parameters for tuning or debugging purposes. */ 8463/* */ 8464/* Returns: */ 8465/* 0 for success, positive value for failure. */ 8466/****************************************************************************/ 8467static void 8468bce_add_sysctls(struct bce_softc *sc) 8469{ 8470 struct sysctl_ctx_list *ctx; 8471 struct sysctl_oid_list *children; 8472 8473 DBENTER(BCE_VERBOSE_MISC); 8474 8475 ctx = device_get_sysctl_ctx(sc->bce_dev); 8476 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); 8477 8478#ifdef BCE_DEBUG 8479 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8480 "l2fhdr_error_sim_control", 8481 CTLFLAG_RW, &l2fhdr_error_sim_control, 8482 0, "Debug control to force l2fhdr errors"); 8483 8484 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8485 "l2fhdr_error_sim_count", 8486 CTLFLAG_RD, &sc->l2fhdr_error_sim_count, 8487 0, "Number of simulated l2_fhdr errors"); 8488#endif 8489 8490 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8491 "l2fhdr_error_count", 8492 CTLFLAG_RD, &sc->l2fhdr_error_count, 8493 0, "Number of l2_fhdr errors"); 8494 8495#ifdef BCE_DEBUG 8496 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8497 "mbuf_alloc_failed_sim_control", 8498 CTLFLAG_RW, &mbuf_alloc_failed_sim_control, 8499 0, "Debug control to force mbuf allocation failures"); 8500 8501 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8502 "mbuf_alloc_failed_sim_count", 8503 CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count, 8504 0, "Number of simulated mbuf cluster allocation failures"); 8505#endif 8506 8507 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8508 "mbuf_alloc_failed_count", 8509 CTLFLAG_RD, &sc->mbuf_alloc_failed_count, 8510 0, "Number of mbuf allocation failures"); 8511 8512 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8513 "mbuf_frag_count", 8514 CTLFLAG_RD, &sc->mbuf_frag_count, 8515 0, "Number of fragmented mbufs"); 8516 8517#ifdef BCE_DEBUG 8518 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8519 "dma_map_addr_failed_sim_control", 8520 CTLFLAG_RW, &dma_map_addr_failed_sim_control, 8521 0, "Debug control to force DMA mapping failures"); 8522 8523 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */ 8524 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8525 "dma_map_addr_failed_sim_count", 8526 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count, 8527 0, "Number of simulated DMA mapping failures"); 8528 8529#endif 8530 8531 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8532 "dma_map_addr_rx_failed_count", 8533 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count, 8534 0, "Number of RX DMA mapping failures"); 8535 8536 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8537 "dma_map_addr_tx_failed_count", 8538 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count, 8539 0, "Number of TX DMA mapping failures"); 8540 8541#ifdef BCE_DEBUG 8542 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8543 "unexpected_attention_sim_control", 8544 CTLFLAG_RW, &unexpected_attention_sim_control, 8545 0, "Debug control to simulate unexpected attentions"); 8546 8547 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8548 "unexpected_attention_sim_count", 8549 CTLFLAG_RW, &sc->unexpected_attention_sim_count, 8550 0, "Number of simulated unexpected attentions"); 8551#endif 8552 8553 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8554 "unexpected_attention_count", 8555 CTLFLAG_RW, &sc->unexpected_attention_count, 8556 0, "Number of unexpected attentions"); 8557 8558#ifdef BCE_DEBUG 8559 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8560 "debug_bootcode_running_failure", 8561 CTLFLAG_RW, &bootcode_running_failure_sim_control, 8562 0, "Debug control to force bootcode running failures"); 8563 8564 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8565 "rx_low_watermark", 8566 CTLFLAG_RD, &sc->rx_low_watermark, 8567 0, "Lowest level of free rx_bd's"); 8568 8569 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8570 "rx_empty_count", 8571 CTLFLAG_RD, &sc->rx_empty_count, 8572 0, "Number of times the RX chain was empty"); 8573 8574 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8575 "tx_hi_watermark", 8576 CTLFLAG_RD, &sc->tx_hi_watermark, 8577 0, "Highest level of used tx_bd's"); 8578 8579 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8580 "tx_full_count", 8581 CTLFLAG_RD, &sc->tx_full_count, 8582 0, "Number of times the TX chain was full"); 8583 8584 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8585 "tso_frames_requested", 8586 CTLFLAG_RD, &sc->tso_frames_requested, 8587 0, "Number of TSO frames requested"); 8588 8589 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8590 "tso_frames_completed", 8591 CTLFLAG_RD, &sc->tso_frames_completed, 8592 0, "Number of TSO frames completed"); 8593 8594 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8595 "tso_frames_failed", 8596 CTLFLAG_RD, &sc->tso_frames_failed, 8597 0, "Number of TSO frames failed"); 8598 8599 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8600 "csum_offload_ip", 8601 CTLFLAG_RD, &sc->csum_offload_ip, 8602 0, "Number of IP checksum offload frames"); 8603 8604 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8605 "csum_offload_tcp_udp", 8606 CTLFLAG_RD, &sc->csum_offload_tcp_udp, 8607 0, "Number of TCP/UDP checksum offload frames"); 8608 8609 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8610 "vlan_tagged_frames_rcvd", 8611 CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd, 8612 0, "Number of VLAN tagged frames received"); 8613 8614 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8615 "vlan_tagged_frames_stripped", 8616 CTLFLAG_RD, &sc->vlan_tagged_frames_stripped, 8617 0, "Number of VLAN tagged frames stripped"); 8618 8619 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8620 "interrupts_rx", 8621 CTLFLAG_RD, &sc->interrupts_rx, 8622 0, "Number of RX interrupts"); 8623 8624 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8625 "interrupts_tx", 8626 CTLFLAG_RD, &sc->interrupts_tx, 8627 0, "Number of TX interrupts"); 8628 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8629 "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD, 8630 (void *)sc, 0, 8631 sysctl_nvram_dump, "S", ""); 8632#ifdef BCE_NVRAM_WRITE_SUPPORT 8633 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8634 "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR, 8635 (void *)sc, 0, 8636 sysctl_nvram_write, "S", ""); 8637#endif 8638#endif 8639 8640 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8641 "stat_IfHcInOctets", 8642 CTLFLAG_RD, &sc->stat_IfHCInOctets, 8643 "Bytes received"); 8644 8645 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8646 "stat_IfHCInBadOctets", 8647 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 8648 "Bad bytes received"); 8649 8650 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8651 "stat_IfHCOutOctets", 8652 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 8653 "Bytes sent"); 8654 8655 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8656 "stat_IfHCOutBadOctets", 8657 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 8658 "Bad bytes sent"); 8659 8660 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8661 "stat_IfHCInUcastPkts", 8662 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 8663 "Unicast packets received"); 8664 8665 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8666 "stat_IfHCInMulticastPkts", 8667 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 8668 "Multicast packets received"); 8669 8670 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8671 "stat_IfHCInBroadcastPkts", 8672 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 8673 "Broadcast packets received"); 8674 8675 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8676 "stat_IfHCOutUcastPkts", 8677 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 8678 "Unicast packets sent"); 8679 8680 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8681 "stat_IfHCOutMulticastPkts", 8682 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 8683 "Multicast packets sent"); 8684 8685 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8686 "stat_IfHCOutBroadcastPkts", 8687 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 8688 "Broadcast packets sent"); 8689 8690 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8691 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 8692 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 8693 0, "Internal MAC transmit errors"); 8694 8695 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8696 "stat_Dot3StatsCarrierSenseErrors", 8697 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 8698 0, "Carrier sense errors"); 8699 8700 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8701 "stat_Dot3StatsFCSErrors", 8702 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 8703 0, "Frame check sequence errors"); 8704 8705 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8706 "stat_Dot3StatsAlignmentErrors", 8707 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 8708 0, "Alignment errors"); 8709 8710 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8711 "stat_Dot3StatsSingleCollisionFrames", 8712 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 8713 0, "Single Collision Frames"); 8714 8715 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8716 "stat_Dot3StatsMultipleCollisionFrames", 8717 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 8718 0, "Multiple Collision Frames"); 8719 8720 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8721 "stat_Dot3StatsDeferredTransmissions", 8722 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 8723 0, "Deferred Transmissions"); 8724 8725 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8726 "stat_Dot3StatsExcessiveCollisions", 8727 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 8728 0, "Excessive Collisions"); 8729 8730 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8731 "stat_Dot3StatsLateCollisions", 8732 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 8733 0, "Late Collisions"); 8734 8735 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8736 "stat_EtherStatsCollisions", 8737 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 8738 0, "Collisions"); 8739 8740 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8741 "stat_EtherStatsFragments", 8742 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 8743 0, "Fragments"); 8744 8745 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8746 "stat_EtherStatsJabbers", 8747 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 8748 0, "Jabbers"); 8749 8750 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8751 "stat_EtherStatsUndersizePkts", 8752 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 8753 0, "Undersize packets"); 8754 8755 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8756 "stat_EtherStatsOversizePkts", 8757 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts, 8758 0, "stat_EtherStatsOversizePkts"); 8759 8760 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8761 "stat_EtherStatsPktsRx64Octets", 8762 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 8763 0, "Bytes received in 64 byte packets"); 8764 8765 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8766 "stat_EtherStatsPktsRx65Octetsto127Octets", 8767 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 8768 0, "Bytes received in 65 to 127 byte packets"); 8769 8770 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8771 "stat_EtherStatsPktsRx128Octetsto255Octets", 8772 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 8773 0, "Bytes received in 128 to 255 byte packets"); 8774 8775 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8776 "stat_EtherStatsPktsRx256Octetsto511Octets", 8777 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 8778 0, "Bytes received in 256 to 511 byte packets"); 8779 8780 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8781 "stat_EtherStatsPktsRx512Octetsto1023Octets", 8782 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 8783 0, "Bytes received in 512 to 1023 byte packets"); 8784 8785 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8786 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 8787 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 8788 0, "Bytes received in 1024 t0 1522 byte packets"); 8789 8790 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8791 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 8792 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 8793 0, "Bytes received in 1523 to 9022 byte packets"); 8794 8795 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8796 "stat_EtherStatsPktsTx64Octets", 8797 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 8798 0, "Bytes sent in 64 byte packets"); 8799 8800 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8801 "stat_EtherStatsPktsTx65Octetsto127Octets", 8802 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 8803 0, "Bytes sent in 65 to 127 byte packets"); 8804 8805 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8806 "stat_EtherStatsPktsTx128Octetsto255Octets", 8807 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 8808 0, "Bytes sent in 128 to 255 byte packets"); 8809 8810 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8811 "stat_EtherStatsPktsTx256Octetsto511Octets", 8812 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 8813 0, "Bytes sent in 256 to 511 byte packets"); 8814 8815 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8816 "stat_EtherStatsPktsTx512Octetsto1023Octets", 8817 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 8818 0, "Bytes sent in 512 to 1023 byte packets"); 8819 8820 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8821 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 8822 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 8823 0, "Bytes sent in 1024 to 1522 byte packets"); 8824 8825 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8826 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 8827 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 8828 0, "Bytes sent in 1523 to 9022 byte packets"); 8829 8830 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8831 "stat_XonPauseFramesReceived", 8832 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 8833 0, "XON pause frames receved"); 8834 8835 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8836 "stat_XoffPauseFramesReceived", 8837 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 8838 0, "XOFF pause frames received"); 8839 8840 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8841 "stat_OutXonSent", 8842 CTLFLAG_RD, &sc->stat_OutXonSent, 8843 0, "XON pause frames sent"); 8844 8845 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8846 "stat_OutXoffSent", 8847 CTLFLAG_RD, &sc->stat_OutXoffSent, 8848 0, "XOFF pause frames sent"); 8849 8850 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8851 "stat_FlowControlDone", 8852 CTLFLAG_RD, &sc->stat_FlowControlDone, 8853 0, "Flow control done"); 8854 8855 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8856 "stat_MacControlFramesReceived", 8857 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 8858 0, "MAC control frames received"); 8859 8860 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8861 "stat_XoffStateEntered", 8862 CTLFLAG_RD, &sc->stat_XoffStateEntered, 8863 0, "XOFF state entered"); 8864 8865 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8866 "stat_IfInFramesL2FilterDiscards", 8867 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 8868 0, "Received L2 packets discarded"); 8869 8870 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8871 "stat_IfInRuleCheckerDiscards", 8872 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 8873 0, "Received packets discarded by rule"); 8874 8875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8876 "stat_IfInFTQDiscards", 8877 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 8878 0, "Received packet FTQ discards"); 8879 8880 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8881 "stat_IfInMBUFDiscards", 8882 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 8883 0, "Received packets discarded due to lack " 8884 "of controller buffer memory"); 8885 8886 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8887 "stat_IfInRuleCheckerP4Hit", 8888 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 8889 0, "Received packets rule checker hits"); 8890 8891 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8892 "stat_CatchupInRuleCheckerDiscards", 8893 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 8894 0, "Received packets discarded in Catchup path"); 8895 8896 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8897 "stat_CatchupInFTQDiscards", 8898 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 8899 0, "Received packets discarded in FTQ in Catchup path"); 8900 8901 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8902 "stat_CatchupInMBUFDiscards", 8903 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 8904 0, "Received packets discarded in controller " 8905 "buffer memory in Catchup path"); 8906 8907 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8908 "stat_CatchupInRuleCheckerP4Hit", 8909 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 8910 0, "Received packets rule checker hits in Catchup path"); 8911 8912 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8913 "com_no_buffers", 8914 CTLFLAG_RD, &sc->com_no_buffers, 8915 0, "Valid packets received but no RX buffers available"); 8916 8917#ifdef BCE_DEBUG 8918 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8919 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 8920 (void *)sc, 0, 8921 bce_sysctl_driver_state, "I", "Drive state information"); 8922 8923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8924 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 8925 (void *)sc, 0, 8926 bce_sysctl_hw_state, "I", "Hardware state information"); 8927 8928 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8929 "status_block", CTLTYPE_INT | CTLFLAG_RW, 8930 (void *)sc, 0, 8931 bce_sysctl_status_block, "I", "Dump status block"); 8932 8933 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8934 "stats_block", CTLTYPE_INT | CTLFLAG_RW, 8935 (void *)sc, 0, 8936 bce_sysctl_stats_block, "I", "Dump statistics block"); 8937 8938 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8939 "stats_clear", CTLTYPE_INT | CTLFLAG_RW, 8940 (void *)sc, 0, 8941 bce_sysctl_stats_clear, "I", "Clear statistics block"); 8942 8943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8944 "bc_state", CTLTYPE_INT | CTLFLAG_RW, 8945 (void *)sc, 0, 8946 bce_sysctl_bc_state, "I", "Bootcode state information"); 8947 8948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8949 "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW, 8950 (void *)sc, 0, 8951 bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain"); 8952 8953 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8954 "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW, 8955 (void *)sc, 0, 8956 bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain"); 8957 8958 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8959 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 8960 (void *)sc, 0, 8961 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 8962 8963#ifdef BCE_JUMBO_HDRSPLIT 8964 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8965 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW, 8966 (void *)sc, 0, 8967 bce_sysctl_dump_pg_chain, "I", "Dump page chain"); 8968#endif 8969 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8970 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW, 8971 (void *)sc, 0, 8972 bce_sysctl_dump_ctx, "I", "Dump context memory"); 8973 8974 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8975 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 8976 (void *)sc, 0, 8977 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 8978 8979 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8980 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 8981 (void *)sc, 0, 8982 bce_sysctl_reg_read, "I", "Register read"); 8983 8984 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8985 "nvram_read", CTLTYPE_INT | CTLFLAG_RW, 8986 (void *)sc, 0, 8987 bce_sysctl_nvram_read, "I", "NVRAM read"); 8988 8989 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8990 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 8991 (void *)sc, 0, 8992 bce_sysctl_phy_read, "I", "PHY register read"); 8993 8994#endif 8995 8996 DBEXIT(BCE_VERBOSE_MISC); 8997} 8998 8999 9000/****************************************************************************/ 9001/* BCE Debug Routines */ 9002/****************************************************************************/ 9003#ifdef BCE_DEBUG 9004 9005/****************************************************************************/ 9006/* Freezes the controller to allow for a cohesive state dump. */ 9007/* */ 9008/* Returns: */ 9009/* Nothing. */ 9010/****************************************************************************/ 9011static __attribute__ ((noinline)) void 9012bce_freeze_controller(struct bce_softc *sc) 9013{ 9014 u32 val; 9015 val = REG_RD(sc, BCE_MISC_COMMAND); 9016 val |= BCE_MISC_COMMAND_DISABLE_ALL; 9017 REG_WR(sc, BCE_MISC_COMMAND, val); 9018} 9019 9020 9021/****************************************************************************/ 9022/* Unfreezes the controller after a freeze operation. This may not always */ 9023/* work and the controller will require a reset! */ 9024/* */ 9025/* Returns: */ 9026/* Nothing. */ 9027/****************************************************************************/ 9028static __attribute__ ((noinline)) void 9029bce_unfreeze_controller(struct bce_softc *sc) 9030{ 9031 u32 val; 9032 val = REG_RD(sc, BCE_MISC_COMMAND); 9033 val |= BCE_MISC_COMMAND_ENABLE_ALL; 9034 REG_WR(sc, BCE_MISC_COMMAND, val); 9035} 9036 9037 9038/****************************************************************************/ 9039/* Prints out Ethernet frame information from an mbuf. */ 9040/* */ 9041/* Partially decode an Ethernet frame to look at some important headers. */ 9042/* */ 9043/* Returns: */ 9044/* Nothing. */ 9045/****************************************************************************/ 9046static __attribute__ ((noinline)) void 9047bce_dump_enet(struct bce_softc *sc, struct mbuf *m) 9048{ 9049 struct ether_vlan_header *eh; 9050 u16 etype; 9051 int ehlen; 9052 struct ip *ip; 9053 struct tcphdr *th; 9054 struct udphdr *uh; 9055 struct arphdr *ah; 9056 9057 BCE_PRINTF( 9058 "-----------------------------" 9059 " Frame Decode " 9060 "-----------------------------\n"); 9061 9062 eh = mtod(m, struct ether_vlan_header *); 9063 9064 /* Handle VLAN encapsulation if present. */ 9065 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 9066 etype = ntohs(eh->evl_proto); 9067 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 9068 } else { 9069 etype = ntohs(eh->evl_encap_proto); 9070 ehlen = ETHER_HDR_LEN; 9071 } 9072 9073 /* ToDo: Add VLAN output. */ 9074 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n", 9075 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen); 9076 9077 switch (etype) { 9078 case ETHERTYPE_IP: 9079 ip = (struct ip *)(m->m_data + ehlen); 9080 BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, " 9081 "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n", 9082 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr), 9083 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum)); 9084 9085 switch (ip->ip_p) { 9086 case IPPROTO_TCP: 9087 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9088 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = " 9089 "%d bytes, flags = 0x%b, csum = 0x%04X\n", 9090 ntohs(th->th_dport), ntohs(th->th_sport), 9091 (th->th_off << 2), th->th_flags, 9092 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST" 9093 "\02SYN\01FIN", ntohs(th->th_sum)); 9094 break; 9095 case IPPROTO_UDP: 9096 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9097 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d " 9098 "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport), 9099 ntohs(uh->uh_sport), ntohs(uh->uh_ulen), 9100 ntohs(uh->uh_sum)); 9101 break; 9102 case IPPROTO_ICMP: 9103 BCE_PRINTF("icmp:\n"); 9104 break; 9105 default: 9106 BCE_PRINTF("----: Other IP protocol.\n"); 9107 } 9108 break; 9109 case ETHERTYPE_IPV6: 9110 BCE_PRINTF("ipv6: No decode supported.\n"); 9111 break; 9112 case ETHERTYPE_ARP: 9113 BCE_PRINTF("-arp: "); 9114 ah = (struct arphdr *) (m->m_data + ehlen); 9115 switch (ntohs(ah->ar_op)) { 9116 case ARPOP_REVREQUEST: 9117 printf("reverse ARP request\n"); 9118 break; 9119 case ARPOP_REVREPLY: 9120 printf("reverse ARP reply\n"); 9121 break; 9122 case ARPOP_REQUEST: 9123 printf("ARP request\n"); 9124 break; 9125 case ARPOP_REPLY: 9126 printf("ARP reply\n"); 9127 break; 9128 default: 9129 printf("other ARP operation\n"); 9130 } 9131 break; 9132 default: 9133 BCE_PRINTF("----: Other protocol.\n"); 9134 } 9135 9136 BCE_PRINTF( 9137 "-----------------------------" 9138 "--------------" 9139 "-----------------------------\n"); 9140} 9141 9142 9143/****************************************************************************/ 9144/* Prints out information about an mbuf. */ 9145/* */ 9146/* Returns: */ 9147/* Nothing. */ 9148/****************************************************************************/ 9149static __attribute__ ((noinline)) void 9150bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 9151{ 9152 struct mbuf *mp = m; 9153 9154 if (m == NULL) { 9155 BCE_PRINTF("mbuf: null pointer\n"); 9156 return; 9157 } 9158 9159 while (mp) { 9160 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, " 9161 "m_data = %p\n", mp, mp->m_len, mp->m_flags, 9162 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data); 9163 9164 if (mp->m_flags & M_PKTHDR) { 9165 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, " 9166 "csum_flags = %b\n", mp->m_pkthdr.len, 9167 mp->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG" 9168 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" 9169 "\22M_PROMISC\23M_NOFREE", 9170 mp->m_pkthdr.csum_flags, 9171 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 9172 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 9173 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 9174 "\14CSUM_PSEUDO_HDR"); 9175 } 9176 9177 if (mp->m_flags & M_EXT) { 9178 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", 9179 mp->m_ext.ext_buf, mp->m_ext.ext_size); 9180 switch (mp->m_ext.ext_type) { 9181 case EXT_CLUSTER: 9182 printf("EXT_CLUSTER\n"); break; 9183 case EXT_SFBUF: 9184 printf("EXT_SFBUF\n"); break; 9185 case EXT_JUMBO9: 9186 printf("EXT_JUMBO9\n"); break; 9187 case EXT_JUMBO16: 9188 printf("EXT_JUMBO16\n"); break; 9189 case EXT_PACKET: 9190 printf("EXT_PACKET\n"); break; 9191 case EXT_MBUF: 9192 printf("EXT_MBUF\n"); break; 9193 case EXT_NET_DRV: 9194 printf("EXT_NET_DRV\n"); break; 9195 case EXT_MOD_TYPE: 9196 printf("EXT_MDD_TYPE\n"); break; 9197 case EXT_DISPOSABLE: 9198 printf("EXT_DISPOSABLE\n"); break; 9199 case EXT_EXTREF: 9200 printf("EXT_EXTREF\n"); break; 9201 default: 9202 printf("UNKNOWN\n"); 9203 } 9204 } 9205 9206 mp = mp->m_next; 9207 } 9208} 9209 9210 9211/****************************************************************************/ 9212/* Prints out the mbufs in the TX mbuf chain. */ 9213/* */ 9214/* Returns: */ 9215/* Nothing. */ 9216/****************************************************************************/ 9217static __attribute__ ((noinline)) void 9218bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9219{ 9220 struct mbuf *m; 9221 9222 BCE_PRINTF( 9223 "----------------------------" 9224 " tx mbuf data " 9225 "----------------------------\n"); 9226 9227 for (int i = 0; i < count; i++) { 9228 m = sc->tx_mbuf_ptr[chain_prod]; 9229 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); 9230 bce_dump_mbuf(sc, m); 9231 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 9232 } 9233 9234 BCE_PRINTF( 9235 "----------------------------" 9236 "----------------" 9237 "----------------------------\n"); 9238} 9239 9240 9241/****************************************************************************/ 9242/* Prints out the mbufs in the RX mbuf chain. */ 9243/* */ 9244/* Returns: */ 9245/* Nothing. */ 9246/****************************************************************************/ 9247static __attribute__ ((noinline)) void 9248bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9249{ 9250 struct mbuf *m; 9251 9252 BCE_PRINTF( 9253 "----------------------------" 9254 " rx mbuf data " 9255 "----------------------------\n"); 9256 9257 for (int i = 0; i < count; i++) { 9258 m = sc->rx_mbuf_ptr[chain_prod]; 9259 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); 9260 bce_dump_mbuf(sc, m); 9261 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 9262 } 9263 9264 9265 BCE_PRINTF( 9266 "----------------------------" 9267 "----------------" 9268 "----------------------------\n"); 9269} 9270 9271 9272#ifdef BCE_JUMBO_HDRSPLIT 9273/****************************************************************************/ 9274/* Prints out the mbufs in the mbuf page chain. */ 9275/* */ 9276/* Returns: */ 9277/* Nothing. */ 9278/****************************************************************************/ 9279static __attribute__ ((noinline)) void 9280bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9281{ 9282 struct mbuf *m; 9283 9284 BCE_PRINTF( 9285 "----------------------------" 9286 " pg mbuf data " 9287 "----------------------------\n"); 9288 9289 for (int i = 0; i < count; i++) { 9290 m = sc->pg_mbuf_ptr[chain_prod]; 9291 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); 9292 bce_dump_mbuf(sc, m); 9293 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); 9294 } 9295 9296 9297 BCE_PRINTF( 9298 "----------------------------" 9299 "----------------" 9300 "----------------------------\n"); 9301} 9302#endif 9303 9304 9305/****************************************************************************/ 9306/* Prints out a tx_bd structure. */ 9307/* */ 9308/* Returns: */ 9309/* Nothing. */ 9310/****************************************************************************/ 9311static __attribute__ ((noinline)) void 9312bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 9313{ 9314 int i = 0; 9315 9316 if (idx > MAX_TX_BD) 9317 /* Index out of range. */ 9318 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 9319 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 9320 /* TX Chain page pointer. */ 9321 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9322 "pointer\n", idx, txbd->tx_bd_haddr_hi, 9323 txbd->tx_bd_haddr_lo); 9324 else { 9325 /* Normal tx_bd entry. */ 9326 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 9327 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " 9328 "0x%04X (", idx, txbd->tx_bd_haddr_hi, 9329 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, 9330 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 9331 9332 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { 9333 if (i>0) 9334 printf("|"); 9335 printf("CONN_FAULT"); 9336 i++; 9337 } 9338 9339 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { 9340 if (i>0) 9341 printf("|"); 9342 printf("TCP_UDP_CKSUM"); 9343 i++; 9344 } 9345 9346 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { 9347 if (i>0) 9348 printf("|"); 9349 printf("IP_CKSUM"); 9350 i++; 9351 } 9352 9353 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { 9354 if (i>0) 9355 printf("|"); 9356 printf("VLAN"); 9357 i++; 9358 } 9359 9360 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { 9361 if (i>0) 9362 printf("|"); 9363 printf("COAL_NOW"); 9364 i++; 9365 } 9366 9367 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { 9368 if (i>0) 9369 printf("|"); 9370 printf("DONT_GEN_CRC"); 9371 i++; 9372 } 9373 9374 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { 9375 if (i>0) 9376 printf("|"); 9377 printf("START"); 9378 i++; 9379 } 9380 9381 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { 9382 if (i>0) 9383 printf("|"); 9384 printf("END"); 9385 i++; 9386 } 9387 9388 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) { 9389 if (i>0) 9390 printf("|"); 9391 printf("LSO"); 9392 i++; 9393 } 9394 9395 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { 9396 if (i>0) 9397 printf("|"); 9398 printf("SW_OPTION=%d", ((txbd->tx_bd_flags & 9399 TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; 9400 } 9401 9402 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { 9403 if (i>0) 9404 printf("|"); 9405 printf("SW_FLAGS"); 9406 i++; 9407 } 9408 9409 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { 9410 if (i>0) 9411 printf("|"); 9412 printf("SNAP)"); 9413 } else { 9414 printf(")\n"); 9415 } 9416 } 9417} 9418 9419 9420/****************************************************************************/ 9421/* Prints out a rx_bd structure. */ 9422/* */ 9423/* Returns: */ 9424/* Nothing. */ 9425/****************************************************************************/ 9426static __attribute__ ((noinline)) void 9427bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 9428{ 9429 if (idx > MAX_RX_BD) 9430 /* Index out of range. */ 9431 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 9432 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 9433 /* RX Chain page pointer. */ 9434 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9435 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 9436 rxbd->rx_bd_haddr_lo); 9437 else 9438 /* Normal rx_bd entry. */ 9439 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 9440 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, 9441 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, 9442 rxbd->rx_bd_flags); 9443} 9444 9445 9446#ifdef BCE_JUMBO_HDRSPLIT 9447/****************************************************************************/ 9448/* Prints out a rx_bd structure in the page chain. */ 9449/* */ 9450/* Returns: */ 9451/* Nothing. */ 9452/****************************************************************************/ 9453static __attribute__ ((noinline)) void 9454bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) 9455{ 9456 if (idx > MAX_PG_BD) 9457 /* Index out of range. */ 9458 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); 9459 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) 9460 /* Page Chain page pointer. */ 9461 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 9462 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); 9463 else 9464 /* Normal rx_bd entry. */ 9465 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 9466 "flags = 0x%08X\n", idx, 9467 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, 9468 pgbd->rx_bd_len, pgbd->rx_bd_flags); 9469} 9470#endif 9471 9472 9473/****************************************************************************/ 9474/* Prints out a l2_fhdr structure. */ 9475/* */ 9476/* Returns: */ 9477/* Nothing. */ 9478/****************************************************************************/ 9479static __attribute__ ((noinline)) void 9480bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 9481{ 9482 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " 9483 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " 9484 "tcp_udp_xsum = 0x%04X\n", idx, 9485 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, 9486 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 9487 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 9488} 9489 9490 9491/****************************************************************************/ 9492/* Prints out context memory info. (Only useful for CID 0 to 16.) */ 9493/* */ 9494/* Returns: */ 9495/* Nothing. */ 9496/****************************************************************************/ 9497static __attribute__ ((noinline)) void 9498bce_dump_ctx(struct bce_softc *sc, u16 cid) 9499{ 9500 if (cid > TX_CID) { 9501 BCE_PRINTF(" Unknown CID\n"); 9502 return; 9503 } 9504 9505 BCE_PRINTF( 9506 "----------------------------" 9507 " CTX Data " 9508 "----------------------------\n"); 9509 9510 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); 9511 9512 if (cid == RX_CID) { 9513 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " 9514 "producer index\n", 9515 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); 9516 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " 9517 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9518 BCE_L2CTX_RX_HOST_BSEQ)); 9519 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", 9520 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); 9521 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " 9522 "descriptor address\n", 9523 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); 9524 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " 9525 "descriptor address\n", 9526 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); 9527 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " 9528 "index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9529 BCE_L2CTX_RX_NX_BDIDX)); 9530 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " 9531 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9532 BCE_L2CTX_RX_HOST_PG_BDIDX)); 9533 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " 9534 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), 9535 BCE_L2CTX_RX_PG_BUF_SIZE)); 9536 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " 9537 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9538 BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); 9539 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " 9540 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9541 BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); 9542 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page " 9543 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9544 BCE_L2CTX_RX_NX_PG_BDIDX)); 9545 } else if (cid == TX_CID) { 9546 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9547 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9548 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", 9549 CTX_RD(sc, GET_CID_ADDR(cid), 9550 BCE_L2CTX_TX_TYPE_XI)); 9551 BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx " 9552 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), 9553 BCE_L2CTX_TX_CMD_TYPE_XI)); 9554 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) " 9555 "h/w buffer descriptor address\n", 9556 CTX_RD(sc, GET_CID_ADDR(cid), 9557 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); 9558 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " 9559 "h/w buffer descriptor address\n", 9560 CTX_RD(sc, GET_CID_ADDR(cid), 9561 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); 9562 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " 9563 "host producer index\n", 9564 CTX_RD(sc, GET_CID_ADDR(cid), 9565 BCE_L2CTX_TX_HOST_BIDX_XI)); 9566 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " 9567 "host byte sequence\n", 9568 CTX_RD(sc, GET_CID_ADDR(cid), 9569 BCE_L2CTX_TX_HOST_BSEQ_XI)); 9570 } else { 9571 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", 9572 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); 9573 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n", 9574 CTX_RD(sc, GET_CID_ADDR(cid), 9575 BCE_L2CTX_TX_CMD_TYPE)); 9576 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) " 9577 "h/w buffer descriptor address\n", 9578 CTX_RD(sc, GET_CID_ADDR(cid), 9579 BCE_L2CTX_TX_TBDR_BHADDR_HI)); 9580 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " 9581 "h/w buffer descriptor address\n", 9582 CTX_RD(sc, GET_CID_ADDR(cid), 9583 BCE_L2CTX_TX_TBDR_BHADDR_LO)); 9584 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " 9585 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9586 BCE_L2CTX_TX_HOST_BIDX)); 9587 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " 9588 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9589 BCE_L2CTX_TX_HOST_BSEQ)); 9590 } 9591 } 9592 9593 BCE_PRINTF( 9594 "----------------------------" 9595 " Raw CTX " 9596 "----------------------------\n"); 9597 9598 for (int i = 0x0; i < 0x300; i += 0x10) { 9599 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 9600 CTX_RD(sc, GET_CID_ADDR(cid), i), 9601 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), 9602 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), 9603 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); 9604 } 9605 9606 9607 BCE_PRINTF( 9608 "----------------------------" 9609 "----------------" 9610 "----------------------------\n"); 9611} 9612 9613 9614/****************************************************************************/ 9615/* Prints out the FTQ data. */ 9616/* */ 9617/* Returns: */ 9618/* Nothing. */ 9619/****************************************************************************/ 9620static __attribute__ ((noinline)) void 9621bce_dump_ftqs(struct bce_softc *sc) 9622{ 9623 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; 9624 9625 BCE_PRINTF( 9626 "----------------------------" 9627 " FTQ Data " 9628 "----------------------------\n"); 9629 9630 BCE_PRINTF(" FTQ Command Control Depth_Now " 9631 "Max_Depth Valid_Cnt \n"); 9632 BCE_PRINTF(" ------- ---------- ---------- ---------- " 9633 "---------- ----------\n"); 9634 9635 /* Setup the generic statistic counters for the FTQ valid count. */ 9636 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | 9637 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | 9638 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | 9639 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); 9640 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9641 9642 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | 9643 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | 9644 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | 9645 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); 9646 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); 9647 9648 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | 9649 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | 9650 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | 9651 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); 9652 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); 9653 9654 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | 9655 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | 9656 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | 9657 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); 9658 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); 9659 9660 /* Input queue to the Receive Lookup state machine */ 9661 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); 9662 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); 9663 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; 9664 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; 9665 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9666 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9667 cmd, ctl, cur_depth, max_depth, valid_cnt); 9668 9669 /* Input queue to the Receive Processor */ 9670 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); 9671 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); 9672 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; 9673 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; 9674 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9675 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9676 cmd, ctl, cur_depth, max_depth, valid_cnt); 9677 9678 /* Input queue to the Recevie Processor */ 9679 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); 9680 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); 9681 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; 9682 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; 9683 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9684 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9685 cmd, ctl, cur_depth, max_depth, valid_cnt); 9686 9687 /* Input queue to the Receive Virtual to Physical state machine */ 9688 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); 9689 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); 9690 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; 9691 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; 9692 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9693 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9694 cmd, ctl, cur_depth, max_depth, valid_cnt); 9695 9696 /* Input queue to the Recevie Virtual to Physical state machine */ 9697 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); 9698 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); 9699 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; 9700 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; 9701 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); 9702 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9703 cmd, ctl, cur_depth, max_depth, valid_cnt); 9704 9705 /* Input queue to the Receive Virtual to Physical state machine */ 9706 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); 9707 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); 9708 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; 9709 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; 9710 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); 9711 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9712 cmd, ctl, cur_depth, max_depth, valid_cnt); 9713 9714 /* Input queue to the Receive DMA state machine */ 9715 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); 9716 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); 9717 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22; 9718 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9719 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); 9720 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9721 cmd, ctl, cur_depth, max_depth, valid_cnt); 9722 9723 /* Input queue to the Transmit Scheduler state machine */ 9724 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); 9725 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); 9726 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; 9727 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; 9728 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); 9729 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9730 cmd, ctl, cur_depth, max_depth, valid_cnt); 9731 9732 /* Input queue to the Transmit Buffer Descriptor state machine */ 9733 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); 9734 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); 9735 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; 9736 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; 9737 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); 9738 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9739 cmd, ctl, cur_depth, max_depth, valid_cnt); 9740 9741 /* Input queue to the Transmit Processor */ 9742 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); 9743 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); 9744 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; 9745 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; 9746 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); 9747 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9748 cmd, ctl, cur_depth, max_depth, valid_cnt); 9749 9750 /* Input queue to the Transmit DMA state machine */ 9751 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); 9752 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); 9753 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; 9754 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9755 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); 9756 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9757 cmd, ctl, cur_depth, max_depth, valid_cnt); 9758 9759 /* Input queue to the Transmit Patch-Up Processor */ 9760 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); 9761 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); 9762 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; 9763 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; 9764 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); 9765 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9766 cmd, ctl, cur_depth, max_depth, valid_cnt); 9767 9768 /* Input queue to the Transmit Assembler state machine */ 9769 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); 9770 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); 9771 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; 9772 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; 9773 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); 9774 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9775 cmd, ctl, cur_depth, max_depth, valid_cnt); 9776 9777 /* Input queue to the Completion Processor */ 9778 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); 9779 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); 9780 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; 9781 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; 9782 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); 9783 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9784 cmd, ctl, cur_depth, max_depth, valid_cnt); 9785 9786 /* Input queue to the Completion Processor */ 9787 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); 9788 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); 9789 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; 9790 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; 9791 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); 9792 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9793 cmd, ctl, cur_depth, max_depth, valid_cnt); 9794 9795 /* Input queue to the Completion Processor */ 9796 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); 9797 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); 9798 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; 9799 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; 9800 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); 9801 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9802 cmd, ctl, cur_depth, max_depth, valid_cnt); 9803 9804 /* Setup the generic statistic counters for the FTQ valid count. */ 9805 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | 9806 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | 9807 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); 9808 9809 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9810 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 9811 val = val | 9812 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 9813 24); 9814 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9815 9816 /* Input queue to the Management Control Processor */ 9817 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); 9818 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); 9819 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9820 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9821 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9822 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9823 cmd, ctl, cur_depth, max_depth, valid_cnt); 9824 9825 /* Input queue to the Command Processor */ 9826 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); 9827 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); 9828 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9829 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9830 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9831 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9832 cmd, ctl, cur_depth, max_depth, valid_cnt); 9833 9834 /* Input queue to the Completion Scheduler state machine */ 9835 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); 9836 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); 9837 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; 9838 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; 9839 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9840 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9841 cmd, ctl, cur_depth, max_depth, valid_cnt); 9842 9843 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9844 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9845 /* Input queue to the RV2P Command Scheduler */ 9846 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); 9847 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); 9848 cur_depth = (ctl & 0xFFC00000) >> 22; 9849 max_depth = (ctl & 0x003FF000) >> 12; 9850 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9851 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9852 cmd, ctl, cur_depth, max_depth, valid_cnt); 9853 } 9854 9855 BCE_PRINTF( 9856 "----------------------------" 9857 "----------------" 9858 "----------------------------\n"); 9859} 9860 9861 9862/****************************************************************************/ 9863/* Prints out the TX chain. */ 9864/* */ 9865/* Returns: */ 9866/* Nothing. */ 9867/****************************************************************************/ 9868static __attribute__ ((noinline)) void 9869bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) 9870{ 9871 struct tx_bd *txbd; 9872 9873 /* First some info about the tx_bd chain structure. */ 9874 BCE_PRINTF( 9875 "----------------------------" 9876 " tx_bd chain " 9877 "----------------------------\n"); 9878 9879 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", 9880 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES); 9881 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 9882 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); 9883 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD); 9884 9885 BCE_PRINTF( 9886 "----------------------------" 9887 " tx_bd data " 9888 "----------------------------\n"); 9889 9890 /* Now print out a decoded list of TX buffer descriptors. */ 9891 for (int i = 0; i < count; i++) { 9892 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 9893 bce_dump_txbd(sc, tx_prod, txbd); 9894 tx_prod++; 9895 } 9896 9897 BCE_PRINTF( 9898 "----------------------------" 9899 "----------------" 9900 "----------------------------\n"); 9901} 9902 9903 9904/****************************************************************************/ 9905/* Prints out the RX chain. */ 9906/* */ 9907/* Returns: */ 9908/* Nothing. */ 9909/****************************************************************************/ 9910static __attribute__ ((noinline)) void 9911bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) 9912{ 9913 struct rx_bd *rxbd; 9914 9915 /* First some info about the rx_bd chain structure. */ 9916 BCE_PRINTF( 9917 "----------------------------" 9918 " rx_bd chain " 9919 "----------------------------\n"); 9920 9921 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", 9922 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES); 9923 9924 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9925 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); 9926 9927 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD); 9928 9929 BCE_PRINTF( 9930 "----------------------------" 9931 " rx_bd data " 9932 "----------------------------\n"); 9933 9934 /* Now print out the rx_bd's themselves. */ 9935 for (int i = 0; i < count; i++) { 9936 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 9937 bce_dump_rxbd(sc, rx_prod, rxbd); 9938 rx_prod = RX_CHAIN_IDX(rx_prod + 1); 9939 } 9940 9941 BCE_PRINTF( 9942 "----------------------------" 9943 "----------------" 9944 "----------------------------\n"); 9945} 9946 9947 9948#ifdef BCE_JUMBO_HDRSPLIT 9949/****************************************************************************/ 9950/* Prints out the page chain. */ 9951/* */ 9952/* Returns: */ 9953/* Nothing. */ 9954/****************************************************************************/ 9955static __attribute__ ((noinline)) void 9956bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) 9957{ 9958 struct rx_bd *pgbd; 9959 9960 /* First some info about the page chain structure. */ 9961 BCE_PRINTF( 9962 "----------------------------" 9963 " page chain " 9964 "----------------------------\n"); 9965 9966 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", 9967 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES); 9968 9969 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9970 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); 9971 9972 BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n", 9973 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD); 9974 9975 BCE_PRINTF( 9976 "----------------------------" 9977 " page data " 9978 "----------------------------\n"); 9979 9980 /* Now print out the rx_bd's themselves. */ 9981 for (int i = 0; i < count; i++) { 9982 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; 9983 bce_dump_pgbd(sc, pg_prod, pgbd); 9984 pg_prod = PG_CHAIN_IDX(pg_prod + 1); 9985 } 9986 9987 BCE_PRINTF( 9988 "----------------------------" 9989 "----------------" 9990 "----------------------------\n"); 9991} 9992#endif 9993 9994 9995#define BCE_PRINT_RX_CONS(arg) \ 9996if (sblk->status_rx_quick_consumer_index##arg) \ 9997 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ 9998 sblk->status_rx_quick_consumer_index##arg, (u16) \ 9999 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ 10000 arg); 10001 10002 10003#define BCE_PRINT_TX_CONS(arg) \ 10004if (sblk->status_tx_quick_consumer_index##arg) \ 10005 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ 10006 sblk->status_tx_quick_consumer_index##arg, (u16) \ 10007 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ 10008 arg); 10009 10010/****************************************************************************/ 10011/* Prints out the status block from host memory. */ 10012/* */ 10013/* Returns: */ 10014/* Nothing. */ 10015/****************************************************************************/ 10016static __attribute__ ((noinline)) void 10017bce_dump_status_block(struct bce_softc *sc) 10018{ 10019 struct status_block *sblk; 10020 10021 sblk = sc->status_block; 10022 10023 BCE_PRINTF( 10024 "----------------------------" 10025 " Status Block " 10026 "----------------------------\n"); 10027 10028 /* Theses indices are used for normal L2 drivers. */ 10029 BCE_PRINTF(" 0x%08X - attn_bits\n", 10030 sblk->status_attn_bits); 10031 10032 BCE_PRINTF(" 0x%08X - attn_bits_ack\n", 10033 sblk->status_attn_bits_ack); 10034 10035 BCE_PRINT_RX_CONS(0); 10036 BCE_PRINT_TX_CONS(0) 10037 10038 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); 10039 10040 /* Theses indices are not used for normal L2 drivers. */ 10041 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); 10042 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); 10043 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); 10044 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); 10045 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); 10046 10047 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); 10048 10049 if (sblk->status_completion_producer_index || 10050 sblk->status_cmd_consumer_index) 10051 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", 10052 sblk->status_completion_producer_index, 10053 sblk->status_cmd_consumer_index); 10054 10055 BCE_PRINTF( 10056 "----------------------------" 10057 "----------------" 10058 "----------------------------\n"); 10059} 10060 10061 10062#define BCE_PRINT_64BIT_STAT(arg) \ 10063if (sblk->arg##_lo || sblk->arg##_hi) \ 10064 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ 10065 sblk->arg##_lo, #arg); 10066 10067#define BCE_PRINT_32BIT_STAT(arg) \ 10068if (sblk->arg) \ 10069 BCE_PRINTF(" 0x%08X : %s\n", \ 10070 sblk->arg, #arg); 10071 10072/****************************************************************************/ 10073/* Prints out the statistics block from host memory. */ 10074/* */ 10075/* Returns: */ 10076/* Nothing. */ 10077/****************************************************************************/ 10078static __attribute__ ((noinline)) void 10079bce_dump_stats_block(struct bce_softc *sc) 10080{ 10081 struct statistics_block *sblk; 10082 10083 sblk = sc->stats_block; 10084 10085 BCE_PRINTF( 10086 "---------------" 10087 " Stats Block (All Stats Not Shown Are 0) " 10088 "---------------\n"); 10089 10090 BCE_PRINT_64BIT_STAT(stat_IfHCInOctets); 10091 BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets); 10092 BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets); 10093 BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets); 10094 BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts); 10095 BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts); 10096 BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts); 10097 BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts); 10098 BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts); 10099 BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts); 10100 BCE_PRINT_32BIT_STAT( 10101 stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 10102 BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors); 10103 BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors); 10104 BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors); 10105 BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames); 10106 BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames); 10107 BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions); 10108 BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions); 10109 BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions); 10110 BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions); 10111 BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments); 10112 BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers); 10113 BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts); 10114 BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts); 10115 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets); 10116 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets); 10117 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets); 10118 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets); 10119 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets); 10120 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets); 10121 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets); 10122 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets); 10123 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets); 10124 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets); 10125 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets); 10126 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets); 10127 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets); 10128 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets); 10129 BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived); 10130 BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived); 10131 BCE_PRINT_32BIT_STAT(stat_OutXonSent); 10132 BCE_PRINT_32BIT_STAT(stat_OutXoffSent); 10133 BCE_PRINT_32BIT_STAT(stat_FlowControlDone); 10134 BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived); 10135 BCE_PRINT_32BIT_STAT(stat_XoffStateEntered); 10136 BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards); 10137 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards); 10138 BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards); 10139 BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards); 10140 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit); 10141 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards); 10142 BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards); 10143 BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards); 10144 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit); 10145 10146 BCE_PRINTF( 10147 "----------------------------" 10148 "----------------" 10149 "----------------------------\n"); 10150} 10151 10152 10153/****************************************************************************/ 10154/* Prints out a summary of the driver state. */ 10155/* */ 10156/* Returns: */ 10157/* Nothing. */ 10158/****************************************************************************/ 10159static __attribute__ ((noinline)) void 10160bce_dump_driver_state(struct bce_softc *sc) 10161{ 10162 u32 val_hi, val_lo; 10163 10164 BCE_PRINTF( 10165 "-----------------------------" 10166 " Driver State " 10167 "-----------------------------\n"); 10168 10169 val_hi = BCE_ADDR_HI(sc); 10170 val_lo = BCE_ADDR_LO(sc); 10171 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " 10172 "address\n", val_hi, val_lo); 10173 10174 val_hi = BCE_ADDR_HI(sc->bce_vhandle); 10175 val_lo = BCE_ADDR_LO(sc->bce_vhandle); 10176 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " 10177 "address\n", val_hi, val_lo); 10178 10179 val_hi = BCE_ADDR_HI(sc->status_block); 10180 val_lo = BCE_ADDR_LO(sc->status_block); 10181 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " 10182 "virtual address\n", val_hi, val_lo); 10183 10184 val_hi = BCE_ADDR_HI(sc->stats_block); 10185 val_lo = BCE_ADDR_LO(sc->stats_block); 10186 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " 10187 "virtual address\n", val_hi, val_lo); 10188 10189 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 10190 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 10191 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 10192 "virtual adddress\n", val_hi, val_lo); 10193 10194 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 10195 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 10196 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 10197 "virtual address\n", val_hi, val_lo); 10198 10199#ifdef BCE_JUMBO_HDRSPLIT 10200 val_hi = BCE_ADDR_HI(sc->pg_bd_chain); 10201 val_lo = BCE_ADDR_LO(sc->pg_bd_chain); 10202 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " 10203 "virtual address\n", val_hi, val_lo); 10204#endif 10205 10206 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 10207 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 10208 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 10209 "virtual address\n", val_hi, val_lo); 10210 10211 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 10212 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 10213 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 10214 "virtual address\n", val_hi, val_lo); 10215 10216#ifdef BCE_JUMBO_HDRSPLIT 10217 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); 10218 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); 10219 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " 10220 "virtual address\n", val_hi, val_lo); 10221#endif 10222 10223 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) " 10224 "h/w intrs\n", sc->interrupts_generated); 10225 10226 BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) " 10227 "rx interrupts handled\n", sc->interrupts_rx); 10228 10229 BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) " 10230 "tx interrupts handled\n", sc->interrupts_tx); 10231 10232 BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) " 10233 "phy interrupts handled\n", sc->phy_interrupts); 10234 10235 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " 10236 "status block index\n", sc->last_status_idx); 10237 10238 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " 10239 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); 10240 10241 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " 10242 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); 10243 10244 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " 10245 "byte seq index\n", sc->tx_prod_bseq); 10246 10247 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " 10248 "mbufs allocated\n", sc->debug_tx_mbuf_alloc); 10249 10250 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " 10251 "tx_bd's\n", sc->used_tx_bd); 10252 10253 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi " 10254 "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd); 10255 10256 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " 10257 "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); 10258 10259 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " 10260 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); 10261 10262 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " 10263 "byte seq index\n", sc->rx_prod_bseq); 10264 10265 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " 10266 "mbufs allocated\n", sc->debug_rx_mbuf_alloc); 10267 10268 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " 10269 "rx_bd's\n", sc->free_rx_bd); 10270 10271#ifdef BCE_JUMBO_HDRSPLIT 10272 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " 10273 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); 10274 10275 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " 10276 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); 10277 10278 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " 10279 "mbufs allocated\n", sc->debug_pg_mbuf_alloc); 10280 10281 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " 10282 "rx_bd's\n", sc->free_pg_bd); 10283 10284 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low " 10285 "watermark\n", sc->pg_low_watermark, sc->max_pg_bd); 10286#endif 10287 10288 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " 10289 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); 10290 10291 BCE_PRINTF(" 0x%08X - (sc->bce_flags) " 10292 "bce mac flags\n", sc->bce_flags); 10293 10294 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " 10295 "bce phy flags\n", sc->bce_phy_flags); 10296 10297 BCE_PRINTF( 10298 "----------------------------" 10299 "----------------" 10300 "----------------------------\n"); 10301} 10302 10303 10304/****************************************************************************/ 10305/* Prints out the hardware state through a summary of important register, */ 10306/* followed by a complete register dump. */ 10307/* */ 10308/* Returns: */ 10309/* Nothing. */ 10310/****************************************************************************/ 10311static __attribute__ ((noinline)) void 10312bce_dump_hw_state(struct bce_softc *sc) 10313{ 10314 u32 val; 10315 10316 BCE_PRINTF( 10317 "----------------------------" 10318 " Hardware State " 10319 "----------------------------\n"); 10320 10321 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); 10322 10323 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 10324 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n", 10325 val, BCE_MISC_ENABLE_STATUS_BITS); 10326 10327 val = REG_RD(sc, BCE_DMA_STATUS); 10328 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", 10329 val, BCE_DMA_STATUS); 10330 10331 val = REG_RD(sc, BCE_CTX_STATUS); 10332 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", 10333 val, BCE_CTX_STATUS); 10334 10335 val = REG_RD(sc, BCE_EMAC_STATUS); 10336 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", 10337 val, BCE_EMAC_STATUS); 10338 10339 val = REG_RD(sc, BCE_RPM_STATUS); 10340 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", 10341 val, BCE_RPM_STATUS); 10342 10343 /* ToDo: Create a #define for this constant. */ 10344 val = REG_RD(sc, 0x2004); 10345 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", 10346 val, 0x2004); 10347 10348 val = REG_RD(sc, BCE_RV2P_STATUS); 10349 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", 10350 val, BCE_RV2P_STATUS); 10351 10352 /* ToDo: Create a #define for this constant. */ 10353 val = REG_RD(sc, 0x2c04); 10354 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", 10355 val, 0x2c04); 10356 10357 val = REG_RD(sc, BCE_TBDR_STATUS); 10358 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", 10359 val, BCE_TBDR_STATUS); 10360 10361 val = REG_RD(sc, BCE_TDMA_STATUS); 10362 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", 10363 val, BCE_TDMA_STATUS); 10364 10365 val = REG_RD(sc, BCE_HC_STATUS); 10366 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", 10367 val, BCE_HC_STATUS); 10368 10369 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 10370 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", 10371 val, BCE_TXP_CPU_STATE); 10372 10373 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 10374 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", 10375 val, BCE_TPAT_CPU_STATE); 10376 10377 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 10378 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", 10379 val, BCE_RXP_CPU_STATE); 10380 10381 val = REG_RD_IND(sc, BCE_COM_CPU_STATE); 10382 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", 10383 val, BCE_COM_CPU_STATE); 10384 10385 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 10386 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", 10387 val, BCE_MCP_CPU_STATE); 10388 10389 val = REG_RD_IND(sc, BCE_CP_CPU_STATE); 10390 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", 10391 val, BCE_CP_CPU_STATE); 10392 10393 BCE_PRINTF( 10394 "----------------------------" 10395 "----------------" 10396 "----------------------------\n"); 10397 10398 BCE_PRINTF( 10399 "----------------------------" 10400 " Register Dump " 10401 "----------------------------\n"); 10402 10403 for (int i = 0x400; i < 0x8000; i += 0x10) { 10404 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 10405 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 10406 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 10407 } 10408 10409 BCE_PRINTF( 10410 "----------------------------" 10411 "----------------" 10412 "----------------------------\n"); 10413} 10414 10415 10416/****************************************************************************/ 10417/* Prints out the mailbox queue registers. */ 10418/* */ 10419/* Returns: */ 10420/* Nothing. */ 10421/****************************************************************************/ 10422static __attribute__ ((noinline)) void 10423bce_dump_mq_regs(struct bce_softc *sc) 10424{ 10425 BCE_PRINTF( 10426 "----------------------------" 10427 " MQ Regs " 10428 "----------------------------\n"); 10429 10430 BCE_PRINTF( 10431 "----------------------------" 10432 "----------------" 10433 "----------------------------\n"); 10434 10435 for (int i = 0x3c00; i < 0x4000; i += 0x10) { 10436 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 10437 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 10438 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 10439 } 10440 10441 BCE_PRINTF( 10442 "----------------------------" 10443 "----------------" 10444 "----------------------------\n"); 10445} 10446 10447 10448/****************************************************************************/ 10449/* Prints out the bootcode state. */ 10450/* */ 10451/* Returns: */ 10452/* Nothing. */ 10453/****************************************************************************/ 10454static __attribute__ ((noinline)) void 10455bce_dump_bc_state(struct bce_softc *sc) 10456{ 10457 u32 val; 10458 10459 BCE_PRINTF( 10460 "----------------------------" 10461 " Bootcode State " 10462 "----------------------------\n"); 10463 10464 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); 10465 10466 val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE); 10467 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n", 10468 val, BCE_BC_RESET_TYPE); 10469 10470 val = bce_shmem_rd(sc, BCE_BC_STATE); 10471 BCE_PRINTF("0x%08X - (0x%06X) state\n", 10472 val, BCE_BC_STATE); 10473 10474 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 10475 BCE_PRINTF("0x%08X - (0x%06X) condition\n", 10476 val, BCE_BC_STATE_CONDITION); 10477 10478 val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD); 10479 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n", 10480 val, BCE_BC_STATE_DEBUG_CMD); 10481 10482 BCE_PRINTF( 10483 "----------------------------" 10484 "----------------" 10485 "----------------------------\n"); 10486} 10487 10488 10489/****************************************************************************/ 10490/* Prints out the TXP processor state. */ 10491/* */ 10492/* Returns: */ 10493/* Nothing. */ 10494/****************************************************************************/ 10495static __attribute__ ((noinline)) void 10496bce_dump_txp_state(struct bce_softc *sc, int regs) 10497{ 10498 u32 val; 10499 u32 fw_version[3]; 10500 10501 BCE_PRINTF( 10502 "----------------------------" 10503 " TXP State " 10504 "----------------------------\n"); 10505 10506 for (int i = 0; i < 3; i++) 10507 fw_version[i] = htonl(REG_RD_IND(sc, 10508 (BCE_TXP_SCRATCH + 0x10 + i * 4))); 10509 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 10510 10511 val = REG_RD_IND(sc, BCE_TXP_CPU_MODE); 10512 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", 10513 val, BCE_TXP_CPU_MODE); 10514 10515 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 10516 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", 10517 val, BCE_TXP_CPU_STATE); 10518 10519 val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); 10520 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", 10521 val, BCE_TXP_CPU_EVENT_MASK); 10522 10523 if (regs) { 10524 BCE_PRINTF( 10525 "----------------------------" 10526 " Register Dump " 10527 "----------------------------\n"); 10528 10529 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { 10530 /* Skip the big blank spaces */ 10531 if (i < 0x454000 && i > 0x5ffff) 10532 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 10533 "0x%08X 0x%08X\n", i, 10534 REG_RD_IND(sc, i), 10535 REG_RD_IND(sc, i + 0x4), 10536 REG_RD_IND(sc, i + 0x8), 10537 REG_RD_IND(sc, i + 0xC)); 10538 } 10539 } 10540 10541 BCE_PRINTF( 10542 "----------------------------" 10543 "----------------" 10544 "----------------------------\n"); 10545} 10546 10547 10548/****************************************************************************/ 10549/* Prints out the RXP processor state. */ 10550/* */ 10551/* Returns: */ 10552/* Nothing. */ 10553/****************************************************************************/ 10554static __attribute__ ((noinline)) void 10555bce_dump_rxp_state(struct bce_softc *sc, int regs) 10556{ 10557 u32 val; 10558 u32 fw_version[3]; 10559 10560 BCE_PRINTF( 10561 "----------------------------" 10562 " RXP State " 10563 "----------------------------\n"); 10564 10565 for (int i = 0; i < 3; i++) 10566 fw_version[i] = htonl(REG_RD_IND(sc, 10567 (BCE_RXP_SCRATCH + 0x10 + i * 4))); 10568 10569 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 10570 10571 val = REG_RD_IND(sc, BCE_RXP_CPU_MODE); 10572 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", 10573 val, BCE_RXP_CPU_MODE); 10574 10575 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 10576 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", 10577 val, BCE_RXP_CPU_STATE); 10578 10579 val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); 10580 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", 10581 val, BCE_RXP_CPU_EVENT_MASK); 10582 10583 if (regs) { 10584 BCE_PRINTF( 10585 "----------------------------" 10586 " Register Dump " 10587 "----------------------------\n"); 10588 10589 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { 10590 /* Skip the big blank sapces */ 10591 if (i < 0xc5400 && i > 0xdffff) 10592 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 10593 "0x%08X 0x%08X\n", i, 10594 REG_RD_IND(sc, i), 10595 REG_RD_IND(sc, i + 0x4), 10596 REG_RD_IND(sc, i + 0x8), 10597 REG_RD_IND(sc, i + 0xC)); 10598 } 10599 } 10600 10601 BCE_PRINTF( 10602 "----------------------------" 10603 "----------------" 10604 "----------------------------\n"); 10605} 10606 10607 10608/****************************************************************************/ 10609/* Prints out the TPAT processor state. */ 10610/* */ 10611/* Returns: */ 10612/* Nothing. */ 10613/****************************************************************************/ 10614static __attribute__ ((noinline)) void 10615bce_dump_tpat_state(struct bce_softc *sc, int regs) 10616{ 10617 u32 val; 10618 u32 fw_version[3]; 10619 10620 BCE_PRINTF( 10621 "----------------------------" 10622 " TPAT State " 10623 "----------------------------\n"); 10624 10625 for (int i = 0; i < 3; i++) 10626 fw_version[i] = htonl(REG_RD_IND(sc, 10627 (BCE_TPAT_SCRATCH + 0x410 + i * 4))); 10628 10629 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 10630 10631 val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 10632 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", 10633 val, BCE_TPAT_CPU_MODE); 10634 10635 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 10636 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", 10637 val, BCE_TPAT_CPU_STATE); 10638 10639 val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 10640 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", 10641 val, BCE_TPAT_CPU_EVENT_MASK); 10642 10643 if (regs) { 10644 BCE_PRINTF( 10645 "----------------------------" 10646 " Register Dump " 10647 "----------------------------\n"); 10648 10649 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 10650 /* Skip the big blank spaces */ 10651 if (i < 0x854000 && i > 0x9ffff) 10652 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 10653 "0x%08X 0x%08X\n", i, 10654 REG_RD_IND(sc, i), 10655 REG_RD_IND(sc, i + 0x4), 10656 REG_RD_IND(sc, i + 0x8), 10657 REG_RD_IND(sc, i + 0xC)); 10658 } 10659 } 10660 10661 BCE_PRINTF( 10662 "----------------------------" 10663 "----------------" 10664 "----------------------------\n"); 10665} 10666 10667 10668/****************************************************************************/ 10669/* Prints out the Command Procesor (CP) state. */ 10670/* */ 10671/* Returns: */ 10672/* Nothing. */ 10673/****************************************************************************/ 10674static __attribute__ ((noinline)) void 10675bce_dump_cp_state(struct bce_softc *sc, int regs) 10676{ 10677 u32 val; 10678 u32 fw_version[3]; 10679 10680 BCE_PRINTF( 10681 "----------------------------" 10682 " CP State " 10683 "----------------------------\n"); 10684 10685 for (int i = 0; i < 3; i++) 10686 fw_version[i] = htonl(REG_RD_IND(sc, 10687 (BCE_CP_SCRATCH + 0x10 + i * 4))); 10688 10689 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 10690 10691 val = REG_RD_IND(sc, BCE_CP_CPU_MODE); 10692 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", 10693 val, BCE_CP_CPU_MODE); 10694 10695 val = REG_RD_IND(sc, BCE_CP_CPU_STATE); 10696 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", 10697 val, BCE_CP_CPU_STATE); 10698 10699 val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK); 10700 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val, 10701 BCE_CP_CPU_EVENT_MASK); 10702 10703 if (regs) { 10704 BCE_PRINTF( 10705 "----------------------------" 10706 " Register Dump " 10707 "----------------------------\n"); 10708 10709 for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) { 10710 /* Skip the big blank spaces */ 10711 if (i < 0x185400 && i > 0x19ffff) 10712 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 10713 "0x%08X 0x%08X\n", i, 10714 REG_RD_IND(sc, i), 10715 REG_RD_IND(sc, i + 0x4), 10716 REG_RD_IND(sc, i + 0x8), 10717 REG_RD_IND(sc, i + 0xC)); 10718 } 10719 } 10720 10721 BCE_PRINTF( 10722 "----------------------------" 10723 "----------------" 10724 "----------------------------\n"); 10725} 10726 10727 10728/****************************************************************************/ 10729/* Prints out the Completion Procesor (COM) state. */ 10730/* */ 10731/* Returns: */ 10732/* Nothing. */ 10733/****************************************************************************/ 10734static __attribute__ ((noinline)) void 10735bce_dump_com_state(struct bce_softc *sc, int regs) 10736{ 10737 u32 val; 10738 u32 fw_version[4]; 10739 10740 BCE_PRINTF( 10741 "----------------------------" 10742 " COM State " 10743 "----------------------------\n"); 10744 10745 for (int i = 0; i < 3; i++) 10746 fw_version[i] = htonl(REG_RD_IND(sc, 10747 (BCE_COM_SCRATCH + 0x10 + i * 4))); 10748 10749 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 10750 10751 val = REG_RD_IND(sc, BCE_COM_CPU_MODE); 10752 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", 10753 val, BCE_COM_CPU_MODE); 10754 10755 val = REG_RD_IND(sc, BCE_COM_CPU_STATE); 10756 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", 10757 val, BCE_COM_CPU_STATE); 10758 10759 val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK); 10760 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val, 10761 BCE_COM_CPU_EVENT_MASK); 10762 10763 if (regs) { 10764 BCE_PRINTF( 10765 "----------------------------" 10766 " Register Dump " 10767 "----------------------------\n"); 10768 10769 for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) { 10770 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 10771 "0x%08X 0x%08X\n", i, 10772 REG_RD_IND(sc, i), 10773 REG_RD_IND(sc, i + 0x4), 10774 REG_RD_IND(sc, i + 0x8), 10775 REG_RD_IND(sc, i + 0xC)); 10776 } 10777 } 10778 10779 BCE_PRINTF( 10780 "----------------------------" 10781 "----------------" 10782 "----------------------------\n"); 10783} 10784 10785 10786/****************************************************************************/ 10787/* Prints out the Receive Virtual 2 Physical (RV2P) state. */ 10788/* */ 10789/* Returns: */ 10790/* Nothing. */ 10791/****************************************************************************/ 10792static __attribute__ ((noinline)) void 10793bce_dump_rv2p_state(struct bce_softc *sc) 10794{ 10795 u32 val, pc1, pc2, fw_ver_high, fw_ver_low; 10796 10797 BCE_PRINTF( 10798 "----------------------------" 10799 " RV2P State " 10800 "----------------------------\n"); 10801 10802 /* Stall the RV2P processors. */ 10803 val = REG_RD_IND(sc, BCE_RV2P_CONFIG); 10804 val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2; 10805 REG_WR_IND(sc, BCE_RV2P_CONFIG, val); 10806 10807 /* Read the firmware version. */ 10808 val = 0x00000001; 10809 REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 10810 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); 10811 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & 10812 BCE_RV2P_INSTR_HIGH_HIGH; 10813 BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n", 10814 fw_ver_high, fw_ver_low); 10815 10816 val = 0x00000001; 10817 REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 10818 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); 10819 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & 10820 BCE_RV2P_INSTR_HIGH_HIGH; 10821 BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n", 10822 fw_ver_high, fw_ver_low); 10823 10824 /* Resume the RV2P processors. */ 10825 val = REG_RD_IND(sc, BCE_RV2P_CONFIG); 10826 val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2); 10827 REG_WR_IND(sc, BCE_RV2P_CONFIG, val); 10828 10829 /* Fetch the program counter value. */ 10830 val = 0x68007800; 10831 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); 10832 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); 10833 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); 10834 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; 10835 BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1); 10836 BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2); 10837 10838 /* Fetch the program counter value again to see if it is advancing. */ 10839 val = 0x68007800; 10840 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); 10841 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); 10842 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); 10843 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; 10844 BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1); 10845 BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2); 10846 10847 BCE_PRINTF( 10848 "----------------------------" 10849 "----------------" 10850 "----------------------------\n"); 10851} 10852 10853 10854/****************************************************************************/ 10855/* Prints out the driver state and then enters the debugger. */ 10856/* */ 10857/* Returns: */ 10858/* Nothing. */ 10859/****************************************************************************/ 10860static __attribute__ ((noinline)) void 10861bce_breakpoint(struct bce_softc *sc) 10862{ 10863 10864 /* 10865 * Unreachable code to silence compiler warnings 10866 * about unused functions. 10867 */ 10868 if (0) { 10869 bce_freeze_controller(sc); 10870 bce_unfreeze_controller(sc); 10871 bce_dump_enet(sc, NULL); 10872 bce_dump_txbd(sc, 0, NULL); 10873 bce_dump_rxbd(sc, 0, NULL); 10874 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 10875 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD); 10876 bce_dump_l2fhdr(sc, 0, NULL); 10877 bce_dump_ctx(sc, RX_CID); 10878 bce_dump_ftqs(sc); 10879 bce_dump_tx_chain(sc, 0, USABLE_TX_BD); 10880 bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD); 10881 bce_dump_status_block(sc); 10882 bce_dump_stats_block(sc); 10883 bce_dump_driver_state(sc); 10884 bce_dump_hw_state(sc); 10885 bce_dump_bc_state(sc); 10886 bce_dump_txp_state(sc, 0); 10887 bce_dump_rxp_state(sc, 0); 10888 bce_dump_tpat_state(sc, 0); 10889 bce_dump_cp_state(sc, 0); 10890 bce_dump_com_state(sc, 0); 10891 bce_dump_rv2p_state(sc); 10892 10893#ifdef BCE_JUMBO_HDRSPLIT 10894 bce_dump_pgbd(sc, 0, NULL); 10895 bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD); 10896 bce_dump_pg_chain(sc, 0, USABLE_PG_BD); 10897#endif 10898 } 10899 10900 bce_dump_status_block(sc); 10901 bce_dump_driver_state(sc); 10902 10903 /* Call the debugger. */ 10904 breakpoint(); 10905 10906 return; 10907} 10908#endif 10909 10910