/*-
 * Copyright (c) 2006-2014 QLogic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD$"); 29 30/* 31 * The following controllers are supported by this driver: 32 * BCM5706C A2, A3 33 * BCM5706S A2, A3 34 * BCM5708C B1, B2 35 * BCM5708S B1, B2 36 * BCM5709C A1, C0 37 * BCM5709S A1, C0 38 * BCM5716C C0 39 * BCM5716S C0 40 * 41 * The following controllers are not supported by this driver: 42 * BCM5706C A0, A1 (pre-production) 43 * BCM5706S A0, A1 (pre-production) 44 * BCM5708C A0, B0 (pre-production) 45 * BCM5708S A0, B0 (pre-production) 46 * BCM5709C A0 B0, B1, B2 (pre-production) 47 * BCM5709S A0, B0, B1, B2 (pre-production) 48 */ 49 50#include "opt_bce.h" 51 52#include <dev/bce/if_bcereg.h> 53#include <dev/bce/if_bcefw.h> 54 55/****************************************************************************/ 56/* BCE Debug Options */ 57/****************************************************************************/ 58#ifdef BCE_DEBUG 59 u32 bce_debug = BCE_WARN; 60 61 /* 0 = Never */ 62 /* 1 = 1 in 2,147,483,648 */ 63 /* 256 = 1 in 8,388,608 */ 64 /* 2048 = 1 in 1,048,576 */ 65 /* 65536 = 1 in 32,768 */ 66 /* 1048576 = 1 in 2,048 */ 67 /* 268435456 = 1 in 8 */ 68 /* 536870912 = 1 in 4 */ 69 /* 1073741824 = 1 in 2 */ 70 71 /* Controls how often the l2_fhdr frame error check will fail. */ 72 int l2fhdr_error_sim_control = 0; 73 74 /* Controls how often the unexpected attention check will fail. */ 75 int unexpected_attention_sim_control = 0; 76 77 /* Controls how often to simulate an mbuf allocation failure. */ 78 int mbuf_alloc_failed_sim_control = 0; 79 80 /* Controls how often to simulate a DMA mapping failure. */ 81 int dma_map_addr_failed_sim_control = 0; 82 83 /* Controls how often to simulate a bootcode failure. */ 84 int bootcode_running_failure_sim_control = 0; 85#endif 86 87/****************************************************************************/ 88/* PCI Device ID Table */ 89/* */ 90/* Used by bce_probe() to identify the devices supported by this driver. 
*/ 91/****************************************************************************/ 92#define BCE_DEVDESC_MAX 64 93 94static const struct bce_type bce_devs[] = { 95 /* BCM5706C Controllers and OEM boards. */ 96 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 97 "HP NC370T Multifunction Gigabit Server Adapter" }, 98 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 99 "HP NC370i Multifunction Gigabit Server Adapter" }, 100 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 101 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 102 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 103 "HP NC371i Multifunction Gigabit Server Adapter" }, 104 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 105 "QLogic NetXtreme II BCM5706 1000Base-T" }, 106 107 /* BCM5706S controllers and OEM boards. */ 108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 109 "HP NC370F Multifunction Gigabit Server Adapter" }, 110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 111 "QLogic NetXtreme II BCM5706 1000Base-SX" }, 112 113 /* BCM5708C controllers and OEM boards. */ 114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 115 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 116 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 117 "HP NC373i Multifunction Gigabit Server Adapter" }, 118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 119 "HP NC374m PCIe Multifunction Adapter" }, 120 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 121 "QLogic NetXtreme II BCM5708 1000Base-T" }, 122 123 /* BCM5708S controllers and OEM boards. 
*/ 124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 125 "HP NC373m Multifunction Gigabit Server Adapter" }, 126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 127 "HP NC373i Multifunction Gigabit Server Adapter" }, 128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 129 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 130 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 131 "QLogic NetXtreme II BCM5708 1000Base-SX" }, 132 133 /* BCM5709C controllers and OEM boards. */ 134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 135 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 137 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 139 "QLogic NetXtreme II BCM5709 1000Base-T" }, 140 141 /* BCM5709S controllers and OEM boards. */ 142 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 143 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 145 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 147 "QLogic NetXtreme II BCM5709 1000Base-SX" }, 148 149 /* BCM5716 controllers and OEM boards. */ 150 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 151 "QLogic NetXtreme II BCM5716 1000Base-T" }, 152 153 { 0, 0, 0, 0, NULL } 154}; 155 156 157/****************************************************************************/ 158/* Supported Flash NVRAM device data. 
*/ 159/****************************************************************************/ 160static const struct flash_spec flash_table[] = 161{ 162#define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 163#define NONBUFFERED_FLAGS (BCE_NV_WREN) 164 165 /* Slow EEPROM */ 166 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 167 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 168 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 169 "EEPROM - slow"}, 170 /* Expansion entry 0001 */ 171 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 173 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 174 "Entry 0001"}, 175 /* Saifun SA25F010 (non-buffered flash) */ 176 /* strap, cfg1, & write1 need updates */ 177 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 178 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 179 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 180 "Non-buffered flash (128kB)"}, 181 /* Saifun SA25F020 (non-buffered flash) */ 182 /* strap, cfg1, & write1 need updates */ 183 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 184 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 185 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 186 "Non-buffered flash (256kB)"}, 187 /* Expansion entry 0100 */ 188 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 189 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 190 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 191 "Entry 0100"}, 192 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 193 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 194 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 195 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 196 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 197 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 198 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 
0xaf020406, 199 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 200 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 201 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 202 /* Saifun SA25F005 (non-buffered flash) */ 203 /* strap, cfg1, & write1 need updates */ 204 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 205 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 206 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 207 "Non-buffered flash (64kB)"}, 208 /* Fast EEPROM */ 209 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 210 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 211 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 212 "EEPROM - fast"}, 213 /* Expansion entry 1001 */ 214 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 216 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 217 "Entry 1001"}, 218 /* Expansion entry 1010 */ 219 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 220 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 221 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 222 "Entry 1010"}, 223 /* ATMEL AT45DB011B (buffered flash) */ 224 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 225 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 226 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 227 "Buffered flash (128kB)"}, 228 /* Expansion entry 1100 */ 229 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 231 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 232 "Entry 1100"}, 233 /* Expansion entry 1101 */ 234 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 235 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 236 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 237 "Entry 1101"}, 238 /* Ateml Expansion entry 1110 */ 239 {0x37000001, 0x76808273, 0x00570081, 
0x68848353, 0xaf000400, 240 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 241 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 242 "Entry 1110 (Atmel)"}, 243 /* ATMEL AT45DB021B (buffered flash) */ 244 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 245 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 246 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 247 "Buffered flash (256kB)"}, 248}; 249 250/* 251 * The BCM5709 controllers transparently handle the 252 * differences between Atmel 264 byte pages and all 253 * flash devices which use 256 byte pages, so no 254 * logical-to-physical mapping is required in the 255 * driver. 256 */ 257static const struct flash_spec flash_5709 = { 258 .flags = BCE_NV_BUFFERED, 259 .page_bits = BCM5709_FLASH_PAGE_BITS, 260 .page_size = BCM5709_FLASH_PAGE_SIZE, 261 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 262 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 263 .name = "5709/5716 buffered flash (256kB)", 264}; 265 266 267/****************************************************************************/ 268/* FreeBSD device entry points. 
*/ 269/****************************************************************************/ 270static int bce_probe (device_t); 271static int bce_attach (device_t); 272static int bce_detach (device_t); 273static int bce_shutdown (device_t); 274 275 276/****************************************************************************/ 277/* BCE Debug Data Structure Dump Routines */ 278/****************************************************************************/ 279#ifdef BCE_DEBUG 280static u32 bce_reg_rd (struct bce_softc *, u32); 281static void bce_reg_wr (struct bce_softc *, u32, u32); 282static void bce_reg_wr16 (struct bce_softc *, u32, u16); 283static u32 bce_ctx_rd (struct bce_softc *, u32, u32); 284static void bce_dump_enet (struct bce_softc *, struct mbuf *); 285static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); 286static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); 287static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); 288static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); 289static void bce_dump_txbd (struct bce_softc *, 290 int, struct tx_bd *); 291static void bce_dump_rxbd (struct bce_softc *, 292 int, struct rx_bd *); 293static void bce_dump_pgbd (struct bce_softc *, 294 int, struct rx_bd *); 295static void bce_dump_l2fhdr (struct bce_softc *, 296 int, struct l2_fhdr *); 297static void bce_dump_ctx (struct bce_softc *, u16); 298static void bce_dump_ftqs (struct bce_softc *); 299static void bce_dump_tx_chain (struct bce_softc *, u16, int); 300static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int); 301static void bce_dump_pg_chain (struct bce_softc *, u16, int); 302static void bce_dump_status_block (struct bce_softc *); 303static void bce_dump_stats_block (struct bce_softc *); 304static void bce_dump_driver_state (struct bce_softc *); 305static void bce_dump_hw_state (struct bce_softc *); 306static void bce_dump_shmem_state (struct bce_softc *); 307static void bce_dump_mq_regs (struct bce_softc *); 
308static void bce_dump_bc_state (struct bce_softc *); 309static void bce_dump_txp_state (struct bce_softc *, int); 310static void bce_dump_rxp_state (struct bce_softc *, int); 311static void bce_dump_tpat_state (struct bce_softc *, int); 312static void bce_dump_cp_state (struct bce_softc *, int); 313static void bce_dump_com_state (struct bce_softc *, int); 314static void bce_dump_rv2p_state (struct bce_softc *); 315static void bce_breakpoint (struct bce_softc *); 316#endif /*BCE_DEBUG */ 317 318 319/****************************************************************************/ 320/* BCE Register/Memory Access Routines */ 321/****************************************************************************/ 322static u32 bce_reg_rd_ind (struct bce_softc *, u32); 323static void bce_reg_wr_ind (struct bce_softc *, u32, u32); 324static void bce_shmem_wr (struct bce_softc *, u32, u32); 325static u32 bce_shmem_rd (struct bce_softc *, u32); 326static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); 327static int bce_miibus_read_reg (device_t, int, int); 328static int bce_miibus_write_reg (device_t, int, int, int); 329static void bce_miibus_statchg (device_t); 330 331#ifdef BCE_DEBUG 332static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS); 333#ifdef BCE_NVRAM_WRITE_SUPPORT 334static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS); 335#endif 336#endif 337 338/****************************************************************************/ 339/* BCE NVRAM Access Routines */ 340/****************************************************************************/ 341static int bce_acquire_nvram_lock (struct bce_softc *); 342static int bce_release_nvram_lock (struct bce_softc *); 343static void bce_enable_nvram_access(struct bce_softc *); 344static void bce_disable_nvram_access(struct bce_softc *); 345static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); 346static int bce_init_nvram (struct bce_softc *); 347static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); 
348static int bce_nvram_test (struct bce_softc *); 349#ifdef BCE_NVRAM_WRITE_SUPPORT 350static int bce_enable_nvram_write (struct bce_softc *); 351static void bce_disable_nvram_write(struct bce_softc *); 352static int bce_nvram_erase_page (struct bce_softc *, u32); 353static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); 354static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); 355#endif 356 357/****************************************************************************/ 358/* */ 359/****************************************************************************/ 360static void bce_get_rx_buffer_sizes(struct bce_softc *, int); 361static void bce_get_media (struct bce_softc *); 362static void bce_init_media (struct bce_softc *); 363static u32 bce_get_rphy_link (struct bce_softc *); 364static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int); 365static int bce_dma_alloc (device_t); 366static void bce_dma_free (struct bce_softc *); 367static void bce_release_resources (struct bce_softc *); 368 369/****************************************************************************/ 370/* BCE Firmware Synchronization and Load */ 371/****************************************************************************/ 372static void bce_fw_cap_init (struct bce_softc *); 373static int bce_fw_sync (struct bce_softc *, u32); 374static void bce_load_rv2p_fw (struct bce_softc *, const u32 *, u32, 375 u32); 376static void bce_load_cpu_fw (struct bce_softc *, 377 struct cpu_reg *, struct fw_info *); 378static void bce_start_cpu (struct bce_softc *, struct cpu_reg *); 379static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *); 380static void bce_start_rxp_cpu (struct bce_softc *); 381static void bce_init_rxp_cpu (struct bce_softc *); 382static void bce_init_txp_cpu (struct bce_softc *); 383static void bce_init_tpat_cpu (struct bce_softc *); 384static void bce_init_cp_cpu (struct bce_softc *); 385static void bce_init_com_cpu (struct bce_softc *); 
386static void bce_init_cpus (struct bce_softc *); 387 388static void bce_print_adapter_info (struct bce_softc *); 389static void bce_probe_pci_caps (device_t, struct bce_softc *); 390static void bce_stop (struct bce_softc *); 391static int bce_reset (struct bce_softc *, u32); 392static int bce_chipinit (struct bce_softc *); 393static int bce_blockinit (struct bce_softc *); 394 395static int bce_init_tx_chain (struct bce_softc *); 396static void bce_free_tx_chain (struct bce_softc *); 397 398static int bce_get_rx_buf (struct bce_softc *, u16, u16, u32 *); 399static int bce_init_rx_chain (struct bce_softc *); 400static void bce_fill_rx_chain (struct bce_softc *); 401static void bce_free_rx_chain (struct bce_softc *); 402 403static int bce_get_pg_buf (struct bce_softc *, u16, u16); 404static int bce_init_pg_chain (struct bce_softc *); 405static void bce_fill_pg_chain (struct bce_softc *); 406static void bce_free_pg_chain (struct bce_softc *); 407 408static struct mbuf *bce_tso_setup (struct bce_softc *, 409 struct mbuf **, u16 *); 410static int bce_tx_encap (struct bce_softc *, struct mbuf **); 411static void bce_start_locked (struct ifnet *); 412static void bce_start (struct ifnet *); 413static int bce_ioctl (struct ifnet *, u_long, caddr_t); 414static void bce_watchdog (struct bce_softc *); 415static int bce_ifmedia_upd (struct ifnet *); 416static int bce_ifmedia_upd_locked (struct ifnet *); 417static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); 418static void bce_ifmedia_sts_rphy (struct bce_softc *, struct ifmediareq *); 419static void bce_init_locked (struct bce_softc *); 420static void bce_init (void *); 421static void bce_mgmt_init_locked (struct bce_softc *sc); 422 423static int bce_init_ctx (struct bce_softc *); 424static void bce_get_mac_addr (struct bce_softc *); 425static void bce_set_mac_addr (struct bce_softc *); 426static void bce_phy_intr (struct bce_softc *); 427static inline u16 bce_get_hw_rx_cons (struct bce_softc *); 428static void 
bce_rx_intr (struct bce_softc *); 429static void bce_tx_intr (struct bce_softc *); 430static void bce_disable_intr (struct bce_softc *); 431static void bce_enable_intr (struct bce_softc *, int); 432 433static void bce_intr (void *); 434static void bce_set_rx_mode (struct bce_softc *); 435static void bce_stats_update (struct bce_softc *); 436static void bce_tick (void *); 437static void bce_pulse (void *); 438static void bce_add_sysctls (struct bce_softc *); 439 440 441/****************************************************************************/ 442/* FreeBSD device dispatch table. */ 443/****************************************************************************/ 444static device_method_t bce_methods[] = { 445 /* Device interface (device_if.h) */ 446 DEVMETHOD(device_probe, bce_probe), 447 DEVMETHOD(device_attach, bce_attach), 448 DEVMETHOD(device_detach, bce_detach), 449 DEVMETHOD(device_shutdown, bce_shutdown), 450/* Supported by device interface but not used here. */ 451/* DEVMETHOD(device_identify, bce_identify), */ 452/* DEVMETHOD(device_suspend, bce_suspend), */ 453/* DEVMETHOD(device_resume, bce_resume), */ 454/* DEVMETHOD(device_quiesce, bce_quiesce), */ 455 456 /* MII interface (miibus_if.h) */ 457 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 458 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 459 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 460/* Supported by MII interface but not used here. 
*/ 461/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ 462/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ 463 464 DEVMETHOD_END 465}; 466 467static driver_t bce_driver = { 468 "bce", 469 bce_methods, 470 sizeof(struct bce_softc) 471}; 472 473static devclass_t bce_devclass; 474 475MODULE_DEPEND(bce, pci, 1, 1, 1); 476MODULE_DEPEND(bce, ether, 1, 1, 1); 477MODULE_DEPEND(bce, miibus, 1, 1, 1); 478 479DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, NULL, NULL); 480DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL); 481 482 483/****************************************************************************/ 484/* Tunable device values */ 485/****************************************************************************/ 486static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters"); 487 488/* Allowable values are TRUE or FALSE */ 489static int bce_verbose = TRUE; 490TUNABLE_INT("hw.bce.verbose", &bce_verbose); 491SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0, 492 "Verbose output enable/disable"); 493 494/* Allowable values are TRUE or FALSE */ 495static int bce_tso_enable = TRUE; 496TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable); 497SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, 498 "TSO Enable/Disable"); 499 500/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ 501/* ToDo: Add MSI-X support. */ 502static int bce_msi_enable = 1; 503TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable); 504SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, 505 "MSI-X|MSI|INTx selector"); 506 507/* Allowable values are 1, 2, 4, 8. */ 508static int bce_rx_pages = DEFAULT_RX_PAGES; 509TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages); 510SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0, 511 "Receive buffer descriptor pages (1 page = 255 buffer descriptors)"); 512 513/* Allowable values are 1, 2, 4, 8. 
*/ 514static int bce_tx_pages = DEFAULT_TX_PAGES; 515TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages); 516SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0, 517 "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)"); 518 519/* Allowable values are TRUE or FALSE. */ 520static int bce_hdr_split = TRUE; 521TUNABLE_INT("hw.bce.hdr_split", &bce_hdr_split); 522SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0, 523 "Frame header/payload splitting Enable/Disable"); 524 525/* Allowable values are TRUE or FALSE. */ 526static int bce_strict_rx_mtu = FALSE; 527TUNABLE_INT("hw.bce.strict_rx_mtu", &bce_strict_rx_mtu); 528SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN, 529 &bce_strict_rx_mtu, 0, 530 "Enable/Disable strict RX frame size checking"); 531 532/* Allowable values are 0 ... 100 */ 533#ifdef BCE_DEBUG 534/* Generate 1 interrupt for every transmit completion. */ 535static int bce_tx_quick_cons_trip_int = 1; 536#else 537/* Generate 1 interrupt for every 20 transmit completions. */ 538static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; 539#endif 540TUNABLE_INT("hw.bce.tx_quick_cons_trip_int", &bce_tx_quick_cons_trip_int); 541SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN, 542 &bce_tx_quick_cons_trip_int, 0, 543 "Transmit BD trip point during interrupts"); 544 545/* Allowable values are 0 ... 100 */ 546/* Generate 1 interrupt for every transmit completion. */ 547#ifdef BCE_DEBUG 548static int bce_tx_quick_cons_trip = 1; 549#else 550/* Generate 1 interrupt for every 20 transmit completions. */ 551static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; 552#endif 553TUNABLE_INT("hw.bce.tx_quick_cons_trip", &bce_tx_quick_cons_trip); 554SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN, 555 &bce_tx_quick_cons_trip, 0, 556 "Transmit BD trip point"); 557 558/* Allowable values are 0 ... 
100 */ 559#ifdef BCE_DEBUG 560/* Generate an interrupt if 0us have elapsed since the last TX completion. */ 561static int bce_tx_ticks_int = 0; 562#else 563/* Generate an interrupt if 80us have elapsed since the last TX completion. */ 564static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; 565#endif 566TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 567SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN, 568 &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt"); 569 570/* Allowable values are 0 ... 100 */ 571#ifdef BCE_DEBUG 572/* Generate an interrupt if 0us have elapsed since the last TX completion. */ 573static int bce_tx_ticks = 0; 574#else 575/* Generate an interrupt if 80us have elapsed since the last TX completion. */ 576static int bce_tx_ticks = DEFAULT_TX_TICKS; 577#endif 578TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 579SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, 580 &bce_tx_ticks, 0, "Transmit ticks count"); 581 582/* Allowable values are 1 ... 100 */ 583#ifdef BCE_DEBUG 584/* Generate 1 interrupt for every received frame. */ 585static int bce_rx_quick_cons_trip_int = 1; 586#else 587/* Generate 1 interrupt for every 6 received frames. */ 588static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; 589#endif 590TUNABLE_INT("hw.bce.rx_quick_cons_trip_int", &bce_rx_quick_cons_trip_int); 591SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN, 592 &bce_rx_quick_cons_trip_int, 0, 593 "Receive BD trip point duirng interrupts"); 594 595/* Allowable values are 1 ... 100 */ 596#ifdef BCE_DEBUG 597/* Generate 1 interrupt for every received frame. */ 598static int bce_rx_quick_cons_trip = 1; 599#else 600/* Generate 1 interrupt for every 6 received frames. 
*/ 601static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; 602#endif 603TUNABLE_INT("hw.bce.rx_quick_cons_trip", &bce_rx_quick_cons_trip); 604SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN, 605 &bce_rx_quick_cons_trip, 0, 606 "Receive BD trip point"); 607 608/* Allowable values are 0 ... 100 */ 609#ifdef BCE_DEBUG 610/* Generate an int. if 0us have elapsed since the last received frame. */ 611static int bce_rx_ticks_int = 0; 612#else 613/* Generate an int. if 18us have elapsed since the last received frame. */ 614static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; 615#endif 616TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 617SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN, 618 &bce_rx_ticks_int, 0, "Receive ticks count during interrupt"); 619 620/* Allowable values are 0 ... 100 */ 621#ifdef BCE_DEBUG 622/* Generate an int. if 0us have elapsed since the last received frame. */ 623static int bce_rx_ticks = 0; 624#else 625/* Generate an int. if 18us have elapsed since the last received frame. */ 626static int bce_rx_ticks = DEFAULT_RX_TICKS; 627#endif 628TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 629SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, 630 &bce_rx_ticks, 0, "Receive ticks count"); 631 632 633/****************************************************************************/ 634/* Device probe function. */ 635/* */ 636/* Compares the device to the driver's list of supported devices and */ 637/* reports back to the OS whether this is the right driver for the device. */ 638/* */ 639/* Returns: */ 640/* BUS_PROBE_DEFAULT on success, positive value on failure. 
*/ 641/****************************************************************************/ 642static int 643bce_probe(device_t dev) 644{ 645 const struct bce_type *t; 646 struct bce_softc *sc; 647 char *descbuf; 648 u16 vid = 0, did = 0, svid = 0, sdid = 0; 649 650 t = bce_devs; 651 652 sc = device_get_softc(dev); 653 sc->bce_unit = device_get_unit(dev); 654 sc->bce_dev = dev; 655 656 /* Get the data for the device to be probed. */ 657 vid = pci_get_vendor(dev); 658 did = pci_get_device(dev); 659 svid = pci_get_subvendor(dev); 660 sdid = pci_get_subdevice(dev); 661 662 DBPRINT(sc, BCE_EXTREME_LOAD, 663 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 664 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 665 666 /* Look through the list of known devices for a match. */ 667 while(t->bce_name != NULL) { 668 669 if ((vid == t->bce_vid) && (did == t->bce_did) && 670 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && 671 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { 672 673 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 674 675 if (descbuf == NULL) 676 return(ENOMEM); 677 678 /* Print out the device identity. */ 679 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 680 t->bce_name, (((pci_read_config(dev, 681 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 682 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 683 684 device_set_desc_copy(dev, descbuf); 685 free(descbuf, M_TEMP); 686 return(BUS_PROBE_DEFAULT); 687 } 688 t++; 689 } 690 691 return(ENXIO); 692} 693 694 695/****************************************************************************/ 696/* PCI Capabilities Probe Function. */ 697/* */ 698/* Walks the PCI capabiites list for the device to find what features are */ 699/* supported. */ 700/* */ 701/* Returns: */ 702/* None. 
*/ 703/****************************************************************************/ 704static void 705bce_print_adapter_info(struct bce_softc *sc) 706{ 707 int i = 0; 708 709 DBENTER(BCE_VERBOSE_LOAD); 710 711 if (bce_verbose || bootverbose) { 712 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid); 713 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 714 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); 715 716 717 /* Bus info. */ 718 if (sc->bce_flags & BCE_PCIE_FLAG) { 719 printf("Bus (PCIe x%d, ", sc->link_width); 720 switch (sc->link_speed) { 721 case 1: printf("2.5Gbps); "); break; 722 case 2: printf("5Gbps); "); break; 723 default: printf("Unknown link speed); "); 724 } 725 } else { 726 printf("Bus (PCI%s, %s, %dMHz); ", 727 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), 728 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? 729 "32-bit" : "64-bit"), sc->bus_speed_mhz); 730 } 731 732 /* Firmware version and device features. */ 733 printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (", 734 sc->bce_bc_ver, sc->rx_pages, sc->tx_pages, 735 (bce_hdr_split == TRUE ? sc->pg_pages: 0)); 736 737 if (bce_hdr_split == TRUE) { 738 printf("SPLT"); 739 i++; 740 } 741 742 if (sc->bce_flags & BCE_USING_MSI_FLAG) { 743 if (i > 0) printf("|"); 744 printf("MSI"); i++; 745 } 746 747 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 748 if (i > 0) printf("|"); 749 printf("MSI-X"); i++; 750 } 751 752 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { 753 if (i > 0) printf("|"); 754 printf("2.5G"); i++; 755 } 756 757 if (sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) { 758 if (i > 0) printf("|"); 759 printf("Remote PHY(%s)", 760 sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG ? 
761 "FIBER" : "TP"); i++; 762 } 763 764 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 765 if (i > 0) printf("|"); 766 printf("MFW); MFW (%s)\n", sc->bce_mfw_ver); 767 } else { 768 printf(")\n"); 769 } 770 771 printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n", 772 sc->bce_rx_quick_cons_trip_int, 773 sc->bce_rx_quick_cons_trip, 774 sc->bce_rx_ticks_int, 775 sc->bce_rx_ticks, 776 sc->bce_tx_quick_cons_trip_int, 777 sc->bce_tx_quick_cons_trip, 778 sc->bce_tx_ticks_int, 779 sc->bce_tx_ticks); 780 781 } 782 783 DBEXIT(BCE_VERBOSE_LOAD); 784} 785 786 787/****************************************************************************/ 788/* PCI Capabilities Probe Function. */ 789/* */ 790/* Walks the PCI capabiites list for the device to find what features are */ 791/* supported. */ 792/* */ 793/* Returns: */ 794/* None. */ 795/****************************************************************************/ 796static void 797bce_probe_pci_caps(device_t dev, struct bce_softc *sc) 798{ 799 u32 reg; 800 801 DBENTER(BCE_VERBOSE_LOAD); 802 803 /* Check if PCI-X capability is enabled. */ 804 if (pci_find_cap(dev, PCIY_PCIX, ®) == 0) { 805 if (reg != 0) 806 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 807 } 808 809 /* Check if PCIe capability is enabled. */ 810 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 811 if (reg != 0) { 812 u16 link_status = pci_read_config(dev, reg + 0x12, 2); 813 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = " 814 "0x%08X\n", link_status); 815 sc->link_speed = link_status & 0xf; 816 sc->link_width = (link_status >> 4) & 0x3f; 817 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 818 sc->bce_flags |= BCE_PCIE_FLAG; 819 } 820 } 821 822 /* Check if MSI capability is enabled. */ 823 if (pci_find_cap(dev, PCIY_MSI, ®) == 0) { 824 if (reg != 0) 825 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG; 826 } 827 828 /* Check if MSI-X capability is enabled. 
/****************************************************************************/
/* Load and validate user tunable settings.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_tunables(struct bce_softc *sc)
{
	/* Set sysctl values for RX page count. */
	switch (bce_rx_pages) {
	case 1:
		/* fall-through */
	case 2:
		/* fall-through */
	case 4:
		/* fall-through */
	case 8:
		sc->rx_pages = bce_rx_pages;
		break;
	default:
		sc->rx_pages = DEFAULT_RX_PAGES;
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.rx_pages! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES);
	}

	/* ToDo: Consider allowing user setting for pg_pages. */
	sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES);

	/* Set sysctl values for TX page count. */
	switch (bce_tx_pages) {
	case 1:
		/* fall-through */
	case 2:
		/* fall-through */
	case 4:
		/* fall-through */
	case 8:
		sc->tx_pages = bce_tx_pages;
		break;
	default:
		sc->tx_pages = DEFAULT_TX_PAGES;
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.tx_pages! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES);
	}

	/*
	 * Validate the TX trip point (i.e. the number of
	 * TX completions before a status block update is
	 * generated and an interrupt is asserted.
	 */
	if (bce_tx_quick_cons_trip_int <= 100) {
		sc->bce_tx_quick_cons_trip_int =
		    bce_tx_quick_cons_trip_int;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.tx_quick_cons_trip_int! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_tx_quick_cons_trip_int,
		    DEFAULT_TX_QUICK_CONS_TRIP_INT);
		sc->bce_tx_quick_cons_trip_int =
		    DEFAULT_TX_QUICK_CONS_TRIP_INT;
	}

	if (bce_tx_quick_cons_trip <= 100) {
		sc->bce_tx_quick_cons_trip =
		    bce_tx_quick_cons_trip;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.tx_quick_cons_trip! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_tx_quick_cons_trip,
		    DEFAULT_TX_QUICK_CONS_TRIP);
		sc->bce_tx_quick_cons_trip =
		    DEFAULT_TX_QUICK_CONS_TRIP;
	}

	/*
	 * Validate the TX ticks count (i.e. the maximum amount
	 * of time to wait after the last TX completion has
	 * occurred before a status block update is generated
	 * and an interrupt is asserted.
	 */
	if (bce_tx_ticks_int <= 100) {
		sc->bce_tx_ticks_int =
		    bce_tx_ticks_int;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.tx_ticks_int! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_tx_ticks_int,
		    DEFAULT_TX_TICKS_INT);
		sc->bce_tx_ticks_int =
		    DEFAULT_TX_TICKS_INT;
	}

	if (bce_tx_ticks <= 100) {
		sc->bce_tx_ticks =
		    bce_tx_ticks;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.tx_ticks! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_tx_ticks,
		    DEFAULT_TX_TICKS);
		sc->bce_tx_ticks =
		    DEFAULT_TX_TICKS;
	}

	/*
	 * Validate the RX trip point (i.e. the number of
	 * RX frames received before a status block update is
	 * generated and an interrupt is asserted.
	 */
	if (bce_rx_quick_cons_trip_int <= 100) {
		sc->bce_rx_quick_cons_trip_int =
		    bce_rx_quick_cons_trip_int;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.rx_quick_cons_trip_int! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_rx_quick_cons_trip_int,
		    DEFAULT_RX_QUICK_CONS_TRIP_INT);
		sc->bce_rx_quick_cons_trip_int =
		    DEFAULT_RX_QUICK_CONS_TRIP_INT;
	}

	if (bce_rx_quick_cons_trip <= 100) {
		sc->bce_rx_quick_cons_trip =
		    bce_rx_quick_cons_trip;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.rx_quick_cons_trip! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_rx_quick_cons_trip,
		    DEFAULT_RX_QUICK_CONS_TRIP);
		sc->bce_rx_quick_cons_trip =
		    DEFAULT_RX_QUICK_CONS_TRIP;
	}

	/*
	 * Validate the RX ticks count (i.e. the maximum amount
	 * of time to wait after the last RX frame has been
	 * received before a status block update is generated
	 * and an interrupt is asserted.
	 */
	if (bce_rx_ticks_int <= 100) {
		sc->bce_rx_ticks_int = bce_rx_ticks_int;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.rx_ticks_int! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_rx_ticks_int,
		    DEFAULT_RX_TICKS_INT);
		sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
	}

	if (bce_rx_ticks <= 100) {
		sc->bce_rx_ticks = bce_rx_ticks;
	} else {
		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
		    "hw.bce.rx_ticks! Setting default of %d.\n",
		    __FILE__, __LINE__, bce_rx_ticks,
		    DEFAULT_RX_TICKS);
		sc->bce_rx_ticks = DEFAULT_RX_TICKS;
	}

	/* Disabling both RX ticks and RX trips will prevent interrupts. */
	if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) {
		BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and "
		    "hw.bce.rx_quick_cons_trip to 0. Setting default values.\n",
		    __FILE__, __LINE__);
		sc->bce_rx_ticks = DEFAULT_RX_TICKS;
		sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
	}

	/* Disabling both TX ticks and TX trips will prevent interrupts. */
	if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) {
		BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and "
		    "hw.bce.tx_quick_cons_trip to 0. Setting default values.\n",
		    __FILE__, __LINE__);
		sc->bce_tx_ticks = DEFAULT_TX_TICKS;
		sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
	}
}
/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 val;
	int count, error, rc = 0, rid;

	sc = device_get_softc(dev);
	sc->bce_dev = dev;

	DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

	sc->bce_unit = device_get_unit(dev);

	/* Set initial device and PHY flags */
	sc->bce_flags = 0;
	sc->bce_phy_flags = 0;

	bce_set_tunables(sc);

	pci_enable_busmaster(dev);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (sc->bce_res_mem == NULL) {
		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Get various resource handles. */
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);

	bce_probe_pci_caps(dev, sc);

	rid = 1;
	count = 0;
#if 0
	/* Try allocating MSI-X interrupts. */
	if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
	    (bce_msi_enable >= 2) &&
	    ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE)) != NULL)) {

		msi_needed = count = 1;

		if (((error = pci_alloc_msix(dev, &count)) != 0) ||
		    (count != msi_needed)) {
			BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
			    "Received = %d, error = %d\n", __FILE__, __LINE__,
			    msi_needed, count, error);
			count = 0;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY, rid,
			    sc->bce_res_irq);
			sc->bce_res_irq = NULL;
		} else {
			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
			    __FUNCTION__);
			sc->bce_flags |= BCE_USING_MSIX_FLAG;
		}
	}
#endif

	/* Try allocating a MSI interrupt. */
	if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
	    (bce_msi_enable >= 1) && (count == 0)) {
		count = 1;
		if ((error = pci_alloc_msi(dev, &count)) != 0) {
			BCE_PRINTF("%s(%d): MSI allocation failed! "
			    "error = %d\n", __FILE__, __LINE__, error);
			count = 0;
			pci_release_msi(dev);
		} else {
			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
			    "interrupt.\n", __FUNCTION__);
			sc->bce_flags |= BCE_USING_MSI_FLAG;
			/* 5709/5716 require one-shot MSI mode. */
			if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
				sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
			rid = 1;
		}
	}

	/* Try allocating a legacy interrupt. */
	if (count == 0) {
		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
		    __FUNCTION__);
		rid = 0;
	}

	/* Legacy (INTx) interrupts must be shareable; MSI must not be. */
	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE | (count != 0 ? 0 : RF_SHAREABLE));

	/* Report any IRQ allocation errors. */
	if (sc->bce_res_irq == NULL) {
		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Initialize mutex for the current device instance. */
	BCE_LOCK_INIT(sc, device_get_nameunit(dev));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch(BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
	case BCE_CHIP_ID_5709_B2:
		BCE_PRINTF("%s(%d): Unsupported controller "
		    "revision (%c%d)!\n", __FILE__, __LINE__,
		    (((pci_read_config(dev, PCIR_REVID, 4) &
		    0xf0) >> 4) + 'A'), (pci_read_config(dev,
		    PCIR_REVID, 4) & 0xf));
		rc = ENODEV;
		goto bce_attach_fail;
	}

	/*
	 * The embedded PCIe to PCI-X bridge (EPB)
	 * in the 5708 cannot address memory above
	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
	else
		sc->max_bus_addr = BUS_SPACE_MAXADDR;

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
		    (pci_get_function(sc->bce_dev) << 2));
	else
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
	    __FUNCTION__, sc->bce_shmem_base);

	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (int i = 0, j = 0; i < 3; i++) {
		u8 num;

		/* Each byte of the word is one dotted version component. */
		num = (u8) (val >> (24 - (i * 8)));
		/* Convert to decimal digits, suppressing leading zeros. */
		for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}

		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is enabled. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (int i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}

		/* Check if management firmware is running. */
		val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
		val &= BCE_CONDITION_MFW_RUN_MASK;
		if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
		    (val != BCE_CONDITION_MFW_RUN_NONE)) {
			u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
			int i = 0;

			/* Read the management firmware version string. */
			for (int j = 0; j < 3; j++) {
				val = bce_reg_rd_ind(sc, addr + j * 4);
				val = bswap32(val);
				memcpy(&sc->bce_mfw_ver[i], &val, 4);
				i += 4;
			}
		} else {
			/* May cause firmware synchronization timeouts. */
			BCE_PRINTF("%s(%d): Management firmware enabled "
			    "but not running!\n", __FILE__, __LINE__);
			strcpy(sc->bce_mfw_ver, "NOT RUNNING!");

			/* ToDo: Any action the driver should take? */
		}
	}

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Reset controller and announce to bootcode that driver is present. */
	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Initialize the controller. */
	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bce_nvram_test(sc)) {
		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Store data needed by PHY driver for backplane applications */
	sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
	sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	if (bce_dma_alloc(dev)) {
		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Allocate an ifnet structure. */
	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
		    __FILE__, __LINE__);
		rc = ENXIO;
		goto bce_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_mtu = ETHERMTU;

	if (bce_tso_enable) {
		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
		    IFCAP_VLAN_HWTSO;
	} else {
		ifp->if_hwassist = BCE_IF_HWASSIST;
		ifp->if_capabilities = BCE_IF_CAPABILITIES;
	}

#if __FreeBSD_version >= 800505
	/*
	 * Introducing IFCAP_LINKSTATE didn't bump __FreeBSD_version
	 * so it's approximate value.
	 */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
		ifp->if_capabilities |= IFCAP_LINKSTATE;
#endif

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Assume standard mbuf sizes for buffer allocation.
	 * This may change later if the MTU size is set to
	 * something other than 1500.
	 */
	bce_get_rx_buffer_sizes(sc,
	    (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN));

	/* Recalculate our buffer allocation sizes. */
	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD_ALLOC;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Mbps(2500ULL);
	else
		ifp->if_baudrate = IF_Mbps(1000);

	/* Handle any special PHY initialization for SerDes PHYs. */
	bce_init_media(sc);

	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		ifmedia_init(&sc->bce_ifmedia, IFM_IMASK, bce_ifmedia_upd,
		    bce_ifmedia_sts);
		/*
		 * We can't manually override remote PHY's link and assume
		 * PHY port configuration(Fiber or TP) is not changed after
		 * device attach. This may not be correct though.
		 */
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) {
			if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
				ifmedia_add(&sc->bce_ifmedia,
				    IFM_ETHER | IFM_2500_SX, 0, NULL);
				ifmedia_add(&sc->bce_ifmedia,
				    IFM_ETHER | IFM_2500_SX | IFM_FDX, 0, NULL);
			}
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_1000_SX, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_10_T, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_100_TX, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
			ifmedia_add(&sc->bce_ifmedia,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
		ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO);
		sc->bce_ifmedia.ifm_media = sc->bce_ifmedia.ifm_cur->ifm_media;
	} else {
		/* MII child bus by attaching the PHY. */
		rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
		    bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (rc != 0) {
			BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
			    __LINE__);
			goto bce_attach_fail;
		}
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr);

#if __FreeBSD_version < 500000
	callout_init(&sc->bce_tick_callout);
	callout_init(&sc->bce_pulse_callout);
#else
	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
#endif

	/* Hookup IRQ last. */
	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bce_intr, sc, &sc->bce_intrhand);

	if (rc) {
		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
		    __FILE__, __LINE__);
		/* ether_ifattach() succeeded, so a full detach is required. */
		bce_detach(dev);
		goto bce_attach_exit;
	}

	/*
	 * At this point we've acquired all the resources
	 * we need to run so there's no turning back, we're
	 * cleared for launch.
	 */

	/* Print some important debugging info. */
	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	BCE_LOCK(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present. We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	bce_mgmt_init_locked(sc);
	BCE_UNLOCK(sc);

	/* Finally, print some useful adapter info */
	bce_print_adapter_info(sc);
	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
	    __FUNCTION__, sc);

	goto bce_attach_exit;

bce_attach_fail:
	bce_release_resources(sc);

bce_attach_exit:

	DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

	return(rc);
}
*/ 1536 BCE_LOCK(sc); 1537 1538 /* Stop the pulse so the bootcode can go to driver absent state. */ 1539 callout_stop(&sc->bce_pulse_callout); 1540 1541 bce_stop(sc); 1542 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1543 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1544 else 1545 msg = BCE_DRV_MSG_CODE_UNLOAD; 1546 bce_reset(sc, msg); 1547 1548 BCE_UNLOCK(sc); 1549 1550 ether_ifdetach(ifp); 1551 1552 /* If we have a child device on the MII bus remove it too. */ 1553 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) 1554 ifmedia_removeall(&sc->bce_ifmedia); 1555 else { 1556 bus_generic_detach(dev); 1557 device_delete_child(dev, sc->bce_miibus); 1558 } 1559 1560 /* Release all remaining resources. */ 1561 bce_release_resources(sc); 1562 1563 DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); 1564 1565 return(0); 1566} 1567 1568 1569/****************************************************************************/ 1570/* Device shutdown function. */ 1571/* */ 1572/* Stops and resets the controller. */ 1573/* */ 1574/* Returns: */ 1575/* 0 on success, positive value on failure. */ 1576/****************************************************************************/ 1577static int 1578bce_shutdown(device_t dev) 1579{ 1580 struct bce_softc *sc = device_get_softc(dev); 1581 u32 msg; 1582 1583 DBENTER(BCE_VERBOSE); 1584 1585 BCE_LOCK(sc); 1586 bce_stop(sc); 1587 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1588 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1589 else 1590 msg = BCE_DRV_MSG_CODE_UNLOAD; 1591 bce_reset(sc, msg); 1592 BCE_UNLOCK(sc); 1593 1594 DBEXIT(BCE_VERBOSE); 1595 1596 return (0); 1597} 1598 1599 1600#ifdef BCE_DEBUG 1601/****************************************************************************/ 1602/* Register read. */ 1603/* */ 1604/* Returns: */ 1605/* The value of the register. 
*/ 1606/****************************************************************************/ 1607static u32 1608bce_reg_rd(struct bce_softc *sc, u32 offset) 1609{ 1610 u32 val = REG_RD(sc, offset); 1611 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1612 __FUNCTION__, offset, val); 1613 return val; 1614} 1615 1616 1617/****************************************************************************/ 1618/* Register write (16 bit). */ 1619/* */ 1620/* Returns: */ 1621/* Nothing. */ 1622/****************************************************************************/ 1623static void 1624bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val) 1625{ 1626 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n", 1627 __FUNCTION__, offset, val); 1628 REG_WR16(sc, offset, val); 1629} 1630 1631 1632/****************************************************************************/ 1633/* Register write. */ 1634/* */ 1635/* Returns: */ 1636/* Nothing. */ 1637/****************************************************************************/ 1638static void 1639bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val) 1640{ 1641 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1642 __FUNCTION__, offset, val); 1643 REG_WR(sc, offset, val); 1644} 1645#endif 1646 1647/****************************************************************************/ 1648/* Indirect register read. */ 1649/* */ 1650/* Reads NetXtreme II registers using an index/data register pair in PCI */ 1651/* configuration space. Using this mechanism avoids issues with posted */ 1652/* reads but is much slower than memory-mapped I/O. */ 1653/* */ 1654/* Returns: */ 1655/* The value of the register. 
*/ 1656/****************************************************************************/ 1657static u32 1658bce_reg_rd_ind(struct bce_softc *sc, u32 offset) 1659{ 1660 device_t dev; 1661 dev = sc->bce_dev; 1662 1663 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1664#ifdef BCE_DEBUG 1665 { 1666 u32 val; 1667 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1668 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1669 __FUNCTION__, offset, val); 1670 return val; 1671 } 1672#else 1673 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1674#endif 1675} 1676 1677 1678/****************************************************************************/ 1679/* Indirect register write. */ 1680/* */ 1681/* Writes NetXtreme II registers using an index/data register pair in PCI */ 1682/* configuration space. Using this mechanism avoids issues with posted */ 1683/* writes but is muchh slower than memory-mapped I/O. */ 1684/* */ 1685/* Returns: */ 1686/* Nothing. */ 1687/****************************************************************************/ 1688static void 1689bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val) 1690{ 1691 device_t dev; 1692 dev = sc->bce_dev; 1693 1694 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1695 __FUNCTION__, offset, val); 1696 1697 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1698 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1699} 1700 1701 1702/****************************************************************************/ 1703/* Shared memory write. */ 1704/* */ 1705/* Writes NetXtreme II shared memory region. */ 1706/* */ 1707/* Returns: */ 1708/* Nothing. 
*/ 1709/****************************************************************************/ 1710static void 1711bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val) 1712{ 1713 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to " 1714 "0x%08X\n", __FUNCTION__, val, offset); 1715 1716 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1717} 1718 1719 1720/****************************************************************************/ 1721/* Shared memory read. */ 1722/* */ 1723/* Reads NetXtreme II shared memory region. */ 1724/* */ 1725/* Returns: */ 1726/* The 32 bit value read. */ 1727/****************************************************************************/ 1728static u32 1729bce_shmem_rd(struct bce_softc *sc, u32 offset) 1730{ 1731 u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1732 1733 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from " 1734 "0x%08X\n", __FUNCTION__, val, offset); 1735 1736 return val; 1737} 1738 1739 1740#ifdef BCE_DEBUG 1741/****************************************************************************/ 1742/* Context memory read. */ 1743/* */ 1744/* The NetXtreme II controller uses context memory to track connection */ 1745/* information for L2 and higher network protocols. */ 1746/* */ 1747/* Returns: */ 1748/* The requested 32 bit value of context memory. 
*/ 1749/****************************************************************************/ 1750static u32 1751bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset) 1752{ 1753 u32 idx, offset, retry_cnt = 5, val; 1754 1755 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || 1756 cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID " 1757 "address: 0x%08X.\n", __FUNCTION__, cid_addr)); 1758 1759 offset = ctx_offset + cid_addr; 1760 1761 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 1762 1763 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ)); 1764 1765 for (idx = 0; idx < retry_cnt; idx++) { 1766 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1767 if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0) 1768 break; 1769 DELAY(5); 1770 } 1771 1772 if (val & BCE_CTX_CTX_CTRL_READ_REQ) 1773 BCE_PRINTF("%s(%d); Unable to read CTX memory: " 1774 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1775 __FILE__, __LINE__, cid_addr, ctx_offset); 1776 1777 val = REG_RD(sc, BCE_CTX_CTX_DATA); 1778 } else { 1779 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1780 val = REG_RD(sc, BCE_CTX_DATA); 1781 } 1782 1783 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " 1784 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val); 1785 1786 return(val); 1787} 1788#endif 1789 1790 1791/****************************************************************************/ 1792/* Context memory write. */ 1793/* */ 1794/* The NetXtreme II controller uses context memory to track connection */ 1795/* information for L2 and higher network protocols. */ 1796/* */ 1797/* Returns: */ 1798/* Nothing. 
*/ 1799/****************************************************************************/ 1800static void 1801bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val) 1802{ 1803 u32 idx, offset = ctx_offset + cid_addr; 1804 u32 val, retry_cnt = 5; 1805 1806 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " 1807 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val); 1808 1809 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), 1810 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n", 1811 __FUNCTION__, cid_addr)); 1812 1813 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 1814 1815 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); 1816 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); 1817 1818 for (idx = 0; idx < retry_cnt; idx++) { 1819 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1820 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) 1821 break; 1822 DELAY(5); 1823 } 1824 1825 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) 1826 BCE_PRINTF("%s(%d); Unable to write CTX memory: " 1827 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1828 __FILE__, __LINE__, cid_addr, ctx_offset); 1829 1830 } else { 1831 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1832 REG_WR(sc, BCE_CTX_DATA, ctx_val); 1833 } 1834} 1835 1836 1837/****************************************************************************/ 1838/* PHY register read. */ 1839/* */ 1840/* Implements register reads on the MII bus. */ 1841/* */ 1842/* Returns: */ 1843/* The value of the register. */ 1844/****************************************************************************/ 1845static int 1846bce_miibus_read_reg(device_t dev, int phy, int reg) 1847{ 1848 struct bce_softc *sc; 1849 u32 val; 1850 int i; 1851 1852 sc = device_get_softc(dev); 1853 1854 /* 1855 * The 5709S PHY is an IEEE Clause 45 PHY 1856 * with special mappings to work with IEEE 1857 * Clause 22 register accesses. 
1858 */ 1859 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { 1860 if (reg >= MII_BMCR && reg <= MII_ANLPRNP) 1861 reg += 0x10; 1862 } 1863 1864 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1865 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1866 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1867 1868 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1869 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1870 1871 DELAY(40); 1872 } 1873 1874 1875 val = BCE_MIPHY(phy) | BCE_MIREG(reg) | 1876 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | 1877 BCE_EMAC_MDIO_COMM_START_BUSY; 1878 REG_WR(sc, BCE_EMAC_MDIO_COMM, val); 1879 1880 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1881 DELAY(10); 1882 1883 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1884 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1885 DELAY(5); 1886 1887 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1888 val &= BCE_EMAC_MDIO_COMM_DATA; 1889 1890 break; 1891 } 1892 } 1893 1894 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { 1895 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, " 1896 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg); 1897 val = 0x0; 1898 } else { 1899 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1900 } 1901 1902 1903 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1904 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1905 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1906 1907 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1908 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1909 1910 DELAY(40); 1911 } 1912 1913 DB_PRINT_PHY_REG(reg, val); 1914 return (val & 0xffff); 1915} 1916 1917 1918/****************************************************************************/ 1919/* PHY register write. */ 1920/* */ 1921/* Implements register writes on the MII bus. */ 1922/* */ 1923/* Returns: */ 1924/* The value of the register. 
 */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	DB_PRINT_PHY_REG(reg, val);

	/*
	 * The 5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Pause hardware auto-polling while the MDIO bus is used directly. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll for completion (the BUSY bit self-clears when done). */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF("%s(%d): PHY write timeout!\n",
		    __FILE__, __LINE__);

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}


/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;
	struct ifmediareq ifmr;
	int media_active, media_status, val;

	sc = device_get_softc(dev);

	DBENTER(BCE_VERBOSE_PHY);

	/* Fetch link state from the remote PHY or the local MII layer. */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		bzero(&ifmr, sizeof(ifmr));
		bce_ifmedia_sts_rphy(sc, &ifmr);
		media_active = ifmr.ifm_active;
		media_status = ifmr.ifm_status;
	} else {
		mii = device_get_softc(sc->bce_miibus);
		media_active = mii->mii_media_active;
		media_status = mii->mii_media_status;
	}

	/* Ignore invalid media status. */
	if ((media_status & (IFM_ACTIVE | IFM_AVALID)) !=
	    (IFM_ACTIVE | IFM_AVALID))
		goto bce_miibus_statchg_exit;

	/* Clear the port, duplex, loopback and force-link fields first. */
	val = REG_RD(sc, BCE_EMAC_MODE);
	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
	    BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
	    BCE_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the PHY speed. */
	switch (IFM_SUBTYPE(media_active)) {
	case IFM_10_T:
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			DBPRINT(sc, BCE_INFO_PHY,
			    "Enabling 10Mb interface.\n");
			val |= BCE_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* fall-through: the 5706 uses plain MII mode for 10Mb. */
	case IFM_100_TX:
		DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
		val |= BCE_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
		val |= BCE_EMAC_MODE_25G;
		/* fall-through: 2.5G also requires the GMII port mode. */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
		break;
	default:
		DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
		    "default GMII interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
	}

	/* Set half or full duplex based on PHY settings. */
	if ((IFM_OPTIONS(media_active) & IFM_FDX) == 0) {
		DBPRINT(sc, BCE_INFO_PHY,
		    "Setting Half-Duplex interface.\n");
		val |= BCE_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BCE_INFO_PHY,
		    "Setting Full-Duplex interface.\n");

	REG_WR(sc, BCE_EMAC_MODE, val);

	/* Enable or disable RX flow control to match PHY negotiation. */
	if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0) {
		DBPRINT(sc, BCE_INFO_PHY,
		    "%s(): Enabling RX flow control.\n", __FUNCTION__);
		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
		sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL;
	} else {
		DBPRINT(sc, BCE_INFO_PHY,
		    "%s(): Disabling RX flow control.\n", __FUNCTION__);
		BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
		sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL;
	}

	/* Enable or disable TX flow control to match PHY negotiation. */
	if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) {
		DBPRINT(sc, BCE_INFO_PHY,
		    "%s(): Enabling TX flow control.\n", __FUNCTION__);
		BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
		sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
	} else {
		DBPRINT(sc, BCE_INFO_PHY,
		    "%s(): Disabling TX flow control.\n", __FUNCTION__);
		BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
		sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
	}

	/* ToDo: Update watermarks in bce_init_rx_context(). */

bce_miibus_statchg_exit:
	DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 2 is the one used by the driver (ARB_REQ_SET2/ARB2 below); the      */
/* remaining locks are reserved for firmware use.                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	u32 val;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	/* Poll until the arbiter grants the driver's (lock 2) request. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}


/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 2 is the one used by the driver; the remaining locks are reserved   */
/* for firmware use.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 2150/****************************************************************************/ 2151static int 2152bce_release_nvram_lock(struct bce_softc *sc) 2153{ 2154 u32 val; 2155 int j, rc = 0; 2156 2157 DBENTER(BCE_VERBOSE_NVRAM); 2158 2159 /* 2160 * Relinquish nvram interface. 2161 */ 2162 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 2163 2164 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 2165 val = REG_RD(sc, BCE_NVM_SW_ARB); 2166 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 2167 break; 2168 2169 DELAY(5); 2170 } 2171 2172 if (j >= NVRAM_TIMEOUT_COUNT) { 2173 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); 2174 rc = EBUSY; 2175 } 2176 2177 DBEXIT(BCE_VERBOSE_NVRAM); 2178 return (rc); 2179} 2180 2181 2182#ifdef BCE_NVRAM_WRITE_SUPPORT 2183/****************************************************************************/ 2184/* Enable NVRAM write access. */ 2185/* */ 2186/* Before writing to NVRAM the caller must enable NVRAM writes. */ 2187/* */ 2188/* Returns: */ 2189/* 0 on success, positive value on failure. 
*/ 2190/****************************************************************************/ 2191static int 2192bce_enable_nvram_write(struct bce_softc *sc) 2193{ 2194 u32 val; 2195 int rc = 0; 2196 2197 DBENTER(BCE_VERBOSE_NVRAM); 2198 2199 val = REG_RD(sc, BCE_MISC_CFG); 2200 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); 2201 2202 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2203 int j; 2204 2205 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 2206 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); 2207 2208 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 2209 DELAY(5); 2210 2211 val = REG_RD(sc, BCE_NVM_COMMAND); 2212 if (val & BCE_NVM_COMMAND_DONE) 2213 break; 2214 } 2215 2216 if (j >= NVRAM_TIMEOUT_COUNT) { 2217 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); 2218 rc = EBUSY; 2219 } 2220 } 2221 2222 DBENTER(BCE_VERBOSE_NVRAM); 2223 return (rc); 2224} 2225 2226 2227/****************************************************************************/ 2228/* Disable NVRAM write access. */ 2229/* */ 2230/* When the caller is finished writing to NVRAM write access must be */ 2231/* disabled. */ 2232/* */ 2233/* Returns: */ 2234/* Nothing. */ 2235/****************************************************************************/ 2236static void 2237bce_disable_nvram_write(struct bce_softc *sc) 2238{ 2239 u32 val; 2240 2241 DBENTER(BCE_VERBOSE_NVRAM); 2242 2243 val = REG_RD(sc, BCE_MISC_CFG); 2244 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); 2245 2246 DBEXIT(BCE_VERBOSE_NVRAM); 2247 2248} 2249#endif 2250 2251 2252/****************************************************************************/ 2253/* Enable NVRAM access. */ 2254/* */ 2255/* Before accessing NVRAM for read or write operations the caller must */ 2256/* enabled NVRAM access. */ 2257/* */ 2258/* Returns: */ 2259/* Nothing. 
 */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
	    BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);

	DBEXIT(BCE_VERBOSE_NVRAM);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
	    ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));

	DBEXIT(BCE_VERBOSE_NVRAM);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is      */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
		goto bce_nvram_erase_page_exit;

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	    BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		rc = EBUSY;
	}

bce_nvram_erase_page_exit:
	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc,
    u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	DBENTER(BCE_EXTREME_NVRAM);

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* Convert via bce_be32toh() before returning bytes. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_EXTREME_NVRAM);
	return(rc);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
    u32 cmd_flags)
{
	u32 cmd, val32;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* The 5709/5716 uses a fixed flash layout; skip device probing. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface.
 */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc,BCE_INFO_LOAD,
		    "bce_init_nvram(): Flash WAS reconfigured.\n");

		/* Match on the backup strapping bits in config1. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u32 mask;

		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
		    __FUNCTION__);

		/* Bit 23 selects which strapping bits identify the device. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {

			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		DBEXIT(BCE_VERBOSE_NVRAM);
		return (ENODEV);
	}

bce_init_nvram_get_flash_size:
	/* Write the flash config data to the shared memory interface. */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	/* Prefer the size reported by shared memory; fall back to the table. */
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
	    __FUNCTION__, sc->bce_flash_info->name,
	    sc->bce_flash_info->total_size);

	DBEXIT(BCE_VERBOSE_NVRAM);
	return rc;
}


/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data     */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.             */
/****************************************************************************/
static int
bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
    int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Nothing to do for a zero-length read. */
	if (buf_size == 0)
		goto bce_nvram_read_exit;

	/* Request access to the flash interface.
*/ 2619 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2620 goto bce_nvram_read_exit; 2621 2622 /* Enable access to flash interface */ 2623 bce_enable_nvram_access(sc); 2624 2625 len32 = buf_size; 2626 offset32 = offset; 2627 extra = 0; 2628 2629 cmd_flags = 0; 2630 2631 if (offset32 & 3) { 2632 u8 buf[4]; 2633 u32 pre_len; 2634 2635 offset32 &= ~3; 2636 pre_len = 4 - (offset & 3); 2637 2638 if (pre_len >= len32) { 2639 pre_len = len32; 2640 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 2641 } 2642 else { 2643 cmd_flags = BCE_NVM_COMMAND_FIRST; 2644 } 2645 2646 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2647 2648 if (rc) 2649 return rc; 2650 2651 memcpy(ret_buf, buf + (offset & 3), pre_len); 2652 2653 offset32 += 4; 2654 ret_buf += pre_len; 2655 len32 -= pre_len; 2656 } 2657 2658 if (len32 & 3) { 2659 extra = 4 - (len32 & 3); 2660 len32 = (len32 + 4) & ~3; 2661 } 2662 2663 if (len32 == 4) { 2664 u8 buf[4]; 2665 2666 if (cmd_flags) 2667 cmd_flags = BCE_NVM_COMMAND_LAST; 2668 else 2669 cmd_flags = BCE_NVM_COMMAND_FIRST | 2670 BCE_NVM_COMMAND_LAST; 2671 2672 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2673 2674 memcpy(ret_buf, buf, 4 - extra); 2675 } 2676 else if (len32 > 0) { 2677 u8 buf[4]; 2678 2679 /* Read the first word. */ 2680 if (cmd_flags) 2681 cmd_flags = 0; 2682 else 2683 cmd_flags = BCE_NVM_COMMAND_FIRST; 2684 2685 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 2686 2687 /* Advance to the next dword. */ 2688 offset32 += 4; 2689 ret_buf += 4; 2690 len32 -= 4; 2691 2692 while (len32 > 4 && rc == 0) { 2693 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 2694 2695 /* Advance to the next dword. 
*/ 2696 offset32 += 4; 2697 ret_buf += 4; 2698 len32 -= 4; 2699 } 2700 2701 if (rc) 2702 goto bce_nvram_read_locked_exit; 2703 2704 cmd_flags = BCE_NVM_COMMAND_LAST; 2705 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2706 2707 memcpy(ret_buf, buf, 4 - extra); 2708 } 2709 2710bce_nvram_read_locked_exit: 2711 /* Disable access to flash interface and release the lock. */ 2712 bce_disable_nvram_access(sc); 2713 bce_release_nvram_lock(sc); 2714 2715bce_nvram_read_exit: 2716 DBEXIT(BCE_VERBOSE_NVRAM); 2717 return rc; 2718} 2719 2720 2721#ifdef BCE_NVRAM_WRITE_SUPPORT 2722/****************************************************************************/ 2723/* Write an arbitrary range of data from NVRAM. */ 2724/* */ 2725/* Prepares the NVRAM interface for write access and writes the requested */ 2726/* data from the supplied buffer. The caller is responsible for */ 2727/* calculating any appropriate CRCs. */ 2728/* */ 2729/* Returns: */ 2730/* 0 on success, positive value on failure. */ 2731/****************************************************************************/ 2732static int 2733bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, 2734 int buf_size) 2735{ 2736 u32 written, offset32, len32; 2737 u8 *buf, start[4], end[4]; 2738 int rc = 0; 2739 int align_start, align_end; 2740 2741 DBENTER(BCE_VERBOSE_NVRAM); 2742 2743 buf = data_buf; 2744 offset32 = offset; 2745 len32 = buf_size; 2746 align_start = align_end = 0; 2747 2748 if ((align_start = (offset32 & 3))) { 2749 offset32 &= ~3; 2750 len32 += align_start; 2751 if ((rc = bce_nvram_read(sc, offset32, start, 4))) 2752 goto bce_nvram_write_exit; 2753 } 2754 2755 if (len32 & 3) { 2756 if ((len32 > 4) || !align_start) { 2757 align_end = 4 - (len32 & 3); 2758 len32 += align_end; 2759 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, 2760 end, 4))) { 2761 goto bce_nvram_write_exit; 2762 } 2763 } 2764 } 2765 2766 if (align_start || align_end) { 2767 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 2768 if (buf 
== 0) { 2769 rc = ENOMEM; 2770 goto bce_nvram_write_exit; 2771 } 2772 2773 if (align_start) { 2774 memcpy(buf, start, 4); 2775 } 2776 2777 if (align_end) { 2778 memcpy(buf + len32 - 4, end, 4); 2779 } 2780 memcpy(buf + align_start, data_buf, buf_size); 2781 } 2782 2783 written = 0; 2784 while ((written < len32) && (rc == 0)) { 2785 u32 page_start, page_end, data_start, data_end; 2786 u32 addr, cmd_flags; 2787 int i; 2788 u8 flash_buffer[264]; 2789 2790 /* Find the page_start addr */ 2791 page_start = offset32 + written; 2792 page_start -= (page_start % sc->bce_flash_info->page_size); 2793 /* Find the page_end addr */ 2794 page_end = page_start + sc->bce_flash_info->page_size; 2795 /* Find the data_start addr */ 2796 data_start = (written == 0) ? offset32 : page_start; 2797 /* Find the data_end addr */ 2798 data_end = (page_end > offset32 + len32) ? 2799 (offset32 + len32) : page_end; 2800 2801 /* Request access to the flash interface. */ 2802 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2803 goto bce_nvram_write_exit; 2804 2805 /* Enable access to flash interface */ 2806 bce_enable_nvram_access(sc); 2807 2808 cmd_flags = BCE_NVM_COMMAND_FIRST; 2809 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2810 int j; 2811 2812 /* Read the whole page into the buffer 2813 * (non-buffer flash only) */ 2814 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { 2815 if (j == (sc->bce_flash_info->page_size - 4)) { 2816 cmd_flags |= BCE_NVM_COMMAND_LAST; 2817 } 2818 rc = bce_nvram_read_dword(sc, 2819 page_start + j, 2820 &flash_buffer[j], 2821 cmd_flags); 2822 2823 if (rc) 2824 goto bce_nvram_write_locked_exit; 2825 2826 cmd_flags = 0; 2827 } 2828 } 2829 2830 /* Enable writes to flash interface (unlock write-protect) */ 2831 if ((rc = bce_enable_nvram_write(sc)) != 0) 2832 goto bce_nvram_write_locked_exit; 2833 2834 /* Erase the page */ 2835 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) 2836 goto bce_nvram_write_locked_exit; 2837 2838 /* Re-enable the write again 
for the actual write */ 2839 bce_enable_nvram_write(sc); 2840 2841 /* Loop to write back the buffer data from page_start to 2842 * data_start */ 2843 i = 0; 2844 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2845 for (addr = page_start; addr < data_start; 2846 addr += 4, i += 4) { 2847 2848 rc = bce_nvram_write_dword(sc, addr, 2849 &flash_buffer[i], cmd_flags); 2850 2851 if (rc != 0) 2852 goto bce_nvram_write_locked_exit; 2853 2854 cmd_flags = 0; 2855 } 2856 } 2857 2858 /* Loop to write the new data from data_start to data_end */ 2859 for (addr = data_start; addr < data_end; addr += 4, i++) { 2860 if ((addr == page_end - 4) || 2861 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && 2862 (addr == data_end - 4))) { 2863 2864 cmd_flags |= BCE_NVM_COMMAND_LAST; 2865 } 2866 rc = bce_nvram_write_dword(sc, addr, buf, 2867 cmd_flags); 2868 2869 if (rc != 0) 2870 goto bce_nvram_write_locked_exit; 2871 2872 cmd_flags = 0; 2873 buf += 4; 2874 } 2875 2876 /* Loop to write back the buffer data from data_end 2877 * to page_end */ 2878 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2879 for (addr = data_end; addr < page_end; 2880 addr += 4, i += 4) { 2881 2882 if (addr == page_end-4) { 2883 cmd_flags = BCE_NVM_COMMAND_LAST; 2884 } 2885 rc = bce_nvram_write_dword(sc, addr, 2886 &flash_buffer[i], cmd_flags); 2887 2888 if (rc != 0) 2889 goto bce_nvram_write_locked_exit; 2890 2891 cmd_flags = 0; 2892 } 2893 } 2894 2895 /* Disable writes to flash interface (lock write-protect) */ 2896 bce_disable_nvram_write(sc); 2897 2898 /* Disable access to flash interface */ 2899 bce_disable_nvram_access(sc); 2900 bce_release_nvram_lock(sc); 2901 2902 /* Increment written */ 2903 written += data_end - data_start; 2904 } 2905 2906 goto bce_nvram_write_exit; 2907 2908bce_nvram_write_locked_exit: 2909 bce_disable_nvram_write(sc); 2910 bce_disable_nvram_access(sc); 2911 bce_release_nvram_lock(sc); 2912 2913bce_nvram_write_exit: 2914 if (align_start || align_end) 2915 free(buf, M_DEVBUF); 
2916 2917 DBEXIT(BCE_VERBOSE_NVRAM); 2918 return (rc); 2919} 2920#endif /* BCE_NVRAM_WRITE_SUPPORT */ 2921 2922 2923/****************************************************************************/ 2924/* Verifies that NVRAM is accessible and contains valid data. */ 2925/* */ 2926/* Reads the configuration data from NVRAM and verifies that the CRC is */ 2927/* correct. */ 2928/* */ 2929/* Returns: */ 2930/* 0 on success, positive value on failure. */ 2931/****************************************************************************/ 2932static int 2933bce_nvram_test(struct bce_softc *sc) 2934{ 2935 u32 buf[BCE_NVRAM_SIZE / 4]; 2936 u8 *data = (u8 *) buf; 2937 int rc = 0; 2938 u32 magic, csum; 2939 2940 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2941 2942 /* 2943 * Check that the device NVRAM is valid by reading 2944 * the magic value at offset 0. 2945 */ 2946 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) { 2947 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", 2948 __FILE__, __LINE__); 2949 goto bce_nvram_test_exit; 2950 } 2951 2952 /* 2953 * Verify that offset 0 of the NVRAM contains 2954 * a valid magic number. 2955 */ 2956 magic = bce_be32toh(buf[0]); 2957 if (magic != BCE_NVRAM_MAGIC) { 2958 rc = ENODEV; 2959 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! " 2960 "Expected: 0x%08X, Found: 0x%08X\n", 2961 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); 2962 goto bce_nvram_test_exit; 2963 } 2964 2965 /* 2966 * Verify that the device NVRAM includes valid 2967 * configuration data. 2968 */ 2969 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) { 2970 BCE_PRINTF("%s(%d): Unable to read manufacturing " 2971 "Information from NVRAM!\n", __FILE__, __LINE__); 2972 goto bce_nvram_test_exit; 2973 } 2974 2975 csum = ether_crc32_le(data, 0x100); 2976 if (csum != BCE_CRC32_RESIDUAL) { 2977 rc = ENODEV; 2978 BCE_PRINTF("%s(%d): Invalid manufacturing information " 2979 "NVRAM CRC! 
Expected: 0x%08X, Found: 0x%08X\n", 2980 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2981 goto bce_nvram_test_exit; 2982 } 2983 2984 csum = ether_crc32_le(data + 0x100, 0x100); 2985 if (csum != BCE_CRC32_RESIDUAL) { 2986 rc = ENODEV; 2987 BCE_PRINTF("%s(%d): Invalid feature configuration " 2988 "information NVRAM CRC! Expected: 0x%08X, " 2989 "Found: 08%08X\n", __FILE__, __LINE__, 2990 BCE_CRC32_RESIDUAL, csum); 2991 } 2992 2993bce_nvram_test_exit: 2994 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2995 return rc; 2996} 2997 2998 2999/****************************************************************************/ 3000/* Calculates the size of the buffers to allocate based on the MTU. */ 3001/* */ 3002/* Returns: */ 3003/* Nothing. */ 3004/****************************************************************************/ 3005static void 3006bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu) 3007{ 3008 DBENTER(BCE_VERBOSE_LOAD); 3009 3010 /* Use a single allocation type when header splitting enabled. */ 3011 if (bce_hdr_split == TRUE) { 3012 sc->rx_bd_mbuf_alloc_size = MHLEN; 3013 /* Make sure offset is 16 byte aligned for hardware. */ 3014 sc->rx_bd_mbuf_align_pad = 3015 roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN); 3016 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - 3017 sc->rx_bd_mbuf_align_pad; 3018 } else { 3019 if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 3020 ETHER_CRC_LEN) > MCLBYTES) { 3021 /* Setup for jumbo RX buffer allocations. */ 3022 sc->rx_bd_mbuf_alloc_size = MJUM9BYTES; 3023 sc->rx_bd_mbuf_align_pad = 3024 roundup2(MJUM9BYTES, 16) - MJUM9BYTES; 3025 sc->rx_bd_mbuf_data_len = 3026 sc->rx_bd_mbuf_alloc_size - 3027 sc->rx_bd_mbuf_align_pad; 3028 } else { 3029 /* Setup for standard RX buffer allocations. 
*/ 3030 sc->rx_bd_mbuf_alloc_size = MCLBYTES; 3031 sc->rx_bd_mbuf_align_pad = 3032 roundup2(MCLBYTES, 16) - MCLBYTES; 3033 sc->rx_bd_mbuf_data_len = 3034 sc->rx_bd_mbuf_alloc_size - 3035 sc->rx_bd_mbuf_align_pad; 3036 } 3037 } 3038 3039// DBPRINT(sc, BCE_INFO_LOAD, 3040 DBPRINT(sc, BCE_WARN, 3041 "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, " 3042 "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__, 3043 sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len, 3044 sc->rx_bd_mbuf_align_pad); 3045 3046 DBEXIT(BCE_VERBOSE_LOAD); 3047} 3048 3049/****************************************************************************/ 3050/* Identifies the current media type of the controller and sets the PHY */ 3051/* address. */ 3052/* */ 3053/* Returns: */ 3054/* Nothing. */ 3055/****************************************************************************/ 3056static void 3057bce_get_media(struct bce_softc *sc) 3058{ 3059 u32 val; 3060 3061 DBENTER(BCE_VERBOSE_PHY); 3062 3063 /* Assume PHY address for copper controllers. */ 3064 sc->bce_phy_addr = 1; 3065 3066 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 3067 u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 3068 u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 3069 u32 strap; 3070 3071 /* 3072 * The BCM5709S is software configurable 3073 * for Copper or SerDes operation. 
3074 */ 3075 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 3076 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " 3077 "for copper.\n"); 3078 goto bce_get_media_exit; 3079 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 3080 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " 3081 "for dual media.\n"); 3082 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 3083 goto bce_get_media_exit; 3084 } 3085 3086 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 3087 strap = (val & 3088 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 3089 else 3090 strap = (val & 3091 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 3092 3093 if (pci_get_function(sc->bce_dev) == 0) { 3094 switch (strap) { 3095 case 0x4: 3096 case 0x5: 3097 case 0x6: 3098 DBPRINT(sc, BCE_INFO_LOAD, 3099 "BCM5709 s/w configured for SerDes.\n"); 3100 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 3101 break; 3102 default: 3103 DBPRINT(sc, BCE_INFO_LOAD, 3104 "BCM5709 s/w configured for Copper.\n"); 3105 break; 3106 } 3107 } else { 3108 switch (strap) { 3109 case 0x1: 3110 case 0x2: 3111 case 0x4: 3112 DBPRINT(sc, BCE_INFO_LOAD, 3113 "BCM5709 s/w configured for SerDes.\n"); 3114 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 3115 break; 3116 default: 3117 DBPRINT(sc, BCE_INFO_LOAD, 3118 "BCM5709 s/w configured for Copper.\n"); 3119 break; 3120 } 3121 } 3122 3123 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) 3124 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 3125 3126 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 3127 3128 sc->bce_flags |= BCE_NO_WOL_FLAG; 3129 3130 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) 3131 sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG; 3132 3133 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 3134 /* 5708S/09S/16S use a separate PHY for SerDes. 
*/ 3135 sc->bce_phy_addr = 2; 3136 3137 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 3138 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) { 3139 sc->bce_phy_flags |= 3140 BCE_PHY_2_5G_CAPABLE_FLAG; 3141 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb " 3142 "capable adapter\n"); 3143 } 3144 } 3145 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 3146 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) 3147 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 3148 3149bce_get_media_exit: 3150 DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY), 3151 "Using PHY address %d.\n", sc->bce_phy_addr); 3152 3153 DBEXIT(BCE_VERBOSE_PHY); 3154} 3155 3156 3157/****************************************************************************/ 3158/* Performs PHY initialization required before MII drivers access the */ 3159/* device. */ 3160/* */ 3161/* Returns: */ 3162/* Nothing. */ 3163/****************************************************************************/ 3164static void 3165bce_init_media(struct bce_softc *sc) 3166{ 3167 if ((sc->bce_phy_flags & (BCE_PHY_IEEE_CLAUSE_45_FLAG | 3168 BCE_PHY_REMOTE_CAP_FLAG)) == BCE_PHY_IEEE_CLAUSE_45_FLAG) { 3169 /* 3170 * Configure 5709S/5716S PHYs to use traditional IEEE 3171 * Clause 22 method. Otherwise we have no way to attach 3172 * the PHY in mii(4) layer. PHY specific configuration 3173 * is done in mii layer. 3174 */ 3175 3176 /* Select auto-negotiation MMD of the PHY. */ 3177 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 3178 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT); 3179 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 3180 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD); 3181 3182 /* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */ 3183 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, 3184 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); 3185 } 3186} 3187 3188 3189/****************************************************************************/ 3190/* Free any DMA memory owned by the driver. 
*/ 3191/* */ 3192/* Scans through each data structre that requires DMA memory and frees */ 3193/* the memory if allocated. */ 3194/* */ 3195/* Returns: */ 3196/* Nothing. */ 3197/****************************************************************************/ 3198static void 3199bce_dma_free(struct bce_softc *sc) 3200{ 3201 int i; 3202 3203 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); 3204 3205 /* Free, unmap, and destroy the status block. */ 3206 if (sc->status_block != NULL) { 3207 bus_dmamem_free( 3208 sc->status_tag, 3209 sc->status_block, 3210 sc->status_map); 3211 sc->status_block = NULL; 3212 } 3213 3214 if (sc->status_map != NULL) { 3215 bus_dmamap_unload( 3216 sc->status_tag, 3217 sc->status_map); 3218 bus_dmamap_destroy(sc->status_tag, 3219 sc->status_map); 3220 sc->status_map = NULL; 3221 } 3222 3223 if (sc->status_tag != NULL) { 3224 bus_dma_tag_destroy(sc->status_tag); 3225 sc->status_tag = NULL; 3226 } 3227 3228 3229 /* Free, unmap, and destroy the statistics block. */ 3230 if (sc->stats_block != NULL) { 3231 bus_dmamem_free( 3232 sc->stats_tag, 3233 sc->stats_block, 3234 sc->stats_map); 3235 sc->stats_block = NULL; 3236 } 3237 3238 if (sc->stats_map != NULL) { 3239 bus_dmamap_unload( 3240 sc->stats_tag, 3241 sc->stats_map); 3242 bus_dmamap_destroy(sc->stats_tag, 3243 sc->stats_map); 3244 sc->stats_map = NULL; 3245 } 3246 3247 if (sc->stats_tag != NULL) { 3248 bus_dma_tag_destroy(sc->stats_tag); 3249 sc->stats_tag = NULL; 3250 } 3251 3252 3253 /* Free, unmap and destroy all context memory pages. 
*/ 3254 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 3255 for (i = 0; i < sc->ctx_pages; i++ ) { 3256 if (sc->ctx_block[i] != NULL) { 3257 bus_dmamem_free( 3258 sc->ctx_tag, 3259 sc->ctx_block[i], 3260 sc->ctx_map[i]); 3261 sc->ctx_block[i] = NULL; 3262 } 3263 3264 if (sc->ctx_map[i] != NULL) { 3265 bus_dmamap_unload( 3266 sc->ctx_tag, 3267 sc->ctx_map[i]); 3268 bus_dmamap_destroy( 3269 sc->ctx_tag, 3270 sc->ctx_map[i]); 3271 sc->ctx_map[i] = NULL; 3272 } 3273 } 3274 3275 /* Destroy the context memory tag. */ 3276 if (sc->ctx_tag != NULL) { 3277 bus_dma_tag_destroy(sc->ctx_tag); 3278 sc->ctx_tag = NULL; 3279 } 3280 } 3281 3282 3283 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 3284 for (i = 0; i < sc->tx_pages; i++ ) { 3285 if (sc->tx_bd_chain[i] != NULL) { 3286 bus_dmamem_free( 3287 sc->tx_bd_chain_tag, 3288 sc->tx_bd_chain[i], 3289 sc->tx_bd_chain_map[i]); 3290 sc->tx_bd_chain[i] = NULL; 3291 } 3292 3293 if (sc->tx_bd_chain_map[i] != NULL) { 3294 bus_dmamap_unload( 3295 sc->tx_bd_chain_tag, 3296 sc->tx_bd_chain_map[i]); 3297 bus_dmamap_destroy( 3298 sc->tx_bd_chain_tag, 3299 sc->tx_bd_chain_map[i]); 3300 sc->tx_bd_chain_map[i] = NULL; 3301 } 3302 } 3303 3304 /* Destroy the TX buffer descriptor tag. */ 3305 if (sc->tx_bd_chain_tag != NULL) { 3306 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 3307 sc->tx_bd_chain_tag = NULL; 3308 } 3309 3310 3311 /* Free, unmap and destroy all RX buffer descriptor chain pages. */ 3312 for (i = 0; i < sc->rx_pages; i++ ) { 3313 if (sc->rx_bd_chain[i] != NULL) { 3314 bus_dmamem_free( 3315 sc->rx_bd_chain_tag, 3316 sc->rx_bd_chain[i], 3317 sc->rx_bd_chain_map[i]); 3318 sc->rx_bd_chain[i] = NULL; 3319 } 3320 3321 if (sc->rx_bd_chain_map[i] != NULL) { 3322 bus_dmamap_unload( 3323 sc->rx_bd_chain_tag, 3324 sc->rx_bd_chain_map[i]); 3325 bus_dmamap_destroy( 3326 sc->rx_bd_chain_tag, 3327 sc->rx_bd_chain_map[i]); 3328 sc->rx_bd_chain_map[i] = NULL; 3329 } 3330 } 3331 3332 /* Destroy the RX buffer descriptor tag. 
*/ 3333 if (sc->rx_bd_chain_tag != NULL) { 3334 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 3335 sc->rx_bd_chain_tag = NULL; 3336 } 3337 3338 3339 /* Free, unmap and destroy all page buffer descriptor chain pages. */ 3340 if (bce_hdr_split == TRUE) { 3341 for (i = 0; i < sc->pg_pages; i++ ) { 3342 if (sc->pg_bd_chain[i] != NULL) { 3343 bus_dmamem_free( 3344 sc->pg_bd_chain_tag, 3345 sc->pg_bd_chain[i], 3346 sc->pg_bd_chain_map[i]); 3347 sc->pg_bd_chain[i] = NULL; 3348 } 3349 3350 if (sc->pg_bd_chain_map[i] != NULL) { 3351 bus_dmamap_unload( 3352 sc->pg_bd_chain_tag, 3353 sc->pg_bd_chain_map[i]); 3354 bus_dmamap_destroy( 3355 sc->pg_bd_chain_tag, 3356 sc->pg_bd_chain_map[i]); 3357 sc->pg_bd_chain_map[i] = NULL; 3358 } 3359 } 3360 3361 /* Destroy the page buffer descriptor tag. */ 3362 if (sc->pg_bd_chain_tag != NULL) { 3363 bus_dma_tag_destroy(sc->pg_bd_chain_tag); 3364 sc->pg_bd_chain_tag = NULL; 3365 } 3366 } 3367 3368 3369 /* Unload and destroy the TX mbuf maps. */ 3370 for (i = 0; i < MAX_TX_BD_AVAIL; i++) { 3371 if (sc->tx_mbuf_map[i] != NULL) { 3372 bus_dmamap_unload(sc->tx_mbuf_tag, 3373 sc->tx_mbuf_map[i]); 3374 bus_dmamap_destroy(sc->tx_mbuf_tag, 3375 sc->tx_mbuf_map[i]); 3376 sc->tx_mbuf_map[i] = NULL; 3377 } 3378 } 3379 3380 /* Destroy the TX mbuf tag. */ 3381 if (sc->tx_mbuf_tag != NULL) { 3382 bus_dma_tag_destroy(sc->tx_mbuf_tag); 3383 sc->tx_mbuf_tag = NULL; 3384 } 3385 3386 /* Unload and destroy the RX mbuf maps. */ 3387 for (i = 0; i < MAX_RX_BD_AVAIL; i++) { 3388 if (sc->rx_mbuf_map[i] != NULL) { 3389 bus_dmamap_unload(sc->rx_mbuf_tag, 3390 sc->rx_mbuf_map[i]); 3391 bus_dmamap_destroy(sc->rx_mbuf_tag, 3392 sc->rx_mbuf_map[i]); 3393 sc->rx_mbuf_map[i] = NULL; 3394 } 3395 } 3396 3397 /* Destroy the RX mbuf tag. */ 3398 if (sc->rx_mbuf_tag != NULL) { 3399 bus_dma_tag_destroy(sc->rx_mbuf_tag); 3400 sc->rx_mbuf_tag = NULL; 3401 } 3402 3403 /* Unload and destroy the page mbuf maps. 
*/ 3404 if (bce_hdr_split == TRUE) { 3405 for (i = 0; i < MAX_PG_BD_AVAIL; i++) { 3406 if (sc->pg_mbuf_map[i] != NULL) { 3407 bus_dmamap_unload(sc->pg_mbuf_tag, 3408 sc->pg_mbuf_map[i]); 3409 bus_dmamap_destroy(sc->pg_mbuf_tag, 3410 sc->pg_mbuf_map[i]); 3411 sc->pg_mbuf_map[i] = NULL; 3412 } 3413 } 3414 3415 /* Destroy the page mbuf tag. */ 3416 if (sc->pg_mbuf_tag != NULL) { 3417 bus_dma_tag_destroy(sc->pg_mbuf_tag); 3418 sc->pg_mbuf_tag = NULL; 3419 } 3420 } 3421 3422 /* Destroy the parent tag */ 3423 if (sc->parent_tag != NULL) { 3424 bus_dma_tag_destroy(sc->parent_tag); 3425 sc->parent_tag = NULL; 3426 } 3427 3428 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); 3429} 3430 3431 3432/****************************************************************************/ 3433/* Get DMA memory from the OS. */ 3434/* */ 3435/* Validates that the OS has provided DMA buffers in response to a */ 3436/* bus_dmamap_load() call and saves the physical address of those buffers. */ 3437/* When the callback is used the OS will return 0 for the mapping function */ 3438/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 3439/* failures back to the caller. */ 3440/* */ 3441/* Returns: */ 3442/* Nothing. */ 3443/****************************************************************************/ 3444static void 3445bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3446{ 3447 bus_addr_t *busaddr = arg; 3448 3449 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", 3450 __FUNCTION__, nseg)); 3451 /* Simulate a mapping failure. */ 3452 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), 3453 error = ENOMEM); 3454 3455 /* ToDo: How to increment debug sim_count variable here? */ 3456 3457 /* Check for an error and signal the caller that an error occurred. 
*/ 3458 if (error) { 3459 *busaddr = 0; 3460 } else { 3461 *busaddr = segs->ds_addr; 3462 } 3463} 3464 3465 3466/****************************************************************************/ 3467/* Allocate any DMA memory needed by the driver. */ 3468/* */ 3469/* Allocates DMA memory needed for the various global structures needed by */ 3470/* hardware. */ 3471/* */ 3472/* Memory alignment requirements: */ 3473/* +-----------------+----------+----------+----------+----------+ */ 3474/* | | 5706 | 5708 | 5709 | 5716 | */ 3475/* +-----------------+----------+----------+----------+----------+ */ 3476/* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3477/* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3478/* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 3479/* |PG Buffers | none | none | none | none | */ 3480/* |TX Buffers | none | none | none | none | */ 3481/* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 3482/* |Context Memory | | | | | */ 3483/* +-----------------+----------+----------+----------+----------+ */ 3484/* */ 3485/* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 3486/* */ 3487/* Returns: */ 3488/* 0 for success, positive value for failure. */ 3489/****************************************************************************/ 3490static int 3491bce_dma_alloc(device_t dev) 3492{ 3493 struct bce_softc *sc; 3494 int i, error, rc = 0; 3495 bus_size_t max_size, max_seg_size; 3496 int max_segments; 3497 3498 sc = device_get_softc(dev); 3499 3500 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3501 3502 /* 3503 * Allocate the parent bus DMA tag appropriate for PCI. 
3504 */ 3505 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, 3506 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3507 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 3508 &sc->parent_tag)) { 3509 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", 3510 __FILE__, __LINE__); 3511 rc = ENOMEM; 3512 goto bce_dma_alloc_exit; 3513 } 3514 3515 /* 3516 * Create a DMA tag for the status block, allocate and clear the 3517 * memory, map the memory into DMA space, and fetch the physical 3518 * address of the block. 3519 */ 3520 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3521 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3522 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 3523 0, NULL, NULL, &sc->status_tag)) { 3524 BCE_PRINTF("%s(%d): Could not allocate status block " 3525 "DMA tag!\n", __FILE__, __LINE__); 3526 rc = ENOMEM; 3527 goto bce_dma_alloc_exit; 3528 } 3529 3530 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, 3531 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3532 &sc->status_map)) { 3533 BCE_PRINTF("%s(%d): Could not allocate status block " 3534 "DMA memory!\n", __FILE__, __LINE__); 3535 rc = ENOMEM; 3536 goto bce_dma_alloc_exit; 3537 } 3538 3539 error = bus_dmamap_load(sc->status_tag, sc->status_map, 3540 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr, 3541 &sc->status_block_paddr, BUS_DMA_NOWAIT); 3542 3543 if (error || sc->status_block_paddr == 0) { 3544 BCE_PRINTF("%s(%d): Could not map status block " 3545 "DMA memory!\n", __FILE__, __LINE__); 3546 rc = ENOMEM; 3547 goto bce_dma_alloc_exit; 3548 } 3549 3550 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n", 3551 __FUNCTION__, (uintmax_t) sc->status_block_paddr); 3552 3553 /* 3554 * Create a DMA tag for the statistics block, allocate and clear the 3555 * memory, map the memory into DMA space, and fetch the physical 3556 * address of the block. 
3557 */ 3558 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3559 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3560 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 3561 0, NULL, NULL, &sc->stats_tag)) { 3562 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3563 "DMA tag!\n", __FILE__, __LINE__); 3564 rc = ENOMEM; 3565 goto bce_dma_alloc_exit; 3566 } 3567 3568 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, 3569 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { 3570 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3571 "DMA memory!\n", __FILE__, __LINE__); 3572 rc = ENOMEM; 3573 goto bce_dma_alloc_exit; 3574 } 3575 3576 error = bus_dmamap_load(sc->stats_tag, sc->stats_map, 3577 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, 3578 &sc->stats_block_paddr, BUS_DMA_NOWAIT); 3579 3580 if (error || sc->stats_block_paddr == 0) { 3581 BCE_PRINTF("%s(%d): Could not map statistics block " 3582 "DMA memory!\n", __FILE__, __LINE__); 3583 rc = ENOMEM; 3584 goto bce_dma_alloc_exit; 3585 } 3586 3587 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", 3588 __FUNCTION__, (uintmax_t) sc->stats_block_paddr); 3589 3590 /* BCM5709 uses host memory as cache for context memory. */ 3591 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 3592 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 3593 if (sc->ctx_pages == 0) 3594 sc->ctx_pages = 1; 3595 3596 DBRUNIF((sc->ctx_pages > 512), 3597 BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n", 3598 __FILE__, __LINE__, sc->ctx_pages)); 3599 3600 /* 3601 * Create a DMA tag for the context pages, 3602 * allocate and clear the memory, map the 3603 * memory into DMA space, and fetch the 3604 * physical address of the block. 
3605 */ 3606 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3607 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3608 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 3609 0, NULL, NULL, &sc->ctx_tag)) { 3610 BCE_PRINTF("%s(%d): Could not allocate CTX " 3611 "DMA tag!\n", __FILE__, __LINE__); 3612 rc = ENOMEM; 3613 goto bce_dma_alloc_exit; 3614 } 3615 3616 for (i = 0; i < sc->ctx_pages; i++) { 3617 3618 if(bus_dmamem_alloc(sc->ctx_tag, 3619 (void **)&sc->ctx_block[i], 3620 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3621 &sc->ctx_map[i])) { 3622 BCE_PRINTF("%s(%d): Could not allocate CTX " 3623 "DMA memory!\n", __FILE__, __LINE__); 3624 rc = ENOMEM; 3625 goto bce_dma_alloc_exit; 3626 } 3627 3628 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 3629 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, 3630 &sc->ctx_paddr[i], BUS_DMA_NOWAIT); 3631 3632 if (error || sc->ctx_paddr[i] == 0) { 3633 BCE_PRINTF("%s(%d): Could not map CTX " 3634 "DMA memory!\n", __FILE__, __LINE__); 3635 rc = ENOMEM; 3636 goto bce_dma_alloc_exit; 3637 } 3638 3639 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " 3640 "= 0x%jX\n", __FUNCTION__, i, 3641 (uintmax_t) sc->ctx_paddr[i]); 3642 } 3643 } 3644 3645 /* 3646 * Create a DMA tag for the TX buffer descriptor chain, 3647 * allocate and clear the memory, and fetch the 3648 * physical address of the block. 
3649 */ 3650 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 3651 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3652 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, 3653 NULL, NULL, &sc->tx_bd_chain_tag)) { 3654 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3655 "chain DMA tag!\n", __FILE__, __LINE__); 3656 rc = ENOMEM; 3657 goto bce_dma_alloc_exit; 3658 } 3659 3660 for (i = 0; i < sc->tx_pages; i++) { 3661 3662 if(bus_dmamem_alloc(sc->tx_bd_chain_tag, 3663 (void **)&sc->tx_bd_chain[i], 3664 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3665 &sc->tx_bd_chain_map[i])) { 3666 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3667 "chain DMA memory!\n", __FILE__, __LINE__); 3668 rc = ENOMEM; 3669 goto bce_dma_alloc_exit; 3670 } 3671 3672 error = bus_dmamap_load(sc->tx_bd_chain_tag, 3673 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], 3674 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3675 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3676 3677 if (error || sc->tx_bd_chain_paddr[i] == 0) { 3678 BCE_PRINTF("%s(%d): Could not map TX descriptor " 3679 "chain DMA memory!\n", __FILE__, __LINE__); 3680 rc = ENOMEM; 3681 goto bce_dma_alloc_exit; 3682 } 3683 3684 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " 3685 "0x%jX\n", __FUNCTION__, i, 3686 (uintmax_t) sc->tx_bd_chain_paddr[i]); 3687 } 3688 3689 /* Check the required size before mapping to conserve resources. */ 3690 if (bce_tso_enable) { 3691 max_size = BCE_TSO_MAX_SIZE; 3692 max_segments = BCE_MAX_SEGMENTS; 3693 max_seg_size = BCE_TSO_MAX_SEG_SIZE; 3694 } else { 3695 max_size = MCLBYTES * BCE_MAX_SEGMENTS; 3696 max_segments = BCE_MAX_SEGMENTS; 3697 max_seg_size = MCLBYTES; 3698 } 3699 3700 /* Create a DMA tag for TX mbufs. 
*/ 3701 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3702 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 3703 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { 3704 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", 3705 __FILE__, __LINE__); 3706 rc = ENOMEM; 3707 goto bce_dma_alloc_exit; 3708 } 3709 3710 /* Create DMA maps for the TX mbufs clusters. */ 3711 for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) { 3712 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 3713 &sc->tx_mbuf_map[i])) { 3714 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " 3715 "map!\n", __FILE__, __LINE__); 3716 rc = ENOMEM; 3717 goto bce_dma_alloc_exit; 3718 } 3719 } 3720 3721 /* 3722 * Create a DMA tag for the RX buffer descriptor chain, 3723 * allocate and clear the memory, and fetch the physical 3724 * address of the blocks. 3725 */ 3726 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3727 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, 3728 sc->max_bus_addr, NULL, NULL, 3729 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 3730 0, NULL, NULL, &sc->rx_bd_chain_tag)) { 3731 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " 3732 "DMA tag!\n", __FILE__, __LINE__); 3733 rc = ENOMEM; 3734 goto bce_dma_alloc_exit; 3735 } 3736 3737 for (i = 0; i < sc->rx_pages; i++) { 3738 3739 if (bus_dmamem_alloc(sc->rx_bd_chain_tag, 3740 (void **)&sc->rx_bd_chain[i], 3741 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3742 &sc->rx_bd_chain_map[i])) { 3743 BCE_PRINTF("%s(%d): Could not allocate RX descriptor " 3744 "chain DMA memory!\n", __FILE__, __LINE__); 3745 rc = ENOMEM; 3746 goto bce_dma_alloc_exit; 3747 } 3748 3749 error = bus_dmamap_load(sc->rx_bd_chain_tag, 3750 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], 3751 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3752 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3753 3754 if (error || sc->rx_bd_chain_paddr[i] == 0) { 3755 BCE_PRINTF("%s(%d): Could not map RX descriptor " 3756 "chain DMA memory!\n", __FILE__, 
__LINE__); 3757 rc = ENOMEM; 3758 goto bce_dma_alloc_exit; 3759 } 3760 3761 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " 3762 "0x%jX\n", __FUNCTION__, i, 3763 (uintmax_t) sc->rx_bd_chain_paddr[i]); 3764 } 3765 3766 /* 3767 * Create a DMA tag for RX mbufs. 3768 */ 3769 if (bce_hdr_split == TRUE) 3770 max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 3771 MCLBYTES : sc->rx_bd_mbuf_alloc_size); 3772 else 3773 max_size = MJUM9BYTES; 3774 3775 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " 3776 "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size); 3777 3778 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, 3779 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3780 max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) { 3781 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", 3782 __FILE__, __LINE__); 3783 rc = ENOMEM; 3784 goto bce_dma_alloc_exit; 3785 } 3786 3787 /* Create DMA maps for the RX mbuf clusters. */ 3788 for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) { 3789 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, 3790 &sc->rx_mbuf_map[i])) { 3791 BCE_PRINTF("%s(%d): Unable to create RX mbuf " 3792 "DMA map!\n", __FILE__, __LINE__); 3793 rc = ENOMEM; 3794 goto bce_dma_alloc_exit; 3795 } 3796 } 3797 3798 if (bce_hdr_split == TRUE) { 3799 /* 3800 * Create a DMA tag for the page buffer descriptor chain, 3801 * allocate and clear the memory, and fetch the physical 3802 * address of the blocks. 
3803 */ 3804 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3805 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, 3806 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 3807 0, NULL, NULL, &sc->pg_bd_chain_tag)) { 3808 BCE_PRINTF("%s(%d): Could not allocate page descriptor " 3809 "chain DMA tag!\n", __FILE__, __LINE__); 3810 rc = ENOMEM; 3811 goto bce_dma_alloc_exit; 3812 } 3813 3814 for (i = 0; i < sc->pg_pages; i++) { 3815 if (bus_dmamem_alloc(sc->pg_bd_chain_tag, 3816 (void **)&sc->pg_bd_chain[i], 3817 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3818 &sc->pg_bd_chain_map[i])) { 3819 BCE_PRINTF("%s(%d): Could not allocate page " 3820 "descriptor chain DMA memory!\n", 3821 __FILE__, __LINE__); 3822 rc = ENOMEM; 3823 goto bce_dma_alloc_exit; 3824 } 3825 3826 error = bus_dmamap_load(sc->pg_bd_chain_tag, 3827 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], 3828 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, 3829 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3830 3831 if (error || sc->pg_bd_chain_paddr[i] == 0) { 3832 BCE_PRINTF("%s(%d): Could not map page descriptor " 3833 "chain DMA memory!\n", __FILE__, __LINE__); 3834 rc = ENOMEM; 3835 goto bce_dma_alloc_exit; 3836 } 3837 3838 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " 3839 "0x%jX\n", __FUNCTION__, i, 3840 (uintmax_t) sc->pg_bd_chain_paddr[i]); 3841 } 3842 3843 /* 3844 * Create a DMA tag for page mbufs. 3845 */ 3846 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3847 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 3848 1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) { 3849 BCE_PRINTF("%s(%d): Could not allocate page mbuf " 3850 "DMA tag!\n", __FILE__, __LINE__); 3851 rc = ENOMEM; 3852 goto bce_dma_alloc_exit; 3853 } 3854 3855 /* Create DMA maps for the page mbuf clusters. 
*/ 3856 for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) { 3857 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, 3858 &sc->pg_mbuf_map[i])) { 3859 BCE_PRINTF("%s(%d): Unable to create page mbuf " 3860 "DMA map!\n", __FILE__, __LINE__); 3861 rc = ENOMEM; 3862 goto bce_dma_alloc_exit; 3863 } 3864 } 3865 } 3866 3867bce_dma_alloc_exit: 3868 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3869 return(rc); 3870} 3871 3872 3873/****************************************************************************/ 3874/* Release all resources used by the driver. */ 3875/* */ 3876/* Releases all resources acquired by the driver including interrupts, */ 3877/* interrupt handler, interfaces, mutexes, and DMA memory. */ 3878/* */ 3879/* Returns: */ 3880/* Nothing. */ 3881/****************************************************************************/ 3882static void 3883bce_release_resources(struct bce_softc *sc) 3884{ 3885 device_t dev; 3886 3887 DBENTER(BCE_VERBOSE_RESET); 3888 3889 dev = sc->bce_dev; 3890 3891 bce_dma_free(sc); 3892 3893 if (sc->bce_intrhand != NULL) { 3894 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); 3895 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 3896 } 3897 3898 if (sc->bce_res_irq != NULL) { 3899 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); 3900 bus_release_resource(dev, SYS_RES_IRQ, 3901 rman_get_rid(sc->bce_res_irq), sc->bce_res_irq); 3902 } 3903 3904 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { 3905 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); 3906 pci_release_msi(dev); 3907 } 3908 3909 if (sc->bce_res_mem != NULL) { 3910 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); 3911 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 3912 sc->bce_res_mem); 3913 } 3914 3915 if (sc->bce_ifp != NULL) { 3916 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); 3917 if_free(sc->bce_ifp); 3918 } 3919 3920 if (mtx_initialized(&sc->bce_mtx)) 3921 BCE_LOCK_DESTROY(sc); 3922 3923 DBEXIT(BCE_VERBOSE_RESET); 
3924} 3925 3926 3927/****************************************************************************/ 3928/* Firmware synchronization. */ 3929/* */ 3930/* Before performing certain events such as a chip reset, synchronize with */ 3931/* the firmware first. */ 3932/* */ 3933/* Returns: */ 3934/* 0 for success, positive value for failure. */ 3935/****************************************************************************/ 3936static int 3937bce_fw_sync(struct bce_softc *sc, u32 msg_data) 3938{ 3939 int i, rc = 0; 3940 u32 val; 3941 3942 DBENTER(BCE_VERBOSE_RESET); 3943 3944 /* Don't waste any time if we've timed out before. */ 3945 if (sc->bce_fw_timed_out == TRUE) { 3946 rc = EBUSY; 3947 goto bce_fw_sync_exit; 3948 } 3949 3950 /* Increment the message sequence number. */ 3951 sc->bce_fw_wr_seq++; 3952 msg_data |= sc->bce_fw_wr_seq; 3953 3954 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " 3955 "0x%08X\n", msg_data); 3956 3957 /* Send the message to the bootcode driver mailbox. */ 3958 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3959 3960 /* Wait for the bootcode to acknowledge the message. */ 3961 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 3962 /* Check for a response in the bootcode firmware mailbox. */ 3963 val = bce_shmem_rd(sc, BCE_FW_MB); 3964 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 3965 break; 3966 DELAY(1000); 3967 } 3968 3969 /* If we've timed out, tell bootcode that we've stopped waiting. */ 3970 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && 3971 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { 3972 3973 BCE_PRINTF("%s(%d): Firmware synchronization timeout! 
" 3974 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 3975 3976 msg_data &= ~BCE_DRV_MSG_CODE; 3977 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 3978 3979 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3980 3981 sc->bce_fw_timed_out = TRUE; 3982 rc = EBUSY; 3983 } 3984 3985bce_fw_sync_exit: 3986 DBEXIT(BCE_VERBOSE_RESET); 3987 return (rc); 3988} 3989 3990 3991/****************************************************************************/ 3992/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 3993/* */ 3994/* Returns: */ 3995/* Nothing. */ 3996/****************************************************************************/ 3997static void 3998bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code, 3999 u32 rv2p_code_len, u32 rv2p_proc) 4000{ 4001 int i; 4002 u32 val; 4003 4004 DBENTER(BCE_VERBOSE_RESET); 4005 4006 /* Set the page size used by RV2P. */ 4007 if (rv2p_proc == RV2P_PROC2) { 4008 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); 4009 } 4010 4011 for (i = 0; i < rv2p_code_len; i += 8) { 4012 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 4013 rv2p_code++; 4014 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 4015 rv2p_code++; 4016 4017 if (rv2p_proc == RV2P_PROC1) { 4018 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 4019 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 4020 } 4021 else { 4022 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 4023 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 4024 } 4025 } 4026 4027 /* Reset the processor, un-stall is done later. */ 4028 if (rv2p_proc == RV2P_PROC1) { 4029 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 4030 } 4031 else { 4032 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 4033 } 4034 4035 DBEXIT(BCE_VERBOSE_RESET); 4036} 4037 4038 4039/****************************************************************************/ 4040/* Load RISC processor firmware. */ 4041/* */ 4042/* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 4043/* associated with a particular processor. 
*/ 4044/* */ 4045/* Returns: */ 4046/* Nothing. */ 4047/****************************************************************************/ 4048static void 4049bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 4050 struct fw_info *fw) 4051{ 4052 u32 offset; 4053 4054 DBENTER(BCE_VERBOSE_RESET); 4055 4056 bce_halt_cpu(sc, cpu_reg); 4057 4058 /* Load the Text area. */ 4059 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 4060 if (fw->text) { 4061 int j; 4062 4063 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { 4064 REG_WR_IND(sc, offset, fw->text[j]); 4065 } 4066 } 4067 4068 /* Load the Data area. */ 4069 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 4070 if (fw->data) { 4071 int j; 4072 4073 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { 4074 REG_WR_IND(sc, offset, fw->data[j]); 4075 } 4076 } 4077 4078 /* Load the SBSS area. */ 4079 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 4080 if (fw->sbss) { 4081 int j; 4082 4083 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { 4084 REG_WR_IND(sc, offset, fw->sbss[j]); 4085 } 4086 } 4087 4088 /* Load the BSS area. */ 4089 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 4090 if (fw->bss) { 4091 int j; 4092 4093 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { 4094 REG_WR_IND(sc, offset, fw->bss[j]); 4095 } 4096 } 4097 4098 /* Load the Read-Only area. */ 4099 offset = cpu_reg->spad_base + 4100 (fw->rodata_addr - cpu_reg->mips_view_base); 4101 if (fw->rodata) { 4102 int j; 4103 4104 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { 4105 REG_WR_IND(sc, offset, fw->rodata[j]); 4106 } 4107 } 4108 4109 /* Clear the pre-fetch instruction and set the FW start address. 
*/ 4110 REG_WR_IND(sc, cpu_reg->inst, 0); 4111 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 4112 4113 DBEXIT(BCE_VERBOSE_RESET); 4114} 4115 4116 4117/****************************************************************************/ 4118/* Starts the RISC processor. */ 4119/* */ 4120/* Assumes the CPU starting address has already been set. */ 4121/* */ 4122/* Returns: */ 4123/* Nothing. */ 4124/****************************************************************************/ 4125static void 4126bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 4127{ 4128 u32 val; 4129 4130 DBENTER(BCE_VERBOSE_RESET); 4131 4132 /* Start the CPU. */ 4133 val = REG_RD_IND(sc, cpu_reg->mode); 4134 val &= ~cpu_reg->mode_value_halt; 4135 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 4136 REG_WR_IND(sc, cpu_reg->mode, val); 4137 4138 DBEXIT(BCE_VERBOSE_RESET); 4139} 4140 4141 4142/****************************************************************************/ 4143/* Halts the RISC processor. */ 4144/* */ 4145/* Returns: */ 4146/* Nothing. */ 4147/****************************************************************************/ 4148static void 4149bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 4150{ 4151 u32 val; 4152 4153 DBENTER(BCE_VERBOSE_RESET); 4154 4155 /* Halt the CPU. */ 4156 val = REG_RD_IND(sc, cpu_reg->mode); 4157 val |= cpu_reg->mode_value_halt; 4158 REG_WR_IND(sc, cpu_reg->mode, val); 4159 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 4160 4161 DBEXIT(BCE_VERBOSE_RESET); 4162} 4163 4164 4165/****************************************************************************/ 4166/* Initialize the RX CPU. */ 4167/* */ 4168/* Returns: */ 4169/* Nothing. 
*/ 4170/****************************************************************************/ 4171static void 4172bce_start_rxp_cpu(struct bce_softc *sc) 4173{ 4174 struct cpu_reg cpu_reg; 4175 4176 DBENTER(BCE_VERBOSE_RESET); 4177 4178 cpu_reg.mode = BCE_RXP_CPU_MODE; 4179 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 4180 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 4181 cpu_reg.state = BCE_RXP_CPU_STATE; 4182 cpu_reg.state_value_clear = 0xffffff; 4183 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 4184 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 4185 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 4186 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 4187 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 4188 cpu_reg.spad_base = BCE_RXP_SCRATCH; 4189 cpu_reg.mips_view_base = 0x8000000; 4190 4191 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); 4192 bce_start_cpu(sc, &cpu_reg); 4193 4194 DBEXIT(BCE_VERBOSE_RESET); 4195} 4196 4197 4198/****************************************************************************/ 4199/* Initialize the RX CPU. */ 4200/* */ 4201/* Returns: */ 4202/* Nothing. 
*/ 4203/****************************************************************************/ 4204static void 4205bce_init_rxp_cpu(struct bce_softc *sc) 4206{ 4207 struct cpu_reg cpu_reg; 4208 struct fw_info fw; 4209 4210 DBENTER(BCE_VERBOSE_RESET); 4211 4212 cpu_reg.mode = BCE_RXP_CPU_MODE; 4213 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 4214 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 4215 cpu_reg.state = BCE_RXP_CPU_STATE; 4216 cpu_reg.state_value_clear = 0xffffff; 4217 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 4218 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 4219 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 4220 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 4221 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 4222 cpu_reg.spad_base = BCE_RXP_SCRATCH; 4223 cpu_reg.mips_view_base = 0x8000000; 4224 4225 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4226 fw.ver_major = bce_RXP_b09FwReleaseMajor; 4227 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 4228 fw.ver_fix = bce_RXP_b09FwReleaseFix; 4229 fw.start_addr = bce_RXP_b09FwStartAddr; 4230 4231 fw.text_addr = bce_RXP_b09FwTextAddr; 4232 fw.text_len = bce_RXP_b09FwTextLen; 4233 fw.text_index = 0; 4234 fw.text = bce_RXP_b09FwText; 4235 4236 fw.data_addr = bce_RXP_b09FwDataAddr; 4237 fw.data_len = bce_RXP_b09FwDataLen; 4238 fw.data_index = 0; 4239 fw.data = bce_RXP_b09FwData; 4240 4241 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 4242 fw.sbss_len = bce_RXP_b09FwSbssLen; 4243 fw.sbss_index = 0; 4244 fw.sbss = bce_RXP_b09FwSbss; 4245 4246 fw.bss_addr = bce_RXP_b09FwBssAddr; 4247 fw.bss_len = bce_RXP_b09FwBssLen; 4248 fw.bss_index = 0; 4249 fw.bss = bce_RXP_b09FwBss; 4250 4251 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 4252 fw.rodata_len = bce_RXP_b09FwRodataLen; 4253 fw.rodata_index = 0; 4254 fw.rodata = bce_RXP_b09FwRodata; 4255 } else { 4256 fw.ver_major = bce_RXP_b06FwReleaseMajor; 4257 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 4258 fw.ver_fix = bce_RXP_b06FwReleaseFix; 4259 fw.start_addr = bce_RXP_b06FwStartAddr; 4260 4261 fw.text_addr = 
bce_RXP_b06FwTextAddr; 4262 fw.text_len = bce_RXP_b06FwTextLen; 4263 fw.text_index = 0; 4264 fw.text = bce_RXP_b06FwText; 4265 4266 fw.data_addr = bce_RXP_b06FwDataAddr; 4267 fw.data_len = bce_RXP_b06FwDataLen; 4268 fw.data_index = 0; 4269 fw.data = bce_RXP_b06FwData; 4270 4271 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 4272 fw.sbss_len = bce_RXP_b06FwSbssLen; 4273 fw.sbss_index = 0; 4274 fw.sbss = bce_RXP_b06FwSbss; 4275 4276 fw.bss_addr = bce_RXP_b06FwBssAddr; 4277 fw.bss_len = bce_RXP_b06FwBssLen; 4278 fw.bss_index = 0; 4279 fw.bss = bce_RXP_b06FwBss; 4280 4281 fw.rodata_addr = bce_RXP_b06FwRodataAddr; 4282 fw.rodata_len = bce_RXP_b06FwRodataLen; 4283 fw.rodata_index = 0; 4284 fw.rodata = bce_RXP_b06FwRodata; 4285 } 4286 4287 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); 4288 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4289 4290 /* Delay RXP start until initialization is complete. */ 4291 4292 DBEXIT(BCE_VERBOSE_RESET); 4293} 4294 4295 4296/****************************************************************************/ 4297/* Initialize the TX CPU. */ 4298/* */ 4299/* Returns: */ 4300/* Nothing. 
*/ 4301/****************************************************************************/ 4302static void 4303bce_init_txp_cpu(struct bce_softc *sc) 4304{ 4305 struct cpu_reg cpu_reg; 4306 struct fw_info fw; 4307 4308 DBENTER(BCE_VERBOSE_RESET); 4309 4310 cpu_reg.mode = BCE_TXP_CPU_MODE; 4311 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 4312 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 4313 cpu_reg.state = BCE_TXP_CPU_STATE; 4314 cpu_reg.state_value_clear = 0xffffff; 4315 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 4316 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 4317 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 4318 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 4319 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 4320 cpu_reg.spad_base = BCE_TXP_SCRATCH; 4321 cpu_reg.mips_view_base = 0x8000000; 4322 4323 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4324 fw.ver_major = bce_TXP_b09FwReleaseMajor; 4325 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 4326 fw.ver_fix = bce_TXP_b09FwReleaseFix; 4327 fw.start_addr = bce_TXP_b09FwStartAddr; 4328 4329 fw.text_addr = bce_TXP_b09FwTextAddr; 4330 fw.text_len = bce_TXP_b09FwTextLen; 4331 fw.text_index = 0; 4332 fw.text = bce_TXP_b09FwText; 4333 4334 fw.data_addr = bce_TXP_b09FwDataAddr; 4335 fw.data_len = bce_TXP_b09FwDataLen; 4336 fw.data_index = 0; 4337 fw.data = bce_TXP_b09FwData; 4338 4339 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 4340 fw.sbss_len = bce_TXP_b09FwSbssLen; 4341 fw.sbss_index = 0; 4342 fw.sbss = bce_TXP_b09FwSbss; 4343 4344 fw.bss_addr = bce_TXP_b09FwBssAddr; 4345 fw.bss_len = bce_TXP_b09FwBssLen; 4346 fw.bss_index = 0; 4347 fw.bss = bce_TXP_b09FwBss; 4348 4349 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 4350 fw.rodata_len = bce_TXP_b09FwRodataLen; 4351 fw.rodata_index = 0; 4352 fw.rodata = bce_TXP_b09FwRodata; 4353 } else { 4354 fw.ver_major = bce_TXP_b06FwReleaseMajor; 4355 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 4356 fw.ver_fix = bce_TXP_b06FwReleaseFix; 4357 fw.start_addr = bce_TXP_b06FwStartAddr; 4358 4359 fw.text_addr = 
bce_TXP_b06FwTextAddr; 4360 fw.text_len = bce_TXP_b06FwTextLen; 4361 fw.text_index = 0; 4362 fw.text = bce_TXP_b06FwText; 4363 4364 fw.data_addr = bce_TXP_b06FwDataAddr; 4365 fw.data_len = bce_TXP_b06FwDataLen; 4366 fw.data_index = 0; 4367 fw.data = bce_TXP_b06FwData; 4368 4369 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 4370 fw.sbss_len = bce_TXP_b06FwSbssLen; 4371 fw.sbss_index = 0; 4372 fw.sbss = bce_TXP_b06FwSbss; 4373 4374 fw.bss_addr = bce_TXP_b06FwBssAddr; 4375 fw.bss_len = bce_TXP_b06FwBssLen; 4376 fw.bss_index = 0; 4377 fw.bss = bce_TXP_b06FwBss; 4378 4379 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 4380 fw.rodata_len = bce_TXP_b06FwRodataLen; 4381 fw.rodata_index = 0; 4382 fw.rodata = bce_TXP_b06FwRodata; 4383 } 4384 4385 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); 4386 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4387 bce_start_cpu(sc, &cpu_reg); 4388 4389 DBEXIT(BCE_VERBOSE_RESET); 4390} 4391 4392 4393/****************************************************************************/ 4394/* Initialize the TPAT CPU. */ 4395/* */ 4396/* Returns: */ 4397/* Nothing. 
*/ 4398/****************************************************************************/ 4399static void 4400bce_init_tpat_cpu(struct bce_softc *sc) 4401{ 4402 struct cpu_reg cpu_reg; 4403 struct fw_info fw; 4404 4405 DBENTER(BCE_VERBOSE_RESET); 4406 4407 cpu_reg.mode = BCE_TPAT_CPU_MODE; 4408 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 4409 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 4410 cpu_reg.state = BCE_TPAT_CPU_STATE; 4411 cpu_reg.state_value_clear = 0xffffff; 4412 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 4413 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 4414 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 4415 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 4416 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 4417 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 4418 cpu_reg.mips_view_base = 0x8000000; 4419 4420 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4421 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 4422 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 4423 fw.ver_fix = bce_TPAT_b09FwReleaseFix; 4424 fw.start_addr = bce_TPAT_b09FwStartAddr; 4425 4426 fw.text_addr = bce_TPAT_b09FwTextAddr; 4427 fw.text_len = bce_TPAT_b09FwTextLen; 4428 fw.text_index = 0; 4429 fw.text = bce_TPAT_b09FwText; 4430 4431 fw.data_addr = bce_TPAT_b09FwDataAddr; 4432 fw.data_len = bce_TPAT_b09FwDataLen; 4433 fw.data_index = 0; 4434 fw.data = bce_TPAT_b09FwData; 4435 4436 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 4437 fw.sbss_len = bce_TPAT_b09FwSbssLen; 4438 fw.sbss_index = 0; 4439 fw.sbss = bce_TPAT_b09FwSbss; 4440 4441 fw.bss_addr = bce_TPAT_b09FwBssAddr; 4442 fw.bss_len = bce_TPAT_b09FwBssLen; 4443 fw.bss_index = 0; 4444 fw.bss = bce_TPAT_b09FwBss; 4445 4446 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 4447 fw.rodata_len = bce_TPAT_b09FwRodataLen; 4448 fw.rodata_index = 0; 4449 fw.rodata = bce_TPAT_b09FwRodata; 4450 } else { 4451 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 4452 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 4453 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 4454 fw.start_addr = 
bce_TPAT_b06FwStartAddr; 4455 4456 fw.text_addr = bce_TPAT_b06FwTextAddr; 4457 fw.text_len = bce_TPAT_b06FwTextLen; 4458 fw.text_index = 0; 4459 fw.text = bce_TPAT_b06FwText; 4460 4461 fw.data_addr = bce_TPAT_b06FwDataAddr; 4462 fw.data_len = bce_TPAT_b06FwDataLen; 4463 fw.data_index = 0; 4464 fw.data = bce_TPAT_b06FwData; 4465 4466 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 4467 fw.sbss_len = bce_TPAT_b06FwSbssLen; 4468 fw.sbss_index = 0; 4469 fw.sbss = bce_TPAT_b06FwSbss; 4470 4471 fw.bss_addr = bce_TPAT_b06FwBssAddr; 4472 fw.bss_len = bce_TPAT_b06FwBssLen; 4473 fw.bss_index = 0; 4474 fw.bss = bce_TPAT_b06FwBss; 4475 4476 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 4477 fw.rodata_len = bce_TPAT_b06FwRodataLen; 4478 fw.rodata_index = 0; 4479 fw.rodata = bce_TPAT_b06FwRodata; 4480 } 4481 4482 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n"); 4483 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4484 bce_start_cpu(sc, &cpu_reg); 4485 4486 DBEXIT(BCE_VERBOSE_RESET); 4487} 4488 4489 4490/****************************************************************************/ 4491/* Initialize the CP CPU. */ 4492/* */ 4493/* Returns: */ 4494/* Nothing. 
*/ 4495/****************************************************************************/ 4496static void 4497bce_init_cp_cpu(struct bce_softc *sc) 4498{ 4499 struct cpu_reg cpu_reg; 4500 struct fw_info fw; 4501 4502 DBENTER(BCE_VERBOSE_RESET); 4503 4504 cpu_reg.mode = BCE_CP_CPU_MODE; 4505 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; 4506 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; 4507 cpu_reg.state = BCE_CP_CPU_STATE; 4508 cpu_reg.state_value_clear = 0xffffff; 4509 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; 4510 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; 4511 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; 4512 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; 4513 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; 4514 cpu_reg.spad_base = BCE_CP_SCRATCH; 4515 cpu_reg.mips_view_base = 0x8000000; 4516 4517 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4518 fw.ver_major = bce_CP_b09FwReleaseMajor; 4519 fw.ver_minor = bce_CP_b09FwReleaseMinor; 4520 fw.ver_fix = bce_CP_b09FwReleaseFix; 4521 fw.start_addr = bce_CP_b09FwStartAddr; 4522 4523 fw.text_addr = bce_CP_b09FwTextAddr; 4524 fw.text_len = bce_CP_b09FwTextLen; 4525 fw.text_index = 0; 4526 fw.text = bce_CP_b09FwText; 4527 4528 fw.data_addr = bce_CP_b09FwDataAddr; 4529 fw.data_len = bce_CP_b09FwDataLen; 4530 fw.data_index = 0; 4531 fw.data = bce_CP_b09FwData; 4532 4533 fw.sbss_addr = bce_CP_b09FwSbssAddr; 4534 fw.sbss_len = bce_CP_b09FwSbssLen; 4535 fw.sbss_index = 0; 4536 fw.sbss = bce_CP_b09FwSbss; 4537 4538 fw.bss_addr = bce_CP_b09FwBssAddr; 4539 fw.bss_len = bce_CP_b09FwBssLen; 4540 fw.bss_index = 0; 4541 fw.bss = bce_CP_b09FwBss; 4542 4543 fw.rodata_addr = bce_CP_b09FwRodataAddr; 4544 fw.rodata_len = bce_CP_b09FwRodataLen; 4545 fw.rodata_index = 0; 4546 fw.rodata = bce_CP_b09FwRodata; 4547 } else { 4548 fw.ver_major = bce_CP_b06FwReleaseMajor; 4549 fw.ver_minor = bce_CP_b06FwReleaseMinor; 4550 fw.ver_fix = bce_CP_b06FwReleaseFix; 4551 fw.start_addr = bce_CP_b06FwStartAddr; 4552 4553 fw.text_addr = bce_CP_b06FwTextAddr; 4554 fw.text_len 
= bce_CP_b06FwTextLen; 4555 fw.text_index = 0; 4556 fw.text = bce_CP_b06FwText; 4557 4558 fw.data_addr = bce_CP_b06FwDataAddr; 4559 fw.data_len = bce_CP_b06FwDataLen; 4560 fw.data_index = 0; 4561 fw.data = bce_CP_b06FwData; 4562 4563 fw.sbss_addr = bce_CP_b06FwSbssAddr; 4564 fw.sbss_len = bce_CP_b06FwSbssLen; 4565 fw.sbss_index = 0; 4566 fw.sbss = bce_CP_b06FwSbss; 4567 4568 fw.bss_addr = bce_CP_b06FwBssAddr; 4569 fw.bss_len = bce_CP_b06FwBssLen; 4570 fw.bss_index = 0; 4571 fw.bss = bce_CP_b06FwBss; 4572 4573 fw.rodata_addr = bce_CP_b06FwRodataAddr; 4574 fw.rodata_len = bce_CP_b06FwRodataLen; 4575 fw.rodata_index = 0; 4576 fw.rodata = bce_CP_b06FwRodata; 4577 } 4578 4579 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n"); 4580 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4581 bce_start_cpu(sc, &cpu_reg); 4582 4583 DBEXIT(BCE_VERBOSE_RESET); 4584} 4585 4586 4587/****************************************************************************/ 4588/* Initialize the COM CPU. */ 4589/* */ 4590/* Returns: */ 4591/* Nothing. 
*/ 4592/****************************************************************************/ 4593static void 4594bce_init_com_cpu(struct bce_softc *sc) 4595{ 4596 struct cpu_reg cpu_reg; 4597 struct fw_info fw; 4598 4599 DBENTER(BCE_VERBOSE_RESET); 4600 4601 cpu_reg.mode = BCE_COM_CPU_MODE; 4602 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; 4603 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; 4604 cpu_reg.state = BCE_COM_CPU_STATE; 4605 cpu_reg.state_value_clear = 0xffffff; 4606 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; 4607 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; 4608 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; 4609 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; 4610 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; 4611 cpu_reg.spad_base = BCE_COM_SCRATCH; 4612 cpu_reg.mips_view_base = 0x8000000; 4613 4614 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4615 fw.ver_major = bce_COM_b09FwReleaseMajor; 4616 fw.ver_minor = bce_COM_b09FwReleaseMinor; 4617 fw.ver_fix = bce_COM_b09FwReleaseFix; 4618 fw.start_addr = bce_COM_b09FwStartAddr; 4619 4620 fw.text_addr = bce_COM_b09FwTextAddr; 4621 fw.text_len = bce_COM_b09FwTextLen; 4622 fw.text_index = 0; 4623 fw.text = bce_COM_b09FwText; 4624 4625 fw.data_addr = bce_COM_b09FwDataAddr; 4626 fw.data_len = bce_COM_b09FwDataLen; 4627 fw.data_index = 0; 4628 fw.data = bce_COM_b09FwData; 4629 4630 fw.sbss_addr = bce_COM_b09FwSbssAddr; 4631 fw.sbss_len = bce_COM_b09FwSbssLen; 4632 fw.sbss_index = 0; 4633 fw.sbss = bce_COM_b09FwSbss; 4634 4635 fw.bss_addr = bce_COM_b09FwBssAddr; 4636 fw.bss_len = bce_COM_b09FwBssLen; 4637 fw.bss_index = 0; 4638 fw.bss = bce_COM_b09FwBss; 4639 4640 fw.rodata_addr = bce_COM_b09FwRodataAddr; 4641 fw.rodata_len = bce_COM_b09FwRodataLen; 4642 fw.rodata_index = 0; 4643 fw.rodata = bce_COM_b09FwRodata; 4644 } else { 4645 fw.ver_major = bce_COM_b06FwReleaseMajor; 4646 fw.ver_minor = bce_COM_b06FwReleaseMinor; 4647 fw.ver_fix = bce_COM_b06FwReleaseFix; 4648 fw.start_addr = bce_COM_b06FwStartAddr; 4649 4650 fw.text_addr = 
bce_COM_b06FwTextAddr; 4651 fw.text_len = bce_COM_b06FwTextLen; 4652 fw.text_index = 0; 4653 fw.text = bce_COM_b06FwText; 4654 4655 fw.data_addr = bce_COM_b06FwDataAddr; 4656 fw.data_len = bce_COM_b06FwDataLen; 4657 fw.data_index = 0; 4658 fw.data = bce_COM_b06FwData; 4659 4660 fw.sbss_addr = bce_COM_b06FwSbssAddr; 4661 fw.sbss_len = bce_COM_b06FwSbssLen; 4662 fw.sbss_index = 0; 4663 fw.sbss = bce_COM_b06FwSbss; 4664 4665 fw.bss_addr = bce_COM_b06FwBssAddr; 4666 fw.bss_len = bce_COM_b06FwBssLen; 4667 fw.bss_index = 0; 4668 fw.bss = bce_COM_b06FwBss; 4669 4670 fw.rodata_addr = bce_COM_b06FwRodataAddr; 4671 fw.rodata_len = bce_COM_b06FwRodataLen; 4672 fw.rodata_index = 0; 4673 fw.rodata = bce_COM_b06FwRodata; 4674 } 4675 4676 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n"); 4677 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4678 bce_start_cpu(sc, &cpu_reg); 4679 4680 DBEXIT(BCE_VERBOSE_RESET); 4681} 4682 4683 4684/****************************************************************************/ 4685/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ 4686/* */ 4687/* Loads the firmware for each CPU and starts the CPU. */ 4688/* */ 4689/* Returns: */ 4690/* Nothing. 
*/ 4691/****************************************************************************/ 4692static void 4693bce_init_cpus(struct bce_softc *sc) 4694{ 4695 DBENTER(BCE_VERBOSE_RESET); 4696 4697 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4698 4699 if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) { 4700 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1, 4701 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1); 4702 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2, 4703 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2); 4704 } else { 4705 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, 4706 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1); 4707 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, 4708 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2); 4709 } 4710 4711 } else { 4712 bce_load_rv2p_fw(sc, bce_rv2p_proc1, 4713 sizeof(bce_rv2p_proc1), RV2P_PROC1); 4714 bce_load_rv2p_fw(sc, bce_rv2p_proc2, 4715 sizeof(bce_rv2p_proc2), RV2P_PROC2); 4716 } 4717 4718 bce_init_rxp_cpu(sc); 4719 bce_init_txp_cpu(sc); 4720 bce_init_tpat_cpu(sc); 4721 bce_init_com_cpu(sc); 4722 bce_init_cp_cpu(sc); 4723 4724 DBEXIT(BCE_VERBOSE_RESET); 4725} 4726 4727 4728/****************************************************************************/ 4729/* Initialize context memory. */ 4730/* */ 4731/* Clears the memory associated with each Context ID (CID). */ 4732/* */ 4733/* Returns: */ 4734/* Nothing. */ 4735/****************************************************************************/ 4736static int 4737bce_init_ctx(struct bce_softc *sc) 4738{ 4739 u32 offset, val, vcid_addr; 4740 int i, j, rc, retry_cnt; 4741 4742 rc = 0; 4743 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4744 4745 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4746 retry_cnt = CTX_INIT_RETRY_COUNT; 4747 4748 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n"); 4749 4750 /* 4751 * BCM5709 context memory may be cached 4752 * in host memory so prepare the host memory 4753 * for access. 
4754 */ 4755 val = BCE_CTX_COMMAND_ENABLED | 4756 BCE_CTX_COMMAND_MEM_INIT | (1 << 12); 4757 val |= (BCM_PAGE_BITS - 8) << 16; 4758 REG_WR(sc, BCE_CTX_COMMAND, val); 4759 4760 /* Wait for mem init command to complete. */ 4761 for (i = 0; i < retry_cnt; i++) { 4762 val = REG_RD(sc, BCE_CTX_COMMAND); 4763 if (!(val & BCE_CTX_COMMAND_MEM_INIT)) 4764 break; 4765 DELAY(2); 4766 } 4767 if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) { 4768 BCE_PRINTF("%s(): Context memory initialization failed!\n", 4769 __FUNCTION__); 4770 rc = EBUSY; 4771 goto init_ctx_fail; 4772 } 4773 4774 for (i = 0; i < sc->ctx_pages; i++) { 4775 /* Set the physical address of the context memory. */ 4776 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0, 4777 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) | 4778 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID); 4779 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1, 4780 BCE_ADDR_HI(sc->ctx_paddr[i])); 4781 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i | 4782 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4783 4784 /* Verify the context memory write was successful. */ 4785 for (j = 0; j < retry_cnt; j++) { 4786 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL); 4787 if ((val & 4788 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 4789 break; 4790 DELAY(5); 4791 } 4792 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) { 4793 BCE_PRINTF("%s(): Failed to initialize " 4794 "context page %d!\n", __FUNCTION__, i); 4795 rc = EBUSY; 4796 goto init_ctx_fail; 4797 } 4798 } 4799 } else { 4800 4801 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n"); 4802 4803 /* 4804 * For the 5706/5708, context memory is local to 4805 * the controller, so initialize the controller 4806 * context memory. 
4807 */ 4808 4809 vcid_addr = GET_CID_ADDR(96); 4810 while (vcid_addr) { 4811 4812 vcid_addr -= PHY_CTX_SIZE; 4813 4814 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 4815 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4816 4817 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { 4818 CTX_WR(sc, 0x00, offset, 0); 4819 } 4820 4821 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 4822 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4823 } 4824 4825 } 4826init_ctx_fail: 4827 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4828 return (rc); 4829} 4830 4831 4832/****************************************************************************/ 4833/* Fetch the permanent MAC address of the controller. */ 4834/* */ 4835/* Returns: */ 4836/* Nothing. */ 4837/****************************************************************************/ 4838static void 4839bce_get_mac_addr(struct bce_softc *sc) 4840{ 4841 u32 mac_lo = 0, mac_hi = 0; 4842 4843 DBENTER(BCE_VERBOSE_RESET); 4844 4845 /* 4846 * The NetXtreme II bootcode populates various NIC 4847 * power-on and runtime configuration items in a 4848 * shared memory area. The factory configured MAC 4849 * address is available from both NVRAM and the 4850 * shared memory area so we'll read the value from 4851 * shared memory for speed. 
4852 */ 4853 4854 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 4855 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 4856 4857 if ((mac_lo == 0) && (mac_hi == 0)) { 4858 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", 4859 __FILE__, __LINE__); 4860 } else { 4861 sc->eaddr[0] = (u_char)(mac_hi >> 8); 4862 sc->eaddr[1] = (u_char)(mac_hi >> 0); 4863 sc->eaddr[2] = (u_char)(mac_lo >> 24); 4864 sc->eaddr[3] = (u_char)(mac_lo >> 16); 4865 sc->eaddr[4] = (u_char)(mac_lo >> 8); 4866 sc->eaddr[5] = (u_char)(mac_lo >> 0); 4867 } 4868 4869 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " 4870 "address = %6D\n", sc->eaddr, ":"); 4871 DBEXIT(BCE_VERBOSE_RESET); 4872} 4873 4874 4875/****************************************************************************/ 4876/* Program the MAC address. */ 4877/* */ 4878/* Returns: */ 4879/* Nothing. */ 4880/****************************************************************************/ 4881static void 4882bce_set_mac_addr(struct bce_softc *sc) 4883{ 4884 u32 val; 4885 u8 *mac_addr = sc->eaddr; 4886 4887 /* ToDo: Add support for setting multiple MAC addresses. */ 4888 4889 DBENTER(BCE_VERBOSE_RESET); 4890 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " 4891 "%6D\n", sc->eaddr, ":"); 4892 4893 val = (mac_addr[0] << 8) | mac_addr[1]; 4894 4895 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 4896 4897 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4898 (mac_addr[4] << 8) | mac_addr[5]; 4899 4900 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 4901 4902 DBEXIT(BCE_VERBOSE_RESET); 4903} 4904 4905 4906/****************************************************************************/ 4907/* Stop the controller. */ 4908/* */ 4909/* Returns: */ 4910/* Nothing. 
*/ 4911/****************************************************************************/ 4912static void 4913bce_stop(struct bce_softc *sc) 4914{ 4915 struct ifnet *ifp; 4916 4917 DBENTER(BCE_VERBOSE_RESET); 4918 4919 BCE_LOCK_ASSERT(sc); 4920 4921 ifp = sc->bce_ifp; 4922 4923 callout_stop(&sc->bce_tick_callout); 4924 4925 /* Disable the transmit/receive blocks. */ 4926 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 4927 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4928 DELAY(20); 4929 4930 bce_disable_intr(sc); 4931 4932 /* Free RX buffers. */ 4933 if (bce_hdr_split == TRUE) { 4934 bce_free_pg_chain(sc); 4935 } 4936 bce_free_rx_chain(sc); 4937 4938 /* Free TX buffers. */ 4939 bce_free_tx_chain(sc); 4940 4941 sc->watchdog_timer = 0; 4942 4943 sc->bce_link_up = FALSE; 4944 4945 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4946 4947 DBEXIT(BCE_VERBOSE_RESET); 4948} 4949 4950 4951static int 4952bce_reset(struct bce_softc *sc, u32 reset_code) 4953{ 4954 u32 emac_mode_save, val; 4955 int i, rc = 0; 4956 static const u32 emac_mode_mask = BCE_EMAC_MODE_PORT | 4957 BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_25G; 4958 4959 DBENTER(BCE_VERBOSE_RESET); 4960 4961 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", 4962 __FUNCTION__, reset_code); 4963 4964 /* 4965 * If ASF/IPMI is operational, then the EMAC Mode register already 4966 * contains appropriate values for the link settings that have 4967 * been auto-negotiated. Resetting the chip will clobber those 4968 * values. Save the important bits so we can restore them after 4969 * the reset. 4970 */ 4971 emac_mode_save = REG_RD(sc, BCE_EMAC_MODE) & emac_mode_mask; 4972 4973 /* Wait for pending PCI transactions to complete. 
*/ 4974 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 4975 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4976 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4977 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4978 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4979 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4980 DELAY(5); 4981 4982 /* Disable DMA */ 4983 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 4984 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4985 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4986 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4987 } 4988 4989 /* Assume bootcode is running. */ 4990 sc->bce_fw_timed_out = FALSE; 4991 sc->bce_drv_cardiac_arrest = FALSE; 4992 4993 /* Give the firmware a chance to prepare for the reset. */ 4994 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 4995 if (rc) 4996 goto bce_reset_exit; 4997 4998 /* Set a firmware reminder that this is a soft reset. */ 4999 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC); 5000 5001 /* Dummy read to force the chip to complete all current transactions. */ 5002 val = REG_RD(sc, BCE_MISC_ID); 5003 5004 /* Chip reset. */ 5005 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 5006 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 5007 REG_RD(sc, BCE_MISC_COMMAND); 5008 DELAY(5); 5009 5010 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 5011 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 5012 5013 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 5014 } else { 5015 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 5016 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 5017 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 5018 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 5019 5020 /* Allow up to 30us for reset to complete. */ 5021 for (i = 0; i < 10; i++) { 5022 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 5023 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 5024 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 5025 break; 5026 } 5027 DELAY(10); 5028 } 5029 5030 /* Check that reset completed successfully. 
*/ 5031 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 5032 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 5033 BCE_PRINTF("%s(%d): Reset failed!\n", 5034 __FILE__, __LINE__); 5035 rc = EBUSY; 5036 goto bce_reset_exit; 5037 } 5038 } 5039 5040 /* Make sure byte swapping is properly configured. */ 5041 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 5042 if (val != 0x01020304) { 5043 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", 5044 __FILE__, __LINE__); 5045 rc = ENODEV; 5046 goto bce_reset_exit; 5047 } 5048 5049 /* Just completed a reset, assume that firmware is running again. */ 5050 sc->bce_fw_timed_out = FALSE; 5051 sc->bce_drv_cardiac_arrest = FALSE; 5052 5053 /* Wait for the firmware to finish its initialization. */ 5054 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 5055 if (rc) 5056 BCE_PRINTF("%s(%d): Firmware did not complete " 5057 "initialization!\n", __FILE__, __LINE__); 5058 /* Get firmware capabilities. */ 5059 bce_fw_cap_init(sc); 5060 5061bce_reset_exit: 5062 /* Restore EMAC Mode bits needed to keep ASF/IPMI running. */ 5063 if (reset_code == BCE_DRV_MSG_CODE_RESET) { 5064 val = REG_RD(sc, BCE_EMAC_MODE); 5065 val = (val & ~emac_mode_mask) | emac_mode_save; 5066 REG_WR(sc, BCE_EMAC_MODE, val); 5067 } 5068 5069 DBEXIT(BCE_VERBOSE_RESET); 5070 return (rc); 5071} 5072 5073 5074static int 5075bce_chipinit(struct bce_softc *sc) 5076{ 5077 u32 val; 5078 int rc = 0; 5079 5080 DBENTER(BCE_VERBOSE_RESET); 5081 5082 bce_disable_intr(sc); 5083 5084 /* 5085 * Initialize DMA byte/word swapping, configure the number of DMA 5086 * channels and PCI clock compensation delay. 
5087 */ 5088 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 5089 BCE_DMA_CONFIG_DATA_WORD_SWAP | 5090#if BYTE_ORDER == BIG_ENDIAN 5091 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 5092#endif 5093 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 5094 DMA_READ_CHANS << 12 | 5095 DMA_WRITE_CHANS << 16; 5096 5097 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 5098 5099 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 5100 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 5101 5102 /* 5103 * This setting resolves a problem observed on certain Intel PCI 5104 * chipsets that cannot handle multiple outstanding DMA operations. 5105 * See errata E9_5706A1_65. 5106 */ 5107 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 5108 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && 5109 !(sc->bce_flags & BCE_PCIX_FLAG)) 5110 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 5111 5112 REG_WR(sc, BCE_DMA_CONFIG, val); 5113 5114 /* Enable the RX_V2P and Context state machines before access. */ 5115 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 5116 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 5117 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 5118 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 5119 5120 /* Initialize context mapping and zero out the quick contexts. */ 5121 if ((rc = bce_init_ctx(sc)) != 0) 5122 goto bce_chipinit_exit; 5123 5124 /* Initialize the on-boards CPUs */ 5125 bce_init_cpus(sc); 5126 5127 /* Enable management frames (NC-SI) to flow to the MCP. */ 5128 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 5129 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 5130 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 5131 } 5132 5133 /* Prepare NVRAM for access. */ 5134 if ((rc = bce_init_nvram(sc)) != 0) 5135 goto bce_chipinit_exit; 5136 5137 /* Set the kernel bypass block size */ 5138 val = REG_RD(sc, BCE_MQ_CONFIG); 5139 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; 5140 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 5141 5142 /* Enable bins used on the 5709. 
*/ 5143 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 5144 val |= BCE_MQ_CONFIG_BIN_MQ_MODE; 5145 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) 5146 val |= BCE_MQ_CONFIG_HALT_DIS; 5147 } 5148 5149 REG_WR(sc, BCE_MQ_CONFIG, val); 5150 5151 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 5152 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); 5153 REG_WR(sc, BCE_MQ_KNL_WIND_END, val); 5154 5155 /* Set the page size and clear the RV2P processor stall bits. */ 5156 val = (BCM_PAGE_BITS - 8) << 24; 5157 REG_WR(sc, BCE_RV2P_CONFIG, val); 5158 5159 /* Configure page size. */ 5160 val = REG_RD(sc, BCE_TBDR_CONFIG); 5161 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 5162 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 5163 REG_WR(sc, BCE_TBDR_CONFIG, val); 5164 5165 /* Set the perfect match control register to default. */ 5166 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 5167 5168bce_chipinit_exit: 5169 DBEXIT(BCE_VERBOSE_RESET); 5170 5171 return(rc); 5172} 5173 5174 5175/****************************************************************************/ 5176/* Initialize the controller in preparation to send/receive traffic. */ 5177/* */ 5178/* Returns: */ 5179/* 0 for success, positive value for failure. */ 5180/****************************************************************************/ 5181static int 5182bce_blockinit(struct bce_softc *sc) 5183{ 5184 u32 reg, val; 5185 int rc = 0; 5186 5187 DBENTER(BCE_VERBOSE_RESET); 5188 5189 /* Load the hardware default MAC address. */ 5190 bce_set_mac_addr(sc); 5191 5192 /* Set the Ethernet backoff seed value */ 5193 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + 5194 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + 5195 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 5196 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 5197 5198 sc->last_status_idx = 0; 5199 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 5200 5201 /* Set up link change interrupt generation. */ 5202 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 5203 5204 /* Program the physical address of the status block. 
*/ 5205 REG_WR(sc, BCE_HC_STATUS_ADDR_L, 5206 BCE_ADDR_LO(sc->status_block_paddr)); 5207 REG_WR(sc, BCE_HC_STATUS_ADDR_H, 5208 BCE_ADDR_HI(sc->status_block_paddr)); 5209 5210 /* Program the physical address of the statistics block. */ 5211 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 5212 BCE_ADDR_LO(sc->stats_block_paddr)); 5213 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 5214 BCE_ADDR_HI(sc->stats_block_paddr)); 5215 5216 /* 5217 * Program various host coalescing parameters. 5218 * Trip points control how many BDs should be ready before generating 5219 * an interrupt while ticks control how long a BD can sit in the chain 5220 * before generating an interrupt. 5221 */ 5222 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 5223 (sc->bce_tx_quick_cons_trip_int << 16) | 5224 sc->bce_tx_quick_cons_trip); 5225 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 5226 (sc->bce_rx_quick_cons_trip_int << 16) | 5227 sc->bce_rx_quick_cons_trip); 5228 REG_WR(sc, BCE_HC_TX_TICKS, 5229 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 5230 REG_WR(sc, BCE_HC_RX_TICKS, 5231 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 5232 REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00); 5233 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 5234 /* Not used for L2. */ 5235 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0); 5236 REG_WR(sc, BCE_HC_COM_TICKS, 0); 5237 REG_WR(sc, BCE_HC_CMD_TICKS, 0); 5238 5239 /* Configure the Host Coalescing block. */ 5240 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | 5241 BCE_HC_CONFIG_COLLECT_STATS; 5242 5243#if 0 5244 /* ToDo: Add MSI-X support. 
*/ 5245 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 5246 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + 5247 BCE_HC_SB_CONFIG_1; 5248 5249 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 5250 5251 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | 5252 BCE_HC_SB_CONFIG_1_ONE_SHOT); 5253 5254 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 5255 (sc->tx_quick_cons_trip_int << 16) | 5256 sc->tx_quick_cons_trip); 5257 5258 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 5259 (sc->tx_ticks_int << 16) | sc->tx_ticks); 5260 5261 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 5262 } 5263 5264 /* 5265 * Tell the HC block to automatically set the 5266 * INT_MASK bit after an MSI/MSI-X interrupt 5267 * is generated so the driver doesn't have to. 5268 */ 5269 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) 5270 val |= BCE_HC_CONFIG_ONE_SHOT; 5271 5272 /* Set the MSI-X status blocks to 128 byte boundaries. */ 5273 if (sc->bce_flags & BCE_USING_MSIX_FLAG) 5274 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 5275#endif 5276 5277 REG_WR(sc, BCE_HC_CONFIG, val); 5278 5279 /* Clear the internal statistics counters. */ 5280 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 5281 5282 /* Verify that bootcode is running. */ 5283 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 5284 5285 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), 5286 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", 5287 __FILE__, __LINE__); 5288 reg = 0); 5289 5290 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 5291 BCE_DEV_INFO_SIGNATURE_MAGIC) { 5292 BCE_PRINTF("%s(%d): Bootcode not running! 
Found: 0x%08X, " 5293 "Expected: 08%08X\n", __FILE__, __LINE__, 5294 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), 5295 BCE_DEV_INFO_SIGNATURE_MAGIC); 5296 rc = ENODEV; 5297 goto bce_blockinit_exit; 5298 } 5299 5300 /* Enable DMA */ 5301 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 5302 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 5303 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 5304 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 5305 } 5306 5307 /* Allow bootcode to apply additional fixes before enabling MAC. */ 5308 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | 5309 BCE_DRV_MSG_CODE_RESET); 5310 5311 /* Enable link state change interrupt generation. */ 5312 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 5313 5314 /* Enable the RXP. */ 5315 bce_start_rxp_cpu(sc); 5316 5317 /* Disable management frames (NC-SI) from flowing to the MCP. */ 5318 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 5319 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 5320 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 5321 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 5322 } 5323 5324 /* Enable all remaining blocks in the MAC. */ 5325 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) 5326 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 5327 BCE_MISC_ENABLE_DEFAULT_XI); 5328 else 5329 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 5330 BCE_MISC_ENABLE_DEFAULT); 5331 5332 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 5333 DELAY(20); 5334 5335 /* Save the current host coalescing block settings. */ 5336 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 5337 5338bce_blockinit_exit: 5339 DBEXIT(BCE_VERBOSE_RESET); 5340 5341 return (rc); 5342} 5343 5344 5345/****************************************************************************/ 5346/* Encapsulate an mbuf into the rx_bd chain. */ 5347/* */ 5348/* Returns: */ 5349/* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static int
bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq)
{
	bus_dma_segment_t segs[1];
	struct mbuf *m_new = NULL;
	struct rx_bd *rxbd;
	int nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = chain_prod;
#endif

	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	/* Make sure the inputs are valid. */
	DBRUNIF((chain_prod > MAX_RX_BD_ALLOC),
	    BCE_PRINTF("%s(%d): RX producer out of range: "
	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
	    chain_prod, (u16)MAX_RX_BD_ALLOC));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
	    prod, chain_prod, *prod_bseq);

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
	    sc->rx_empty_count++);

	/* Simulate an mbuf allocation failure. */
	DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
	    sc->mbuf_alloc_failed_count++;
	    sc->mbuf_alloc_failed_sim_count++;
	    rc = ENOBUFS;
	    goto bce_get_rx_buf_exit);

	/*
	 * This is a new mbuf allocation.  When header splitting is enabled
	 * only an mbuf header is needed here (payload lands in the page
	 * chain); otherwise a cluster large enough for a full frame is used.
	 */
	if (bce_hdr_split == TRUE)
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
	else
		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    sc->rx_bd_mbuf_alloc_size);

	if (m_new == NULL) {
		sc->mbuf_alloc_failed_count++;
		rc = ENOBUFS;
		goto bce_get_rx_buf_exit;
	}

	DBRUN(sc->debug_rx_mbuf_alloc++);

	/* Make sure we have a valid packet header. */
	M_ASSERTPKTHDR(m_new);

	/* Initialize the mbuf size and pad if necessary for alignment. */
	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
	m_adj(m_new, sc->rx_bd_mbuf_align_pad);

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag,
	    sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
		    "chain (%d)!\n", __FILE__, __LINE__, error);

		sc->dma_map_addr_rx_failed_count++;
		m_freem(m_new);

		DBRUN(sc->debug_rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_rx_buf_exit;
	}

	/* All mbufs must map to a single segment. */
	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
	    __FUNCTION__, nsegs));

	/* Setup the rx_bd for the segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
	*prod_bseq += segs[0].ds_len;

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUNMSG(BCE_INSANE_RECV,
	    bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod,
	    chain_prod, *prod_bseq);

bce_get_rx_buf_exit:
	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	return(rc);
}


/****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain.
*/ 5462/* */ 5463/* Returns: */ 5464/* 0 for success, positive value for failure. */ 5465/****************************************************************************/ 5466static int 5467bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx) 5468{ 5469 bus_dma_segment_t segs[1]; 5470 struct mbuf *m_new = NULL; 5471 struct rx_bd *pgbd; 5472 int error, nsegs, rc = 0; 5473#ifdef BCE_DEBUG 5474 u16 debug_prod_idx = prod_idx; 5475#endif 5476 5477 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5478 5479 /* Make sure the inputs are valid. */ 5480 DBRUNIF((prod_idx > MAX_PG_BD_ALLOC), 5481 BCE_PRINTF("%s(%d): page producer out of range: " 5482 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5483 prod_idx, (u16)MAX_PG_BD_ALLOC)); 5484 5485 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5486 "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx); 5487 5488 /* Update counters if we've hit a new low or run out of pages. */ 5489 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark), 5490 sc->pg_low_watermark = sc->free_pg_bd); 5491 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++); 5492 5493 /* Simulate an mbuf allocation failure. */ 5494 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), 5495 sc->mbuf_alloc_failed_count++; 5496 sc->mbuf_alloc_failed_sim_count++; 5497 rc = ENOBUFS; 5498 goto bce_get_pg_buf_exit); 5499 5500 /* This is a new mbuf allocation. */ 5501 m_new = m_getcl(M_NOWAIT, MT_DATA, 0); 5502 if (m_new == NULL) { 5503 sc->mbuf_alloc_failed_count++; 5504 rc = ENOBUFS; 5505 goto bce_get_pg_buf_exit; 5506 } 5507 5508 DBRUN(sc->debug_pg_mbuf_alloc++); 5509 5510 m_new->m_len = MCLBYTES; 5511 5512 /* ToDo: Consider calling m_fragment() to test error handling. */ 5513 5514 /* Map the mbuf cluster into device memory. */ 5515 error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag, 5516 sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT); 5517 5518 /* Handle any mapping errors. 
*/ 5519 if (error) { 5520 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", 5521 __FILE__, __LINE__); 5522 5523 m_freem(m_new); 5524 DBRUN(sc->debug_pg_mbuf_alloc--); 5525 5526 rc = ENOBUFS; 5527 goto bce_get_pg_buf_exit; 5528 } 5529 5530 /* All mbufs must map to a single segment. */ 5531 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", 5532 __FUNCTION__, nsegs)); 5533 5534 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */ 5535 5536 /* 5537 * The page chain uses the same rx_bd data structure 5538 * as the receive chain but doesn't require a byte sequence (bseq). 5539 */ 5540 pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)]; 5541 5542 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); 5543 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); 5544 pgbd->rx_bd_len = htole32(MCLBYTES); 5545 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); 5546 5547 /* Save the mbuf and update our counter. */ 5548 sc->pg_mbuf_ptr[prod_idx] = m_new; 5549 sc->free_pg_bd--; 5550 5551 DBRUNMSG(BCE_INSANE_RECV, 5552 bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1)); 5553 5554 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " 5555 "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx); 5556 5557bce_get_pg_buf_exit: 5558 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5559 5560 return(rc); 5561} 5562 5563 5564/****************************************************************************/ 5565/* Initialize the TX context memory. */ 5566/* */ 5567/* Returns: */ 5568/* Nothing */ 5569/****************************************************************************/ 5570static void 5571bce_init_tx_context(struct bce_softc *sc) 5572{ 5573 u32 val; 5574 5575 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 5576 5577 /* Initialize the context ID for an L2 TX chain. */ 5578 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 5579 /* Set the CID type to support an L2 connection. 
 */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
		    BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD_ALLOC;
	DBRUN(sc->tx_hi_watermark = 0);
	DBRUN(sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain). A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < sc->tx_pages; i++) {
		int j;

		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->tx_pages - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	bce_init_tx_context(sc);

	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain.
 */
	for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			if (sc->tx_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->tx_mbuf_tag,
				    sc->tx_mbuf_map[i],
				    BUS_DMASYNC_POSTWRITE);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < sc->tx_pages; i++)
		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);

	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_tx_mbuf_alloc),
	    BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
	    "from tx chain!\n", __FILE__, __LINE__,
	    sc->debug_tx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
}


/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);

	/* Init the type, size, and BD cache levels for the RX context. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
	    (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
			lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		} else {
			lo_water = 0;
		}

		/* A watermark at/above the ring size disables pausing. */
		if (lo_water >= USABLE_RX_BD_ALLOC) {
			lo_water = 0;
		}

		hi_water = USABLE_RX_BD_ALLOC / 4;

		if (hi_water <= lo_water) {
			lo_water = 0;
		}

		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* High watermark field is only 4 bits wide. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;

		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD_ALLOC;
	sc->max_rx_bd = USABLE_RX_BD_ALLOC;

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < sc->rx_pages; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->rx_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Fill up the RX chain. */
	bce_fill_rx_chain(sc);

	DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC);
	DBRUN(sc->rx_empty_count = 0);
	for (i = 0; i < sc->rx_pages; i++) {
		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	bce_init_rx_context(sc);

	DBRUNMSG(BCE_EXTREME_RECV,
	    bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* ToDo: Are there possible failure modes here? */

	return(rc);
}


/****************************************************************************/
/* Add mbufs to the RX chain until its full or an mbuf allocation error     */
/* occurs.
*/ 5857/* */ 5858/* Returns: */ 5859/* Nothing */ 5860/****************************************************************************/ 5861static void 5862bce_fill_rx_chain(struct bce_softc *sc) 5863{ 5864 u16 prod, prod_idx; 5865 u32 prod_bseq; 5866 5867 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5868 BCE_VERBOSE_CTX); 5869 5870 /* Get the RX chain producer indices. */ 5871 prod = sc->rx_prod; 5872 prod_bseq = sc->rx_prod_bseq; 5873 5874 /* Keep filling the RX chain until it's full. */ 5875 while (sc->free_rx_bd > 0) { 5876 prod_idx = RX_CHAIN_IDX(prod); 5877 if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) { 5878 /* Bail out if we can't add an mbuf to the chain. */ 5879 break; 5880 } 5881 prod = NEXT_RX_BD(prod); 5882 } 5883 5884 /* Save the RX chain producer indices. */ 5885 sc->rx_prod = prod; 5886 sc->rx_prod_bseq = prod_bseq; 5887 5888 /* We should never end up pointing to a next page pointer. */ 5889 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 5890 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", 5891 __FUNCTION__, rx_prod)); 5892 5893 /* Write the mailbox and tell the chip about the waiting rx_bd's. */ 5894 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod); 5895 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq); 5896 5897 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5898 BCE_VERBOSE_CTX); 5899} 5900 5901 5902/****************************************************************************/ 5903/* Free memory and clear the RX data structures. */ 5904/* */ 5905/* Returns: */ 5906/* Nothing. */ 5907/****************************************************************************/ 5908static void 5909bce_free_rx_chain(struct bce_softc *sc) 5910{ 5911 int i; 5912 5913 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5914 5915 /* Free any mbufs still in the RX mbuf chain. 
 */
	for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			if (sc->rx_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->rx_mbuf_tag,
				    sc->rx_mbuf_map[i],
				    BUS_DMASYNC_POSTREAD);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < sc->rx_pages; i++)
		if (sc->rx_bd_chain[i] != NULL)
			bzero((char *)sc->rx_bd_chain[i],
			    BCE_RX_CHAIN_PAGE_SZ);

	sc->free_rx_bd = sc->max_rx_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_rx_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
	    __FUNCTION__, sc->debug_rx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}


/****************************************************************************/
/* Allocate memory and initialize the page data structures.                 */
/* Assumes that bce_init_rx_chain() has not already been called.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_pg_chain(struct bce_softc *sc)
{
	struct rx_bd *pgbd;
	int i, rc = 0;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the page producer and consumer indices. */
	sc->pg_prod = 0;
	sc->pg_cons = 0;
	sc->free_pg_bd = USABLE_PG_BD_ALLOC;
	sc->max_pg_bd = USABLE_PG_BD_ALLOC;
	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
	DBRUN(sc->pg_empty_count = 0);

	/* Initialize the page next pointer chain entries. */
	for (i = 0; i < sc->pg_pages; i++) {
		int j;

		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->pg_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		pgbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
		pgbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
	}

	/* Setup the MQ BIN mapping for host_pg_bidx. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);

	/* Configure the rx_bd and page chain mbuf cluster size. */
	val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);

	/* Configure the context reserved for jumbo support. */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
	    BCE_L2CTX_RX_RBDC_JUMBO_KEY);

	/* Point the hardware to the first page in the page chain. */
	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);

	/* Fill up the page chain. */
	bce_fill_pg_chain(sc);

	for (i = 0; i < sc->pg_pages; i++) {
		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	DBRUNMSG(BCE_EXTREME_RECV,
	    bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
	return(rc);
}


/****************************************************************************/
/* Add mbufs to the page chain until its full or an mbuf allocation error   */
/* occurs.
*/ 6028/* */ 6029/* Returns: */ 6030/* Nothing */ 6031/****************************************************************************/ 6032static void 6033bce_fill_pg_chain(struct bce_softc *sc) 6034{ 6035 u16 prod, prod_idx; 6036 6037 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 6038 BCE_VERBOSE_CTX); 6039 6040 /* Get the page chain prodcuer index. */ 6041 prod = sc->pg_prod; 6042 6043 /* Keep filling the page chain until it's full. */ 6044 while (sc->free_pg_bd > 0) { 6045 prod_idx = PG_CHAIN_IDX(prod); 6046 if (bce_get_pg_buf(sc, prod, prod_idx)) { 6047 /* Bail out if we can't add an mbuf to the chain. */ 6048 break; 6049 } 6050 prod = NEXT_PG_BD(prod); 6051 } 6052 6053 /* Save the page chain producer index. */ 6054 sc->pg_prod = prod; 6055 6056 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 6057 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n", 6058 __FUNCTION__, pg_prod)); 6059 6060 /* 6061 * Write the mailbox and tell the chip about 6062 * the new rx_bd's in the page chain. 6063 */ 6064 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX, 6065 prod); 6066 6067 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 6068 BCE_VERBOSE_CTX); 6069} 6070 6071 6072/****************************************************************************/ 6073/* Free memory and clear the RX data structures. */ 6074/* */ 6075/* Returns: */ 6076/* Nothing. */ 6077/****************************************************************************/ 6078static void 6079bce_free_pg_chain(struct bce_softc *sc) 6080{ 6081 int i; 6082 6083 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 6084 6085 /* Free any mbufs still in the mbuf page chain. 
 */
	for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
		if (sc->pg_mbuf_ptr[i] != NULL) {
			if (sc->pg_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i],
				    BUS_DMASYNC_POSTREAD);
			m_freem(sc->pg_mbuf_ptr[i]);
			sc->pg_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_pg_mbuf_alloc--);
		}
	}

	/* Clear each page chain pages. */
	for (i = 0; i < sc->pg_pages; i++)
		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);

	sc->free_pg_bd = sc->max_pg_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_pg_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
	    __FUNCTION__, sc->debug_pg_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}


/****************************************************************************/
/* Build the advertised link settings for a remote PHY from the link        */
/* capabilities published by the firmware in shared memory.                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Advertised link settings (BCE_NETLINK_* bits).                         */
/****************************************************************************/
static u32
bce_get_rphy_link(struct bce_softc *sc)
{
	u32 advertise, link;
	int fdpx;

	advertise = 0;
	fdpx = 0;
	/* Read the link capabilities for the port type in use. */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0)
		link = bce_shmem_rd(sc, BCE_RPHY_SERDES_LINK);
	else
		link = bce_shmem_rd(sc, BCE_RPHY_COPPER_LINK);
	if (link & BCE_NETLINK_ANEG_ENB)
		advertise |= BCE_NETLINK_ANEG_ENB;
	if (link & BCE_NETLINK_SPEED_10HALF)
		advertise |= BCE_NETLINK_SPEED_10HALF;
	if (link & BCE_NETLINK_SPEED_10FULL) {
		advertise |= BCE_NETLINK_SPEED_10FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_100HALF)
		advertise |= BCE_NETLINK_SPEED_100HALF;
	if (link & BCE_NETLINK_SPEED_100FULL) {
		advertise |= BCE_NETLINK_SPEED_100FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_1000HALF)
		advertise |= BCE_NETLINK_SPEED_1000HALF;
	if (link & BCE_NETLINK_SPEED_1000FULL) {
		advertise |= BCE_NETLINK_SPEED_1000FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_2500HALF)
		advertise |= BCE_NETLINK_SPEED_2500HALF;
	if (link & BCE_NETLINK_SPEED_2500FULL) {
		advertise |= BCE_NETLINK_SPEED_2500FULL;
		fdpx++;
	}
	/* Advertise flow control only when a full-duplex speed is offered. */
	if (fdpx)
		advertise |= BCE_NETLINK_FC_PAUSE_SYM |
		    BCE_NETLINK_FC_PAUSE_ASYM;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
		advertise |= BCE_NETLINK_PHY_APP_REMOTE |
		    BCE_NETLINK_ETH_AT_WIRESPEED;

	return (advertise);
}


/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int error;

	DBENTER(BCE_VERBOSE);

	BCE_LOCK(sc);
	error = bce_ifmedia_upd_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE);
	return (error);
}


/****************************************************************************/
/* Set media options (with the softc lock held).                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	struct ifmedia *ifm;
	u32 link;
	int error, fdx;

	DBENTER(BCE_VERBOSE_PHY);

	error = 0;
	BCE_LOCK_ASSERT(sc);

	sc->bce_link_up = FALSE;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		ifm = &sc->bce_ifmedia;
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		link = 0;
		fdx = IFM_OPTIONS(ifm->ifm_media) & IFM_FDX;
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * Check advertised link of remote PHY by reading
			 * BCE_RPHY_SERDES_LINK or BCE_RPHY_COPPER_LINK.
			 * Always use the same link type of remote PHY.
6219 */ 6220 link = bce_get_rphy_link(sc); 6221 break; 6222 case IFM_2500_SX: 6223 if ((sc->bce_phy_flags & 6224 (BCE_PHY_REMOTE_PORT_FIBER_FLAG | 6225 BCE_PHY_2_5G_CAPABLE_FLAG)) == 0) 6226 return (EINVAL); 6227 /* 6228 * XXX 6229 * Have to enable forced 2.5Gbps configuration. 6230 */ 6231 if (fdx != 0) 6232 link |= BCE_NETLINK_SPEED_2500FULL; 6233 else 6234 link |= BCE_NETLINK_SPEED_2500HALF; 6235 break; 6236 case IFM_1000_SX: 6237 if ((sc->bce_phy_flags & 6238 BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) 6239 return (EINVAL); 6240 /* 6241 * XXX 6242 * Have to disable 2.5Gbps configuration. 6243 */ 6244 if (fdx != 0) 6245 link = BCE_NETLINK_SPEED_1000FULL; 6246 else 6247 link = BCE_NETLINK_SPEED_1000HALF; 6248 break; 6249 case IFM_1000_T: 6250 if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) 6251 return (EINVAL); 6252 if (fdx != 0) 6253 link = BCE_NETLINK_SPEED_1000FULL; 6254 else 6255 link = BCE_NETLINK_SPEED_1000HALF; 6256 break; 6257 case IFM_100_TX: 6258 if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) 6259 return (EINVAL); 6260 if (fdx != 0) 6261 link = BCE_NETLINK_SPEED_100FULL; 6262 else 6263 link = BCE_NETLINK_SPEED_100HALF; 6264 break; 6265 case IFM_10_T: 6266 if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) 6267 return (EINVAL); 6268 if (fdx != 0) 6269 link = BCE_NETLINK_SPEED_10FULL; 6270 else 6271 link = BCE_NETLINK_SPEED_10HALF; 6272 break; 6273 default: 6274 return (EINVAL); 6275 } 6276 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { 6277 /* 6278 * XXX 6279 * Advertise pause capability for full-duplex media. 
6280 */ 6281 if (fdx != 0) 6282 link |= BCE_NETLINK_FC_PAUSE_SYM | 6283 BCE_NETLINK_FC_PAUSE_ASYM; 6284 if ((sc->bce_phy_flags & 6285 BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) 6286 link |= BCE_NETLINK_PHY_APP_REMOTE | 6287 BCE_NETLINK_ETH_AT_WIRESPEED; 6288 } 6289 6290 bce_shmem_wr(sc, BCE_MB_ARGS_0, link); 6291 error = bce_fw_sync(sc, BCE_DRV_MSG_CODE_CMD_SET_LINK); 6292 } else { 6293 mii = device_get_softc(sc->bce_miibus); 6294 6295 /* Make sure the MII bus has been enumerated. */ 6296 if (mii) { 6297 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 6298 PHY_RESET(miisc); 6299 error = mii_mediachg(mii); 6300 } 6301 } 6302 6303 DBEXIT(BCE_VERBOSE_PHY); 6304 return (error); 6305} 6306 6307 6308static void 6309bce_ifmedia_sts_rphy(struct bce_softc *sc, struct ifmediareq *ifmr) 6310{ 6311 struct ifnet *ifp; 6312 u32 link; 6313 6314 ifp = sc->bce_ifp; 6315 BCE_LOCK_ASSERT(sc); 6316 6317 ifmr->ifm_status = IFM_AVALID; 6318 ifmr->ifm_active = IFM_ETHER; 6319 link = bce_shmem_rd(sc, BCE_LINK_STATUS); 6320 /* XXX Handle heart beat status? 
*/ 6321 if ((link & BCE_LINK_STATUS_LINK_UP) != 0) 6322 ifmr->ifm_status |= IFM_ACTIVE; 6323 else { 6324 ifmr->ifm_active |= IFM_NONE; 6325 ifp->if_baudrate = 0; 6326 return; 6327 } 6328 switch (link & BCE_LINK_STATUS_SPEED_MASK) { 6329 case BCE_LINK_STATUS_10HALF: 6330 ifmr->ifm_active |= IFM_10_T | IFM_HDX; 6331 ifp->if_baudrate = IF_Mbps(10UL); 6332 break; 6333 case BCE_LINK_STATUS_10FULL: 6334 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 6335 ifp->if_baudrate = IF_Mbps(10UL); 6336 break; 6337 case BCE_LINK_STATUS_100HALF: 6338 ifmr->ifm_active |= IFM_100_TX | IFM_HDX; 6339 ifp->if_baudrate = IF_Mbps(100UL); 6340 break; 6341 case BCE_LINK_STATUS_100FULL: 6342 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 6343 ifp->if_baudrate = IF_Mbps(100UL); 6344 break; 6345 case BCE_LINK_STATUS_1000HALF: 6346 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) 6347 ifmr->ifm_active |= IFM_1000_T | IFM_HDX; 6348 else 6349 ifmr->ifm_active |= IFM_1000_SX | IFM_HDX; 6350 ifp->if_baudrate = IF_Mbps(1000UL); 6351 break; 6352 case BCE_LINK_STATUS_1000FULL: 6353 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) 6354 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 6355 else 6356 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 6357 ifp->if_baudrate = IF_Mbps(1000UL); 6358 break; 6359 case BCE_LINK_STATUS_2500HALF: 6360 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { 6361 ifmr->ifm_active |= IFM_NONE; 6362 return; 6363 } else 6364 ifmr->ifm_active |= IFM_2500_SX | IFM_HDX; 6365 ifp->if_baudrate = IF_Mbps(2500UL); 6366 break; 6367 case BCE_LINK_STATUS_2500FULL: 6368 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { 6369 ifmr->ifm_active |= IFM_NONE; 6370 return; 6371 } else 6372 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 6373 ifp->if_baudrate = IF_Mbps(2500UL); 6374 break; 6375 default: 6376 ifmr->ifm_active |= IFM_NONE; 6377 return; 6378 } 6379 6380 if ((link & BCE_LINK_STATUS_RX_FC_ENABLED) != 0) 6381 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 6382 if 
((link & BCE_LINK_STATUS_TX_FC_ENABLED) != 0) 6383 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 6384} 6385 6386 6387/****************************************************************************/ 6388/* Reports current media status. */ 6389/* */ 6390/* Returns: */ 6391/* Nothing. */ 6392/****************************************************************************/ 6393static void 6394bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 6395{ 6396 struct bce_softc *sc = ifp->if_softc; 6397 struct mii_data *mii; 6398 6399 DBENTER(BCE_VERBOSE_PHY); 6400 6401 BCE_LOCK(sc); 6402 6403 if ((ifp->if_flags & IFF_UP) == 0) { 6404 BCE_UNLOCK(sc); 6405 return; 6406 } 6407 6408 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) 6409 bce_ifmedia_sts_rphy(sc, ifmr); 6410 else { 6411 mii = device_get_softc(sc->bce_miibus); 6412 mii_pollstat(mii); 6413 ifmr->ifm_active = mii->mii_media_active; 6414 ifmr->ifm_status = mii->mii_media_status; 6415 } 6416 6417 BCE_UNLOCK(sc); 6418 6419 DBEXIT(BCE_VERBOSE_PHY); 6420} 6421 6422 6423/****************************************************************************/ 6424/* Handles PHY generated interrupt events. */ 6425/* */ 6426/* Returns: */ 6427/* Nothing. */ 6428/****************************************************************************/ 6429static void 6430bce_phy_intr(struct bce_softc *sc) 6431{ 6432 u32 new_link_state, old_link_state; 6433 6434 DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); 6435 6436 DBRUN(sc->phy_interrupts++); 6437 6438 new_link_state = sc->status_block->status_attn_bits & 6439 STATUS_ATTN_BITS_LINK_STATE; 6440 old_link_state = sc->status_block->status_attn_bits_ack & 6441 STATUS_ATTN_BITS_LINK_STATE; 6442 6443 /* Handle any changes if the link state has changed. */ 6444 if (new_link_state != old_link_state) { 6445 6446 /* Update the status_attn_bits_ack field. 
*/ 6447 if (new_link_state) { 6448 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 6449 STATUS_ATTN_BITS_LINK_STATE); 6450 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n", 6451 __FUNCTION__); 6452 } else { 6453 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 6454 STATUS_ATTN_BITS_LINK_STATE); 6455 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n", 6456 __FUNCTION__); 6457 } 6458 6459 if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { 6460 if (new_link_state) { 6461 if (bootverbose) 6462 if_printf(sc->bce_ifp, "link UP\n"); 6463 if_link_state_change(sc->bce_ifp, 6464 LINK_STATE_UP); 6465 } else { 6466 if (bootverbose) 6467 if_printf(sc->bce_ifp, "link DOWN\n"); 6468 if_link_state_change(sc->bce_ifp, 6469 LINK_STATE_DOWN); 6470 } 6471 } 6472 /* 6473 * Assume link is down and allow 6474 * tick routine to update the state 6475 * based on the actual media state. 6476 */ 6477 sc->bce_link_up = FALSE; 6478 callout_stop(&sc->bce_tick_callout); 6479 bce_tick(sc); 6480 } 6481 6482 /* Acknowledge the link change interrupt. */ 6483 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 6484 6485 DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); 6486} 6487 6488 6489/****************************************************************************/ 6490/* Reads the receive consumer value from the status block (skipping over */ 6491/* chain page pointer if necessary). */ 6492/* */ 6493/* Returns: */ 6494/* hw_cons */ 6495/****************************************************************************/ 6496static inline u16 6497bce_get_hw_rx_cons(struct bce_softc *sc) 6498{ 6499 u16 hw_cons; 6500 6501 rmb(); 6502 hw_cons = sc->status_block->status_rx_quick_consumer_index0; 6503 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 6504 hw_cons++; 6505 6506 return hw_cons; 6507} 6508 6509/****************************************************************************/ 6510/* Handles received frame interrupt events. */ 6511/* */ 6512/* Returns: */ 6513/* Nothing. 
 */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	struct l2_fhdr *l2fhdr;
	struct ether_vlan_header *vh;
	unsigned int pkt_len;
	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
	u32 status;
	unsigned int rem_len;
	u16 sw_pg_cons, sw_pg_cons_idx;

	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_rx++);
	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < sc->rx_pages; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);

	/* Prepare the page chain pages to be accessed by the host CPU. */
	if (bce_hdr_split == TRUE) {
		for (int i = 0; i < sc->pg_pages; i++)
			bus_dmamap_sync(sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
	}

	/* Get the hardware's view of the RX consumer index. */
	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

	/* Get working copies of the driver's view of the consumer indices. */
	sw_rx_cons = sc->rx_cons;
	sw_pg_cons = sc->pg_cons;

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
	    sc->rx_empty_count++);

	/* Scan through the receive chain as long as there is work to do */
	/* ToDo: Consider setting a limit on the number of packets processed. */
	rmb();
	while (sw_rx_cons != hw_rx_cons) {
		struct mbuf *m0;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);

		/* Unmap the mbuf from DMA space. */
		bus_dmamap_sync(sc->rx_mbuf_tag,
		    sc->rx_mbuf_map[sw_rx_cons_idx],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx_mbuf_tag,
		    sc->rx_mbuf_map[sw_rx_cons_idx]);

		/* Remove the mbuf from the RX chain. */
		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
		DBRUN(sc->debug_rx_mbuf_alloc--);
		sc->free_rx_bd++;

		/*
		 * Frames received on the NetXtreme II are prepended
		 * with an l2_fhdr structure which provides status
		 * information about the received frame (including
		 * VLAN tags and checksum info).  The frames are
		 * also automatically adjusted to word align the IP
		 * header (i.e. two null bytes are inserted before
		 * the Ethernet header).  As a result the data
		 * DMA'd by the controller into the mbuf looks
		 * like this:
		 *
		 * +---------+-----+---------------------+-----+
		 * | l2_fhdr | pad | packet data         | FCS |
		 * +---------+-----+---------------------+-----+
		 *
		 * The l2_fhdr needs to be checked and skipped and
		 * the FCS needs to be stripped before sending the
		 * packet up the stack.
		 */
		l2fhdr = mtod(m0, struct l2_fhdr *);

		/* Get the packet data + FCS length and the status. */
		pkt_len = l2fhdr->l2_fhdr_pkt_len;
		status = l2fhdr->l2_fhdr_status;

		/*
		 * Skip over the l2_fhdr and pad, resulting in the
		 * following data in the mbuf:
		 * +---------------------+-----+
		 * | packet data         | FCS |
		 * +---------------------+-----+
		 */
		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);

		/*
		 * When split header mode is used, an ethernet frame
		 * may be split across the receive chain and the
		 * page chain.  If that occurs an mbuf cluster must be
		 * reassembled from the individual mbuf pieces.
		 */
		if (bce_hdr_split == TRUE) {
			/*
			 * Check whether the received frame fits in a single
			 * mbuf or not (i.e. packet data + FCS <=
			 * sc->rx_bd_mbuf_data_len bytes).
			 */
			if (pkt_len > m0->m_len) {
				/*
				 * The received frame is larger than a single mbuf.
				 * If the frame was a TCP frame then only the TCP
				 * header is placed in the mbuf, the remaining
				 * payload (including FCS) is placed in the page
				 * chain, the SPLIT flag is set, and the header
				 * length is placed in the IP checksum field.
				 * If the frame is not a TCP frame then the mbuf
				 * is filled and the remaining bytes are placed
				 * in the page chain.
				 */

				DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
				    "packet.\n", __FUNCTION__);
				DBRUN(sc->split_header_frames_rcvd++);

				/*
				 * When the page chain is enabled and the TCP
				 * header has been split from the TCP payload,
				 * the ip_xsum structure will reflect the length
				 * of the TCP header, not the IP checksum.  Set
				 * the packet length of the mbuf accordingly.
				 */
				if (status & L2_FHDR_STATUS_SPLIT) {
					m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
					DBRUN(sc->split_header_tcp_frames_rcvd++);
				}

				rem_len = pkt_len - m0->m_len;

				/* Pull mbufs off the page chain for any remaining data. */
				while (rem_len > 0) {
					struct mbuf *m_pg;

					sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);

					/* Remove the mbuf from the page chain. */
					m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
					sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
					DBRUN(sc->debug_pg_mbuf_alloc--);
					sc->free_pg_bd++;

					/* Unmap the page chain mbuf from DMA space. */
					bus_dmamap_sync(sc->pg_mbuf_tag,
					    sc->pg_mbuf_map[sw_pg_cons_idx],
					    BUS_DMASYNC_POSTREAD);
					bus_dmamap_unload(sc->pg_mbuf_tag,
					    sc->pg_mbuf_map[sw_pg_cons_idx]);

					/* Adjust the mbuf length. */
					if (rem_len < m_pg->m_len) {
						/* The mbuf chain is complete. */
						m_pg->m_len = rem_len;
						rem_len = 0;
					} else {
						/* More packet data is waiting. */
						rem_len -= m_pg->m_len;
					}

					/* Concatenate the mbuf cluster to the mbuf. */
					m_cat(m0, m_pg);

					sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
				}

				/* Set the total packet length. */
				m0->m_pkthdr.len = pkt_len;

			} else {
				/*
				 * The received packet is small and fits in a
				 * single mbuf (i.e. the l2_fhdr + pad + packet +
				 * FCS <= MHLEN).  In other words, the packet is
				 * 154 bytes or less in size.
				 */

				DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
				    "packet.\n", __FUNCTION__);

				/* Set the total packet length. */
				m0->m_pkthdr.len = m0->m_len = pkt_len;
			}
		} else
			/* Set the total packet length. */
			m0->m_pkthdr.len = m0->m_len = pkt_len;

		/* Remove the trailing Ethernet FCS. */
		m_adj(m0, -ETHER_CRC_LEN);

		/* Check that the resulting mbuf chain is valid. */
		DBRUN(m_sanity(m0, FALSE));
		DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
		    (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
		    BCE_PRINTF("Invalid Ethernet frame size!\n");
		    m_print(m0, 128));

		/* Debug-only: randomly inject a simulated frame error. */
		DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
		    sc->l2fhdr_error_sim_count++;
		    status = status | L2_FHDR_ERRORS_PHY_DECODE);

		/* Check the received frame for errors. */
		if (status & (L2_FHDR_ERRORS_BAD_CRC |
		    L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
		    L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Log the error and release the mbuf. */
			ifp->if_ierrors++;
			sc->l2fhdr_error_count++;

			m_freem(m0);
			m0 = NULL;
			goto bce_rx_intr_next_rx;
		}

		/* Send the packet to the appropriate interface. */
		m0->m_pkthdr.rcvif = ifp;

		/* Assume no hardware checksum. */
		m0->m_pkthdr.csum_flags = 0;

		/* Validate the checksum if offload enabled. */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/*
			 * Check for an IP datagram.  When SPLIT is set the
			 * ip_xsum field holds the TCP header length instead
			 * of the IP checksum, so it must be skipped here.
			 */
			if (!(status & L2_FHDR_STATUS_SPLIT) &&
			    (status & L2_FHDR_STATUS_IP_DATAGRAM)) {
				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				DBRUN(sc->csum_offload_ip++);
				/* Check if the IP checksum is valid. */
				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
					m0->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
			}

			/* Check for a valid TCP/UDP frame. */
			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			    L2_FHDR_STATUS_UDP_DATAGRAM)) {

				/* Check for a good TCP/UDP checksum. */
				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
					DBRUN(sc->csum_offload_tcp_udp++);
					m0->m_pkthdr.csum_data =
					    l2fhdr->l2_fhdr_tcp_udp_xsum;
					m0->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID
					    | CSUM_PSEUDO_HDR);
				}
			}
		}

		/* Attach the VLAN tag. */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(sc->rx_mode & BCE_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			DBRUN(sc->vlan_tagged_frames_rcvd++);
			if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
				DBRUN(sc->vlan_tagged_frames_stripped++);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m0,
				    l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				m0->m_pkthdr.ether_vtag =
				    l2fhdr->l2_fhdr_vlan_tag;
				m0->m_flags |= M_VLANTAG;
#endif
			} else {
				/*
				 * bce(4) controllers can't disable VLAN
				 * tag stripping if management firmware
				 * (ASF/IPMI/UMP) is running. So we always
				 * strip VLAN tag and manually reconstruct
				 * the VLAN frame by appending stripped
				 * VLAN tag in driver if VLAN tag stripping
				 * was disabled.
				 *
				 * TODO: LLC SNAP handling.
				 */
				bcopy(mtod(m0, uint8_t *),
				    mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN,
				    ETHER_ADDR_LEN * 2);
				m0->m_data -= ETHER_VLAN_ENCAP_LEN;
				vh = mtod(m0, struct ether_vlan_header *);
				vh->evl_encap_proto = htons(ETHERTYPE_VLAN);
				vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
				m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN;
				m0->m_len += ETHER_VLAN_ENCAP_LEN;
			}
		}

		/* Increment received packet statistics. */
		ifp->if_ipackets++;

bce_rx_intr_next_rx:
		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);

		/* If we have a packet, pass it up the stack */
		if (m0) {
			/* Make sure we don't lose our place when we release the lock. */
			sc->rx_cons = sw_rx_cons;
			sc->pg_cons = sw_pg_cons;

			/*
			 * Drop the lock around if_input(); the saved indices
			 * above are re-read afterwards in case another thread
			 * advanced them while the lock was released.
			 */
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m0);
			BCE_LOCK(sc);

			/* Recover our place. */
			sw_rx_cons = sc->rx_cons;
			sw_pg_cons = sc->pg_cons;
		}

		/* Refresh hw_cons to see if there's new work */
		if (sw_rx_cons == hw_rx_cons)
			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
	}

	/* No new packets.  Refill the page chain. */
	if (bce_hdr_split == TRUE) {
		sc->pg_cons = sw_pg_cons;
		bce_fill_pg_chain(sc);
	}

	/* No new packets.  Refill the RX chain. */
	sc->rx_cons = sw_rx_cons;
	bce_fill_rx_chain(sc);

	/* Prepare the RX chain pages to be accessed by the NIC. */
	for (int i = 0; i < sc->rx_pages; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	/* Prepare the page chain pages to be accessed by the NIC. */
	if (bce_hdr_split == TRUE) {
		for (int i = 0; i < sc->pg_pages; i++)
			bus_dmamap_sync(sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
	}

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static inline u16
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	u16 hw_cons;

	mb();
	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;

	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_tx++);
	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);

	BCE_LOCK_ASSERT(sc);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
		    "sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD_ALLOC),
		    BCE_PRINTF("%s(%d): TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
		    (int) MAX_TX_BD_ALLOC);
		    bce_breakpoint(sc));

		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
		    [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
		    __FILE__, __LINE__, sw_tx_chain_cons);
		    bce_breakpoint(sc));

		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): tx_bd END flag not set but "
			    "txmbuf == NULL!\n", __FILE__, __LINE__);
			    bce_breakpoint(sc));

			DBRUNMSG(BCE_INFO_SEND,
			    BCE_PRINTF("%s(): Unloading map/freeing mbuf "
			    "from tx_bd[0x%04X]\n", __FUNCTION__,
			    sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);

		/* Prevent speculative reads of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
		    DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): Open TX chain! %d/%d (used/total)\n",
		    __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_INTR);

	/* The read flushes the posted write to the PCI bus. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Enables interrupt generation.
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc, int coal_now)
{
	DBENTER(BCE_VERBOSE_INTR);

	/* Ack the current status block index with interrupts masked... */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* ...then unmask interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate interrupt (whether there is new data or not). */
	if (coal_now)
		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu = 0;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF("%s(%d): Block initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* In non-split mode the RX buffer size depends on the MTU. */
	if (bce_hdr_split == FALSE)
		bce_get_rx_buffer_sizes(sc, ifp->if_mtu);
	/*
	 * Calculate and program the hardware Ethernet MTU
	 * size. Be generous on the receive if we have room
	 * and allowed by the user.
	 */
	if (bce_strict_rx_mtu == TRUE)
		ether_mtu = ifp->if_mtu;
	else {
		if (bce_hdr_split == TRUE) {
			if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len + MCLBYTES)
				ether_mtu = sc->rx_bd_mbuf_data_len +
				    MCLBYTES;
			else
				ether_mtu = ifp->if_mtu;
		} else {
			if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
				ether_mtu = sc->rx_bd_mbuf_data_len;
			else
				ether_mtu = ifp->if_mtu;
		}
	}

	/* Account for link-level headers and the CRC. */
	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
	    __FUNCTION__, ether_mtu);

	/* Program the mtu, enabling jumbo frame support if necessary. */
	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
	else
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	if (bce_hdr_split == TRUE) {
		/* Init page buffer descriptor chain. */
		bce_init_pg_chain(sc);
	}

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

	/* Enable host interrupts. */
	bce_enable_intr(sc, 1);

	bce_ifmedia_upd_locked(ifp);

	/* Let the OS know the driver is up and running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic driver tick (watchdog, stats, link checks). */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

bce_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
		    "No management firmware running...\n");
		goto bce_mgmt_init_locked_exit;
	}

	ifp = sc->bce_ifp;

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd_locked(ifp);

bce_mgmt_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Handles controller initialization when called from an unlocked routine.  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 7208/****************************************************************************/ 7209static void 7210bce_init(void *xsc) 7211{ 7212 struct bce_softc *sc = xsc; 7213 7214 DBENTER(BCE_VERBOSE_RESET); 7215 7216 BCE_LOCK(sc); 7217 bce_init_locked(sc); 7218 BCE_UNLOCK(sc); 7219 7220 DBEXIT(BCE_VERBOSE_RESET); 7221} 7222 7223 7224/****************************************************************************/ 7225/* Modifies an mbuf for TSO on the hardware. */ 7226/* */ 7227/* Returns: */ 7228/* Pointer to a modified mbuf. */ 7229/****************************************************************************/ 7230static struct mbuf * 7231bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags) 7232{ 7233 struct mbuf *m; 7234 struct ether_header *eh; 7235 struct ip *ip; 7236 struct tcphdr *th; 7237 u16 etype; 7238 int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0; 7239 7240 DBRUN(sc->tso_frames_requested++); 7241 7242 /* Controller may modify mbuf chains. */ 7243 if (M_WRITABLE(*m_head) == 0) { 7244 m = m_dup(*m_head, M_NOWAIT); 7245 m_freem(*m_head); 7246 if (m == NULL) { 7247 sc->mbuf_alloc_failed_count++; 7248 *m_head = NULL; 7249 return (NULL); 7250 } 7251 *m_head = m; 7252 } 7253 7254 /* 7255 * For TSO the controller needs two pieces of info, 7256 * the MSS and the IP+TCP options length. 7257 */ 7258 m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip)); 7259 if (m == NULL) { 7260 *m_head = NULL; 7261 return (NULL); 7262 } 7263 eh = mtod(m, struct ether_header *); 7264 etype = ntohs(eh->ether_type); 7265 7266 /* Check for supported TSO Ethernet types (only IPv4 for now) */ 7267 switch (etype) { 7268 case ETHERTYPE_IP: 7269 ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); 7270 /* TSO only supported for TCP protocol. 
static struct mbuf *
bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
{
	struct mbuf *m;
	struct ether_header *eh;
	struct ip *ip;
	struct tcphdr *th;
	u16 etype;
	int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;

	DBRUN(sc->tso_frames_requested++);

	/*
	 * Controller may modify mbuf chains.  If the chain is not
	 * writable, replace it with a writable copy; on any failure
	 * below *m_head is freed and set to NULL and NULL is returned.
	 */
	if (M_WRITABLE(*m_head) == 0) {
		m = m_dup(*m_head, M_NOWAIT);
		m_freem(*m_head);
		if (m == NULL) {
			sc->mbuf_alloc_failed_count++;
			*m_head = NULL;
			return (NULL);
		}
		*m_head = m;
	}

	/*
	 * For TSO the controller needs two pieces of info,
	 * the MSS and the IP+TCP options length.
	 */
	m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL) {
		/* m_pullup frees the chain on failure. */
		*m_head = NULL;
		return (NULL);
	}
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Check for supported TSO Ethernet types (only IPv4 for now) */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));

		/* TSO only supported for TCP protocol. */
		if (ip->ip_p != IPPROTO_TCP) {
			BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
			    __FILE__, __LINE__);
			m_freem(*m_head);
			*m_head = NULL;
			return (NULL);
		}

		/* Get IP header length in bytes (min 20) */
		ip_hlen = ip->ip_hl << 2;
		m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen +
		    sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}

		/* Get the TCP header length in bytes (min 20) */
		/* Re-fetch ip: m_pullup may have moved the data. */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		tcp_hlen = (th->th_off << 2);

		/* Make sure all IP/TCP options live in the same buffer. */
		m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen +
		    tcp_hlen);
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}

		/* Clear IP header length and checksum, will be calc'd by h/w. */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		/* ip_len is kept only for the debug print below. */
		ip_len = ip->ip_len;
		ip->ip_len = 0;
		ip->ip_sum = 0;
		break;
	case ETHERTYPE_IPV6:
		BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
		/* NOT REACHED */
	default:
		BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
	}

	/* hdr_len is informational only (debug print below). */
	hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, "
	    "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
	    __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen,
	    tcp_hlen, ip_len);

	/* Set the LSO flag in the TX BD */
	*flags |= TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	*flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) -
	    sizeof(struct tcphdr)) >> 2) << 8);

	DBRUN(sc->tso_frames_completed++);
	return (*m_head);
}
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	int i, error, nsegs, rc = 0;

	DBENTER(BCE_VERBOSE_SEND);

	/* Make sure we have room in the TX chain. */
	if (sc->used_tx_bd >= sc->max_tx_bd)
		goto bce_tx_encap_exit;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
			/* bce_tso_setup() frees the mbuf on failure. */
			m0 = bce_tso_setup(sc, m_head, &flags);
			if (m0 == NULL) {
				DBRUN(sc->tso_frames_failed++);
				goto bce_tx_encap_exit;
			}
			mss = htole16(m0->m_pkthdr.tso_segsz);
		} else {
			if (m0->m_pkthdr.csum_flags & CSUM_IP)
				flags |= TX_BD_FLAGS_IP_CKSUM;
			if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
				flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
		}
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * Check if the DMA mapping was successful.  EFBIG means the
	 * chain has more fragments than the tag allows; try once to
	 * collapse it and re-map.
	 */
	if (error == EFBIG) {
		sc->mbuf_frag_count++;

		/* Try to defrag the mbuf. */
		m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS);
		if (m0 == NULL) {
			/* Defrag was unsuccessful */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
		    map, m0, segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/*
			 * Insufficient DMA buffers available; the mbuf is
			 * kept so the caller can re-queue it later.
			 */
			sc->dma_map_addr_tx_failed_count++;
			rc = error;
			goto bce_tx_encap_exit;
		} else if (error != 0) {
			/* Release it and return an error. */
			BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
			    "TX chain!\n", __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->dma_map_addr_tx_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available; mbuf kept for retry. */
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	} else if (error != 0) {
		/* Unrecoverable mapping error: drop the frame. */
		m_freem(m0);
		*m_head = NULL;
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	}

	/* Make sure there's room in the chain */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		/* Unload but keep the mbuf so the caller can re-queue it. */
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		rc = ENOBUFS;
		goto bce_tx_encap_exit;
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)]
		    [TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mss was already byte-swapped with
		 * htole16() at assignment; OR-ing htole32(mss << 16)
		 * with htole16(len) only produces the intended field
		 * layout on little-endian hosts — confirm for
		 * big-endian targets.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUNMSG(BCE_EXTREME_SEND,
	    bce_dump_tx_chain(sc, debug_prod, nsegs));

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(sc->debug_tx_mbuf_alloc++);

	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting TX frames. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);

bce_tx_encap_exit:
	DBEXIT(BCE_VERBOSE_SEND);
	return(rc);
}
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);

	/* Caller must already hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (sc->bce_link_up == FALSE) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		/* Stop when the transmit queue is empty. */
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			/* m_head is NULL when bce_tx_encap() dropped it. */
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
			    "TX chain is closed for business! Total "
			    "tx_bd used = %d\n", sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Exit if no packets were dequeued. */
	if (count == 0) {
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
		    "dequeued\n", __FUNCTION__);
		goto bce_start_locked_exit;
	}

	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
	    "send queue.\n", __FUNCTION__, count);

	/* Set the tx timeout (armed only when frames are pending). */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));

bce_start_locked_exit:
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
}
*/ 7626 sc->watchdog_timer = BCE_TX_TIMEOUT; 7627 7628 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID)); 7629 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc)); 7630 7631bce_start_locked_exit: 7632 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 7633} 7634 7635 7636/****************************************************************************/ 7637/* Main transmit routine when called from another routine without a lock. */ 7638/* */ 7639/* Returns: */ 7640/* Nothing. */ 7641/****************************************************************************/ 7642static void 7643bce_start(struct ifnet *ifp) 7644{ 7645 struct bce_softc *sc = ifp->if_softc; 7646 7647 DBENTER(BCE_VERBOSE_SEND); 7648 7649 BCE_LOCK(sc); 7650 bce_start_locked(ifp); 7651 BCE_UNLOCK(sc); 7652 7653 DBEXIT(BCE_VERBOSE_SEND); 7654} 7655 7656 7657/****************************************************************************/ 7658/* Handles any IOCTL calls from the operating system. */ 7659/* */ 7660/* Returns: */ 7661/* 0 for success, positive value for failure. */ 7662/****************************************************************************/ 7663static int 7664bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 7665{ 7666 struct bce_softc *sc = ifp->if_softc; 7667 struct ifreq *ifr = (struct ifreq *) data; 7668 struct mii_data *mii; 7669 int mask, error = 0; 7670 7671 DBENTER(BCE_VERBOSE_MISC); 7672 7673 switch(command) { 7674 7675 /* Set the interface MTU. */ 7676 case SIOCSIFMTU: 7677 /* Check that the MTU setting is supported. 
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0;

	DBENTER(BCE_VERBOSE_MISC);

	switch(command) {

	/* Set the interface MTU. */
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
		    (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO_MISC,
		    "SIOCSIFMTU: Changing MTU from %d to %d\n",
		    (int) ifp->if_mtu, (int) ifr->ifr_mtu);

		BCE_LOCK(sc);
		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/*
			 * Clear RUNNING first so bce_init_locked()
			 * performs a full re-initialization with the
			 * new MTU.
			 */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bce_init_locked(sc);
		}
		BCE_UNLOCK(sc);
		break;

	/* Set interface flags. */
	case SIOCSIFFLAGS:
		DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");

		BCE_LOCK(sc);

		/* Check if the interface is up. */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Change promiscuous/multicast flags as necessary. */
				bce_set_rx_mode(sc);
			} else {
				/* Start the HW */
				bce_init_locked(sc);
			}
		} else {
			/* The interface is down, check if driver is running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bce_stop(sc);

				/* If MFW is running, restart the controller a bit. */
				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
					bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
					bce_chipinit(sc);
					bce_mgmt_init_locked(sc);
				}
			}
		}

		BCE_UNLOCK(sc);
		break;

	/* Add/Delete multicast address */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCADDMULTI/SIOCDELMULTI\n");

		BCE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bce_set_rx_mode(sc);
		BCE_UNLOCK(sc);

		break;

	/* Set/Get Interface media */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
		/*
		 * Remote-PHY (firmware managed) links use the driver's
		 * own ifmedia; otherwise defer to the MII layer.
		 */
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
			error = ifmedia_ioctl(ifp, ifr, &sc->bce_ifmedia,
			    command);
		else {
			mii = device_get_softc(sc->bce_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	/* Set interface capability */
	case SIOCSIFCAP:
		/* mask holds the capability bits the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO_MISC,
		    "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);

		/* Toggle the TX checksum capabilities enable flag. */
		if (mask & IFCAP_TXCSUM &&
		    ifp->if_capabilities & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= BCE_IF_HWASSIST;
			else
				ifp->if_hwassist &= ~BCE_IF_HWASSIST;
		}

		/* Toggle the RX checksum capabilities enable flag. */
		if (mask & IFCAP_RXCSUM &&
		    ifp->if_capabilities & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		/* Toggle the TSO capabilities enable flag. */
		if (bce_tso_enable && (mask & IFCAP_TSO4) &&
		    ifp->if_capabilities & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (IFCAP_TSO4 & ifp->if_capenable)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_VLAN_HWCSUM &&
		    ifp->if_capabilities & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/*
		 * Don't actually disable VLAN tag stripping as
		 * management firmware (ASF/IPMI/UMP) requires the
		 * feature. If VLAN tag stripping is disabled driver
		 * will manually reconstruct the VLAN frame by
		 * appending stripped VLAN tag.
		 */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			/* HWTSO requires HWTAGGING; drop it together. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			    == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		/* We don't know how to handle the IOCTL, pass it on. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DBEXIT(BCE_VERBOSE_MISC);
	return(error);
}
static void
bce_watchdog(struct bce_softc *sc)
{
	uint32_t status;

	DBENTER(BCE_EXTREME_SEND);

	/* Caller must already hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	status = 0;
	/* If the watchdog timer hasn't expired then just exit. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		goto bce_watchdog_exit;

	status = REG_RD(sc, BCE_EMAC_RX_STATUS);
	/* If pause frames are active then don't reset the hardware. */
	if ((sc->bce_flags & BCE_USING_RX_FLOW_CONTROL) != 0) {
		if ((status & BCE_EMAC_RX_STATUS_FFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			sc->watchdog_timer = BCE_TX_TIMEOUT;
			goto bce_watchdog_exit;
		} else if ((status & BCE_EMAC_RX_STATUS_FF_RECEIVED) != 0 &&
		    (status & BCE_EMAC_RX_STATUS_N_RECEIVED) != 0) {
			/*
			 * If we're not currently XOFF'ed but have recently
			 * been XOFF'd/XON'd then assume that's delaying TX
			 * this time around.
			 */
			sc->watchdog_timer = BCE_TX_TIMEOUT;
			goto bce_watchdog_exit;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
	    __FILE__, __LINE__);

	DBRUNMSG(BCE_INFO,
	    bce_dump_driver_state(sc);
	    bce_dump_status_block(sc);
	    bce_dump_stats_block(sc);
	    bce_dump_ftqs(sc);
	    bce_dump_txp_state(sc, 0);
	    bce_dump_rxp_state(sc, 0);
	    bce_dump_tpat_state(sc, 0);
	    bce_dump_cp_state(sc, 0);
	    bce_dump_com_state(sc, 0));

	DBRUN(bce_breakpoint(sc));

	/* Force a full re-initialization of the controller. */
	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	sc->bce_ifp->if_oerrors++;

bce_watchdog_exit:
	/*
	 * NOTE(review): writes back the sampled status (0 when the
	 * timer has not expired) — presumably clears latched RX
	 * flow-control status bits; confirm against the register spec.
	 */
	REG_WR(sc, BCE_EMAC_RX_STATUS, status);
	DBEXIT(BCE_EXTREME_SEND);
}
*/ 7902/* */ 7903/* Returns: */ 7904/* Nothing. */ 7905/****************************************************************************/ 7906static void 7907bce_intr(void *xsc) 7908{ 7909 struct bce_softc *sc; 7910 struct ifnet *ifp; 7911 u32 status_attn_bits; 7912 u16 hw_rx_cons, hw_tx_cons; 7913 7914 sc = xsc; 7915 ifp = sc->bce_ifp; 7916 7917 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); 7918 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); 7919 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc)); 7920 7921 BCE_LOCK(sc); 7922 7923 DBRUN(sc->interrupts_generated++); 7924 7925 /* Synchnorize before we read from interface's status block */ 7926 bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); 7927 7928 /* 7929 * If the hardware status block index matches the last value read 7930 * by the driver and we haven't asserted our interrupt then there's 7931 * nothing to do. This may only happen in case of INTx due to the 7932 * interrupt arriving at the CPU before the status block is updated. 7933 */ 7934 if ((sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) == 0 && 7935 sc->status_block->status_idx == sc->last_status_idx && 7936 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 7937 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { 7938 DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n", 7939 __FUNCTION__); 7940 goto bce_intr_exit; 7941 } 7942 7943 /* Ack the interrupt and stop others from occuring. */ 7944 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 7945 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 7946 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 7947 7948 /* Check if the hardware has finished any work. */ 7949 hw_rx_cons = bce_get_hw_rx_cons(sc); 7950 hw_tx_cons = bce_get_hw_tx_cons(sc); 7951 7952 /* Keep processing data as long as there is work to do. 
*/ 7953 for (;;) { 7954 7955 status_attn_bits = sc->status_block->status_attn_bits; 7956 7957 DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), 7958 BCE_PRINTF("Simulating unexpected status attention " 7959 "bit set."); 7960 sc->unexpected_attention_sim_count++; 7961 status_attn_bits = status_attn_bits | 7962 STATUS_ATTN_BITS_PARITY_ERROR); 7963 7964 /* Was it a link change interrupt? */ 7965 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 7966 (sc->status_block->status_attn_bits_ack & 7967 STATUS_ATTN_BITS_LINK_STATE)) { 7968 bce_phy_intr(sc); 7969 7970 /* Clear transient updates during link state change. */ 7971 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | 7972 BCE_HC_COMMAND_COAL_NOW_WO_INT); 7973 REG_RD(sc, BCE_HC_COMMAND); 7974 } 7975 7976 /* If any other attention is asserted, the chip is toast. */ 7977 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 7978 (sc->status_block->status_attn_bits_ack & 7979 ~STATUS_ATTN_BITS_LINK_STATE))) { 7980 7981 sc->unexpected_attention_count++; 7982 7983 BCE_PRINTF("%s(%d): Fatal attention detected: " 7984 "0x%08X\n", __FILE__, __LINE__, 7985 sc->status_block->status_attn_bits); 7986 7987 DBRUNMSG(BCE_FATAL, 7988 if (unexpected_attention_sim_control == 0) 7989 bce_breakpoint(sc)); 7990 7991 bce_init_locked(sc); 7992 goto bce_intr_exit; 7993 } 7994 7995 /* Check for any completed RX frames. */ 7996 if (hw_rx_cons != sc->hw_rx_cons) 7997 bce_rx_intr(sc); 7998 7999 /* Check for any completed TX frames. */ 8000 if (hw_tx_cons != sc->hw_tx_cons) 8001 bce_tx_intr(sc); 8002 8003 /* Save status block index value for the next interrupt. */ 8004 sc->last_status_idx = sc->status_block->status_idx; 8005 8006 /* 8007 * Prevent speculative reads from getting 8008 * ahead of the status block. 8009 */ 8010 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 8011 BUS_SPACE_BARRIER_READ); 8012 8013 /* 8014 * If there's no work left then exit the 8015 * interrupt service routine. 
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	DBENTER(BCE_VERBOSE_MISC);

	/* Caller must already hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
	    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses (all hash bits set). */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");

		/*
		 * Hash each multicast address into one bit of the 256-bit
		 * filter: low byte of the CRC selects register (top 3 bits)
		 * and bit position (low 5 bits).
		 */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}
		if_maddr_runlock(ifp);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
		    "0x%08X\n", rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);

	DBEXIT(BCE_VERBOSE_MISC);
}
*/ 8136/* */ 8137/* Returns: */ 8138/* Nothing. */ 8139/****************************************************************************/ 8140static void 8141bce_stats_update(struct bce_softc *sc) 8142{ 8143 struct ifnet *ifp; 8144 struct statistics_block *stats; 8145 8146 DBENTER(BCE_EXTREME_MISC); 8147 8148 ifp = sc->bce_ifp; 8149 8150 bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); 8151 8152 stats = (struct statistics_block *) sc->stats_block; 8153 8154 /* 8155 * Certain controllers don't report 8156 * carrier sense errors correctly. 8157 * See errata E11_5708CA0_1165. 8158 */ 8159 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 8160 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) 8161 ifp->if_oerrors += 8162 (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 8163 8164 /* 8165 * Update the sysctl statistics from the 8166 * hardware statistics. 8167 */ 8168 sc->stat_IfHCInOctets = 8169 ((u64) stats->stat_IfHCInOctets_hi << 32) + 8170 (u64) stats->stat_IfHCInOctets_lo; 8171 8172 sc->stat_IfHCInBadOctets = 8173 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 8174 (u64) stats->stat_IfHCInBadOctets_lo; 8175 8176 sc->stat_IfHCOutOctets = 8177 ((u64) stats->stat_IfHCOutOctets_hi << 32) + 8178 (u64) stats->stat_IfHCOutOctets_lo; 8179 8180 sc->stat_IfHCOutBadOctets = 8181 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + 8182 (u64) stats->stat_IfHCOutBadOctets_lo; 8183 8184 sc->stat_IfHCInUcastPkts = 8185 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + 8186 (u64) stats->stat_IfHCInUcastPkts_lo; 8187 8188 sc->stat_IfHCInMulticastPkts = 8189 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + 8190 (u64) stats->stat_IfHCInMulticastPkts_lo; 8191 8192 sc->stat_IfHCInBroadcastPkts = 8193 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + 8194 (u64) stats->stat_IfHCInBroadcastPkts_lo; 8195 8196 sc->stat_IfHCOutUcastPkts = 8197 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + 8198 (u64) stats->stat_IfHCOutUcastPkts_lo; 8199 8200 sc->stat_IfHCOutMulticastPkts = 8201 ((u64) 
stats->stat_IfHCOutMulticastPkts_hi << 32) + 8202 (u64) stats->stat_IfHCOutMulticastPkts_lo; 8203 8204 sc->stat_IfHCOutBroadcastPkts = 8205 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 8206 (u64) stats->stat_IfHCOutBroadcastPkts_lo; 8207 8208 /* ToDo: Preserve counters beyond 32 bits? */ 8209 /* ToDo: Read the statistics from auto-clear regs? */ 8210 8211 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 8212 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 8213 8214 sc->stat_Dot3StatsCarrierSenseErrors = 8215 stats->stat_Dot3StatsCarrierSenseErrors; 8216 8217 sc->stat_Dot3StatsFCSErrors = 8218 stats->stat_Dot3StatsFCSErrors; 8219 8220 sc->stat_Dot3StatsAlignmentErrors = 8221 stats->stat_Dot3StatsAlignmentErrors; 8222 8223 sc->stat_Dot3StatsSingleCollisionFrames = 8224 stats->stat_Dot3StatsSingleCollisionFrames; 8225 8226 sc->stat_Dot3StatsMultipleCollisionFrames = 8227 stats->stat_Dot3StatsMultipleCollisionFrames; 8228 8229 sc->stat_Dot3StatsDeferredTransmissions = 8230 stats->stat_Dot3StatsDeferredTransmissions; 8231 8232 sc->stat_Dot3StatsExcessiveCollisions = 8233 stats->stat_Dot3StatsExcessiveCollisions; 8234 8235 sc->stat_Dot3StatsLateCollisions = 8236 stats->stat_Dot3StatsLateCollisions; 8237 8238 sc->stat_EtherStatsCollisions = 8239 stats->stat_EtherStatsCollisions; 8240 8241 sc->stat_EtherStatsFragments = 8242 stats->stat_EtherStatsFragments; 8243 8244 sc->stat_EtherStatsJabbers = 8245 stats->stat_EtherStatsJabbers; 8246 8247 sc->stat_EtherStatsUndersizePkts = 8248 stats->stat_EtherStatsUndersizePkts; 8249 8250 sc->stat_EtherStatsOversizePkts = 8251 stats->stat_EtherStatsOversizePkts; 8252 8253 sc->stat_EtherStatsPktsRx64Octets = 8254 stats->stat_EtherStatsPktsRx64Octets; 8255 8256 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 8257 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 8258 8259 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 8260 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 8261 8262 
sc->stat_EtherStatsPktsRx256Octetsto511Octets = 8263 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 8264 8265 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 8266 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 8267 8268 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 8269 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 8270 8271 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 8272 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 8273 8274 sc->stat_EtherStatsPktsTx64Octets = 8275 stats->stat_EtherStatsPktsTx64Octets; 8276 8277 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 8278 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 8279 8280 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 8281 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 8282 8283 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 8284 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 8285 8286 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 8287 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 8288 8289 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 8290 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 8291 8292 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 8293 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 8294 8295 sc->stat_XonPauseFramesReceived = 8296 stats->stat_XonPauseFramesReceived; 8297 8298 sc->stat_XoffPauseFramesReceived = 8299 stats->stat_XoffPauseFramesReceived; 8300 8301 sc->stat_OutXonSent = 8302 stats->stat_OutXonSent; 8303 8304 sc->stat_OutXoffSent = 8305 stats->stat_OutXoffSent; 8306 8307 sc->stat_FlowControlDone = 8308 stats->stat_FlowControlDone; 8309 8310 sc->stat_MacControlFramesReceived = 8311 stats->stat_MacControlFramesReceived; 8312 8313 sc->stat_XoffStateEntered = 8314 stats->stat_XoffStateEntered; 8315 8316 sc->stat_IfInFramesL2FilterDiscards = 8317 stats->stat_IfInFramesL2FilterDiscards; 8318 8319 sc->stat_IfInRuleCheckerDiscards = 8320 stats->stat_IfInRuleCheckerDiscards; 8321 8322 sc->stat_IfInFTQDiscards = 8323 
stats->stat_IfInFTQDiscards; 8324 8325 sc->stat_IfInMBUFDiscards = 8326 stats->stat_IfInMBUFDiscards; 8327 8328 sc->stat_IfInRuleCheckerP4Hit = 8329 stats->stat_IfInRuleCheckerP4Hit; 8330 8331 sc->stat_CatchupInRuleCheckerDiscards = 8332 stats->stat_CatchupInRuleCheckerDiscards; 8333 8334 sc->stat_CatchupInFTQDiscards = 8335 stats->stat_CatchupInFTQDiscards; 8336 8337 sc->stat_CatchupInMBUFDiscards = 8338 stats->stat_CatchupInMBUFDiscards; 8339 8340 sc->stat_CatchupInRuleCheckerP4Hit = 8341 stats->stat_CatchupInRuleCheckerP4Hit; 8342 8343 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 8344 8345 /* 8346 * Update the interface statistics from the 8347 * hardware statistics. 8348 */ 8349 ifp->if_collisions = 8350 (u_long) sc->stat_EtherStatsCollisions; 8351 8352 /* ToDo: This method loses soft errors. */ 8353 ifp->if_ierrors = 8354 (u_long) sc->stat_EtherStatsUndersizePkts + 8355 (u_long) sc->stat_EtherStatsOversizePkts + 8356 (u_long) sc->stat_IfInMBUFDiscards + 8357 (u_long) sc->stat_Dot3StatsAlignmentErrors + 8358 (u_long) sc->stat_Dot3StatsFCSErrors + 8359 (u_long) sc->stat_IfInRuleCheckerDiscards + 8360 (u_long) sc->stat_IfInFTQDiscards + 8361 (u_long) sc->com_no_buffers; 8362 8363 /* ToDo: This method loses soft errors. */ 8364 ifp->if_oerrors = 8365 (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 8366 (u_long) sc->stat_Dot3StatsExcessiveCollisions + 8367 (u_long) sc->stat_Dot3StatsLateCollisions; 8368 8369 /* ToDo: Add additional statistics? */ 8370 8371 DBEXIT(BCE_EXTREME_MISC); 8372} 8373 8374 8375/****************************************************************************/ 8376/* Periodic function to notify the bootcode that the driver is still */ 8377/* present. */ 8378/* */ 8379/* Returns: */ 8380/* Nothing. 
*/ 8381/****************************************************************************/ 8382static void 8383bce_pulse(void *xsc) 8384{ 8385 struct bce_softc *sc = xsc; 8386 u32 msg; 8387 8388 DBENTER(BCE_EXTREME_MISC); 8389 8390 BCE_LOCK_ASSERT(sc); 8391 8392 /* Tell the firmware that the driver is still running. */ 8393 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq; 8394 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 8395 8396 /* Update the bootcode condition. */ 8397 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 8398 8399 /* Report whether the bootcode still knows the driver is running. */ 8400 if (bce_verbose || bootverbose) { 8401 if (sc->bce_drv_cardiac_arrest == FALSE) { 8402 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 8403 sc->bce_drv_cardiac_arrest = TRUE; 8404 BCE_PRINTF("%s(): Warning: bootcode " 8405 "thinks driver is absent! " 8406 "(bc_state = 0x%08X)\n", 8407 __FUNCTION__, sc->bc_state); 8408 } 8409 } else { 8410 /* 8411 * Not supported by all bootcode versions. 8412 * (v5.0.11+ and v5.2.1+) Older bootcode 8413 * will require the driver to reset the 8414 * controller to clear this condition. 8415 */ 8416 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 8417 sc->bce_drv_cardiac_arrest = FALSE; 8418 BCE_PRINTF("%s(): Bootcode found the " 8419 "driver pulse! (bc_state = 0x%08X)\n", 8420 __FUNCTION__, sc->bc_state); 8421 } 8422 } 8423 } 8424 8425 8426 /* Schedule the next pulse. */ 8427 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc); 8428 8429 DBEXIT(BCE_EXTREME_MISC); 8430} 8431 8432 8433/****************************************************************************/ 8434/* Periodic function to perform maintenance tasks. */ 8435/* */ 8436/* Returns: */ 8437/* Nothing. 
*/
/****************************************************************************/
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct ifmediareq ifmr;

	ifp = sc->bce_ifp;

	DBENTER(BCE_EXTREME_MISC);

	BCE_LOCK_ASSERT(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Ensure page and RX chains get refilled in low-memory situations. */
	if (bce_hdr_split == TRUE)
		bce_fill_pg_chain(sc);
	bce_fill_rx_chain(sc);

	/* Check that chip hasn't hung. */
	bce_watchdog(sc);

	/* If link is up already up then we're done. */
	if (sc->bce_link_up == TRUE)
		goto bce_tick_exit;

	/*
	 * Link is down.  Check what the PHY's doing.  Remote-PHY
	 * capable devices get their link status from shared memory
	 * via the media-status helper; everything else polls the MII.
	 */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		bzero(&ifmr, sizeof(ifmr));
		bce_ifmedia_sts_rphy(sc, &ifmr);
		if ((ifmr.ifm_status & (IFM_ACTIVE | IFM_AVALID)) ==
		    (IFM_ACTIVE | IFM_AVALID)) {
			sc->bce_link_up = TRUE;
			bce_miibus_statchg(sc->bce_dev);
		}
	} else {
		mii = device_get_softc(sc->bce_miibus);
		mii_tick(mii);
		/* Check if the link has come up. */
		if ((mii->mii_media_status & IFM_ACTIVE) &&
		    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n",
			    __FUNCTION__);
			sc->bce_link_up = TRUE;
			if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
			    (bce_verbose || bootverbose))
				BCE_PRINTF("Gigabit link up!\n");
		}

	}
	if (sc->bce_link_up == TRUE) {
		/* Now that link is up, handle any outstanding TX traffic. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
			    "pending TX traffic.\n", __FUNCTION__);
			bce_start_locked(ifp);
		}
	}

bce_tick_exit:
	DBEXIT(BCE_EXTREME_MISC);
}

/****************************************************************************/
/* Negotiate optional capabilities (KEEP_VLAN, remote PHY) with the        */
/* firmware via the shared-memory capability mailbox and acknowledge the   */
/* ones this driver supports.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_fw_cap_init(struct bce_softc *sc)
{
	u32 ack, cap, link;

	ack = 0;
	cap = bce_shmem_rd(sc, BCE_FW_CAP_MB);

	/* Firmware without the capability signature supports none of this. */
	if ((cap & BCE_FW_CAP_SIGNATURE_MAGIC_MASK) !=
	    BCE_FW_CAP_SIGNATURE_MAGIC)
		return;

	/* Ack VLAN-tag preservation only when both MFW and bootcode offer it. */
	if ((cap & (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) ==
	    (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN))
		ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC |
		    BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN;

	/* SerDes devices may hand link management to the remote PHY. */
	if ((sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) != 0 &&
	    (cap & BCE_FW_CAP_REMOTE_PHY_CAP) != 0) {
		sc->bce_phy_flags &= ~BCE_PHY_REMOTE_PORT_FIBER_FLAG;
		sc->bce_phy_flags |= BCE_PHY_REMOTE_CAP_FLAG;
		link = bce_shmem_rd(sc, BCE_LINK_STATUS);
		if ((link & BCE_LINK_STATUS_SERDES_LINK) != 0)
			sc->bce_phy_flags |= BCE_PHY_REMOTE_PORT_FIBER_FLAG;
		ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC |
		    BCE_FW_CAP_REMOTE_PHY_CAP;
	}

	if (ack != 0)
		bce_shmem_wr(sc, BCE_DRV_ACK_CAP_MB, ack);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface.      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 8546/****************************************************************************/ 8547static int 8548bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 8549{ 8550 int error; 8551 int result; 8552 struct bce_softc *sc; 8553 8554 result = -1; 8555 error = sysctl_handle_int(oidp, &result, 0, req); 8556 8557 if (error || !req->newptr) 8558 return (error); 8559 8560 if (result == 1) { 8561 sc = (struct bce_softc *)arg1; 8562 bce_dump_driver_state(sc); 8563 } 8564 8565 return error; 8566} 8567 8568 8569/****************************************************************************/ 8570/* Allows the hardware state to be dumped through the sysctl interface. */ 8571/* */ 8572/* Returns: */ 8573/* 0 for success, positive value for failure. */ 8574/****************************************************************************/ 8575static int 8576bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 8577{ 8578 int error; 8579 int result; 8580 struct bce_softc *sc; 8581 8582 result = -1; 8583 error = sysctl_handle_int(oidp, &result, 0, req); 8584 8585 if (error || !req->newptr) 8586 return (error); 8587 8588 if (result == 1) { 8589 sc = (struct bce_softc *)arg1; 8590 bce_dump_hw_state(sc); 8591 } 8592 8593 return error; 8594} 8595 8596 8597/****************************************************************************/ 8598/* Allows the status block to be dumped through the sysctl interface. */ 8599/* */ 8600/* Returns: */ 8601/* 0 for success, positive value for failure. 
*/ 8602/****************************************************************************/ 8603static int 8604bce_sysctl_status_block(SYSCTL_HANDLER_ARGS) 8605{ 8606 int error; 8607 int result; 8608 struct bce_softc *sc; 8609 8610 result = -1; 8611 error = sysctl_handle_int(oidp, &result, 0, req); 8612 8613 if (error || !req->newptr) 8614 return (error); 8615 8616 if (result == 1) { 8617 sc = (struct bce_softc *)arg1; 8618 bce_dump_status_block(sc); 8619 } 8620 8621 return error; 8622} 8623 8624 8625/****************************************************************************/ 8626/* Allows the stats block to be dumped through the sysctl interface. */ 8627/* */ 8628/* Returns: */ 8629/* 0 for success, positive value for failure. */ 8630/****************************************************************************/ 8631static int 8632bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS) 8633{ 8634 int error; 8635 int result; 8636 struct bce_softc *sc; 8637 8638 result = -1; 8639 error = sysctl_handle_int(oidp, &result, 0, req); 8640 8641 if (error || !req->newptr) 8642 return (error); 8643 8644 if (result == 1) { 8645 sc = (struct bce_softc *)arg1; 8646 bce_dump_stats_block(sc); 8647 } 8648 8649 return error; 8650} 8651 8652 8653/****************************************************************************/ 8654/* Allows the stat counters to be cleared without unloading/reloading the */ 8655/* driver. */ 8656/* */ 8657/* Returns: */ 8658/* 0 for success, positive value for failure. 
*/ 8659/****************************************************************************/ 8660static int 8661bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS) 8662{ 8663 int error; 8664 int result; 8665 struct bce_softc *sc; 8666 8667 result = -1; 8668 error = sysctl_handle_int(oidp, &result, 0, req); 8669 8670 if (error || !req->newptr) 8671 return (error); 8672 8673 if (result == 1) { 8674 sc = (struct bce_softc *)arg1; 8675 struct statistics_block *stats; 8676 8677 stats = (struct statistics_block *) sc->stats_block; 8678 bzero(stats, sizeof(struct statistics_block)); 8679 bus_dmamap_sync(sc->stats_tag, sc->stats_map, 8680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 8681 8682 /* Clear the internal H/W statistics counters. */ 8683 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 8684 8685 /* Reset the driver maintained statistics. */ 8686 sc->interrupts_rx = 8687 sc->interrupts_tx = 0; 8688 sc->tso_frames_requested = 8689 sc->tso_frames_completed = 8690 sc->tso_frames_failed = 0; 8691 sc->rx_empty_count = 8692 sc->tx_full_count = 0; 8693 sc->rx_low_watermark = USABLE_RX_BD_ALLOC; 8694 sc->tx_hi_watermark = 0; 8695 sc->l2fhdr_error_count = 8696 sc->l2fhdr_error_sim_count = 0; 8697 sc->mbuf_alloc_failed_count = 8698 sc->mbuf_alloc_failed_sim_count = 0; 8699 sc->dma_map_addr_rx_failed_count = 8700 sc->dma_map_addr_tx_failed_count = 0; 8701 sc->mbuf_frag_count = 0; 8702 sc->csum_offload_tcp_udp = 8703 sc->csum_offload_ip = 0; 8704 sc->vlan_tagged_frames_rcvd = 8705 sc->vlan_tagged_frames_stripped = 0; 8706 sc->split_header_frames_rcvd = 8707 sc->split_header_tcp_frames_rcvd = 0; 8708 8709 /* Clear firmware maintained statistics. */ 8710 REG_WR_IND(sc, 0x120084, 0); 8711 } 8712 8713 return error; 8714} 8715 8716 8717/****************************************************************************/ 8718/* Allows the shared memory contents to be dumped through the sysctl . */ 8719/* interface. */ 8720/* */ 8721/* Returns: */ 8722/* 0 for success, positive value for failure. 
*/ 8723/****************************************************************************/ 8724static int 8725bce_sysctl_shmem_state(SYSCTL_HANDLER_ARGS) 8726{ 8727 int error; 8728 int result; 8729 struct bce_softc *sc; 8730 8731 result = -1; 8732 error = sysctl_handle_int(oidp, &result, 0, req); 8733 8734 if (error || !req->newptr) 8735 return (error); 8736 8737 if (result == 1) { 8738 sc = (struct bce_softc *)arg1; 8739 bce_dump_shmem_state(sc); 8740 } 8741 8742 return error; 8743} 8744 8745 8746/****************************************************************************/ 8747/* Allows the bootcode state to be dumped through the sysctl interface. */ 8748/* */ 8749/* Returns: */ 8750/* 0 for success, positive value for failure. */ 8751/****************************************************************************/ 8752static int 8753bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS) 8754{ 8755 int error; 8756 int result; 8757 struct bce_softc *sc; 8758 8759 result = -1; 8760 error = sysctl_handle_int(oidp, &result, 0, req); 8761 8762 if (error || !req->newptr) 8763 return (error); 8764 8765 if (result == 1) { 8766 sc = (struct bce_softc *)arg1; 8767 bce_dump_bc_state(sc); 8768 } 8769 8770 return error; 8771} 8772 8773 8774/****************************************************************************/ 8775/* Provides a sysctl interface to allow dumping the RX BD chain. */ 8776/* */ 8777/* Returns: */ 8778/* 0 for success, positive value for failure. 
*/ 8779/****************************************************************************/ 8780static int 8781bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS) 8782{ 8783 int error; 8784 int result; 8785 struct bce_softc *sc; 8786 8787 result = -1; 8788 error = sysctl_handle_int(oidp, &result, 0, req); 8789 8790 if (error || !req->newptr) 8791 return (error); 8792 8793 if (result == 1) { 8794 sc = (struct bce_softc *)arg1; 8795 bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC); 8796 } 8797 8798 return error; 8799} 8800 8801 8802/****************************************************************************/ 8803/* Provides a sysctl interface to allow dumping the RX MBUF chain. */ 8804/* */ 8805/* Returns: */ 8806/* 0 for success, positive value for failure. */ 8807/****************************************************************************/ 8808static int 8809bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS) 8810{ 8811 int error; 8812 int result; 8813 struct bce_softc *sc; 8814 8815 result = -1; 8816 error = sysctl_handle_int(oidp, &result, 0, req); 8817 8818 if (error || !req->newptr) 8819 return (error); 8820 8821 if (result == 1) { 8822 sc = (struct bce_softc *)arg1; 8823 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC); 8824 } 8825 8826 return error; 8827} 8828 8829 8830/****************************************************************************/ 8831/* Provides a sysctl interface to allow dumping the TX chain. */ 8832/* */ 8833/* Returns: */ 8834/* 0 for success, positive value for failure. 
*/ 8835/****************************************************************************/ 8836static int 8837bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 8838{ 8839 int error; 8840 int result; 8841 struct bce_softc *sc; 8842 8843 result = -1; 8844 error = sysctl_handle_int(oidp, &result, 0, req); 8845 8846 if (error || !req->newptr) 8847 return (error); 8848 8849 if (result == 1) { 8850 sc = (struct bce_softc *)arg1; 8851 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC); 8852 } 8853 8854 return error; 8855} 8856 8857 8858/****************************************************************************/ 8859/* Provides a sysctl interface to allow dumping the page chain. */ 8860/* */ 8861/* Returns: */ 8862/* 0 for success, positive value for failure. */ 8863/****************************************************************************/ 8864static int 8865bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS) 8866{ 8867 int error; 8868 int result; 8869 struct bce_softc *sc; 8870 8871 result = -1; 8872 error = sysctl_handle_int(oidp, &result, 0, req); 8873 8874 if (error || !req->newptr) 8875 return (error); 8876 8877 if (result == 1) { 8878 sc = (struct bce_softc *)arg1; 8879 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC); 8880 } 8881 8882 return error; 8883} 8884 8885/****************************************************************************/ 8886/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */ 8887/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8888/* */ 8889/* Returns: */ 8890/* 0 for success, positive value for failure. 
*/ 8891/****************************************************************************/ 8892static int 8893bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS) 8894{ 8895 struct bce_softc *sc = (struct bce_softc *)arg1; 8896 int error; 8897 u32 result; 8898 u32 val[1]; 8899 u8 *data = (u8 *) val; 8900 8901 result = -1; 8902 error = sysctl_handle_int(oidp, &result, 0, req); 8903 if (error || (req->newptr == NULL)) 8904 return (error); 8905 8906 error = bce_nvram_read(sc, result, data, 4); 8907 8908 BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0])); 8909 8910 return (error); 8911} 8912 8913 8914/****************************************************************************/ 8915/* Provides a sysctl interface to allow reading arbitrary registers in the */ 8916/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8917/* */ 8918/* Returns: */ 8919/* 0 for success, positive value for failure. */ 8920/****************************************************************************/ 8921static int 8922bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 8923{ 8924 struct bce_softc *sc = (struct bce_softc *)arg1; 8925 int error; 8926 u32 val, result; 8927 8928 result = -1; 8929 error = sysctl_handle_int(oidp, &result, 0, req); 8930 if (error || (req->newptr == NULL)) 8931 return (error); 8932 8933 /* Make sure the register is accessible. */ 8934 if (result < 0x8000) { 8935 val = REG_RD(sc, result); 8936 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 8937 } else if (result < 0x0280000) { 8938 val = REG_RD_IND(sc, result); 8939 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); 8940 } 8941 8942 return (error); 8943} 8944 8945 8946/****************************************************************************/ 8947/* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 8948/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8949/* */ 8950/* Returns: */ 8951/* 0 for success, positive value for failure. 
*/ 8952/****************************************************************************/ 8953static int 8954bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 8955{ 8956 struct bce_softc *sc; 8957 device_t dev; 8958 int error, result; 8959 u16 val; 8960 8961 result = -1; 8962 error = sysctl_handle_int(oidp, &result, 0, req); 8963 if (error || (req->newptr == NULL)) 8964 return (error); 8965 8966 /* Make sure the register is accessible. */ 8967 if (result < 0x20) { 8968 sc = (struct bce_softc *)arg1; 8969 dev = sc->bce_dev; 8970 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 8971 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val); 8972 } 8973 return (error); 8974} 8975 8976 8977/****************************************************************************/ 8978/* Provides a sysctl interface for dumping the nvram contents. */ 8979/* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8980/* */ 8981/* Returns: */ 8982/* 0 for success, positive errno for failure. */ 8983/****************************************************************************/ 8984static int 8985bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS) 8986{ 8987 struct bce_softc *sc = (struct bce_softc *)arg1; 8988 int error, i; 8989 8990 if (sc->nvram_buf == NULL) 8991 sc->nvram_buf = malloc(sc->bce_flash_size, 8992 M_TEMP, M_ZERO | M_WAITOK); 8993 8994 error = 0; 8995 if (req->oldlen == sc->bce_flash_size) { 8996 for (i = 0; i < sc->bce_flash_size && error == 0; i++) 8997 error = bce_nvram_read(sc, i, &sc->nvram_buf[i], 1); 8998 } 8999 9000 if (error == 0) 9001 error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size); 9002 9003 return error; 9004} 9005 9006#ifdef BCE_NVRAM_WRITE_SUPPORT 9007/****************************************************************************/ 9008/* Provides a sysctl interface for writing to nvram. */ 9009/* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 9010/* */ 9011/* Returns: */ 9012/* 0 for success, positive errno for failure. 
*/ 9013/****************************************************************************/ 9014static int 9015bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS) 9016{ 9017 struct bce_softc *sc = (struct bce_softc *)arg1; 9018 int error; 9019 9020 if (sc->nvram_buf == NULL) 9021 sc->nvram_buf = malloc(sc->bce_flash_size, 9022 M_TEMP, M_ZERO | M_WAITOK); 9023 else 9024 bzero(sc->nvram_buf, sc->bce_flash_size); 9025 9026 error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size); 9027 if (error == 0) 9028 return (error); 9029 9030 if (req->newlen == sc->bce_flash_size) 9031 error = bce_nvram_write(sc, 0, sc->nvram_buf, 9032 sc->bce_flash_size); 9033 9034 9035 return error; 9036} 9037#endif 9038 9039 9040/****************************************************************************/ 9041/* Provides a sysctl interface to allow reading a CID. */ 9042/* */ 9043/* Returns: */ 9044/* 0 for success, positive value for failure. */ 9045/****************************************************************************/ 9046static int 9047bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS) 9048{ 9049 struct bce_softc *sc; 9050 int error, result; 9051 9052 result = -1; 9053 error = sysctl_handle_int(oidp, &result, 0, req); 9054 if (error || (req->newptr == NULL)) 9055 return (error); 9056 9057 /* Make sure the register is accessible. */ 9058 if (result <= TX_CID) { 9059 sc = (struct bce_softc *)arg1; 9060 bce_dump_ctx(sc, result); 9061 } 9062 9063 return (error); 9064} 9065 9066 9067/****************************************************************************/ 9068/* Provides a sysctl interface to forcing the driver to dump state and */ 9069/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 9070/* */ 9071/* Returns: */ 9072/* 0 for success, positive value for failure. 
*/
/****************************************************************************/
static int
bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	/* Reads report -1; writing 1 triggers the breakpoint. */
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_breakpoint(sc);
	}

	return error;
}
#endif

/****************************************************************************/
/* Adds any sysctl parameters for tuning or debugging purposes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                            */
/****************************************************************************/
static void
bce_add_sysctls(struct bce_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	DBENTER(BCE_VERBOSE_MISC);

	/* All nodes hang off this device's sysctl tree. */
	ctx = device_get_sysctl_ctx(sc->bce_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_control",
	    CTLFLAG_RW, &l2fhdr_error_sim_control,
	    0, "Debug control to force l2fhdr errors");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
	    0, "Number of simulated l2_fhdr errors");
#endif

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "l2fhdr_error_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_count,
	    0, "Number of l2_fhdr errors");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_control",
	    CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
	    0, "Debug control to force mbuf allocation failures");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_count",
	    CTLFLAG_RD,
&sc->mbuf_alloc_failed_sim_count, 9139 0, "Number of simulated mbuf cluster allocation failures"); 9140#endif 9141 9142 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9143 "mbuf_alloc_failed_count", 9144 CTLFLAG_RD, &sc->mbuf_alloc_failed_count, 9145 0, "Number of mbuf allocation failures"); 9146 9147 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9148 "mbuf_frag_count", 9149 CTLFLAG_RD, &sc->mbuf_frag_count, 9150 0, "Number of fragmented mbufs"); 9151 9152#ifdef BCE_DEBUG 9153 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 9154 "dma_map_addr_failed_sim_control", 9155 CTLFLAG_RW, &dma_map_addr_failed_sim_control, 9156 0, "Debug control to force DMA mapping failures"); 9157 9158 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */ 9159 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9160 "dma_map_addr_failed_sim_count", 9161 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count, 9162 0, "Number of simulated DMA mapping failures"); 9163 9164#endif 9165 9166 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9167 "dma_map_addr_rx_failed_count", 9168 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count, 9169 0, "Number of RX DMA mapping failures"); 9170 9171 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9172 "dma_map_addr_tx_failed_count", 9173 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count, 9174 0, "Number of TX DMA mapping failures"); 9175 9176#ifdef BCE_DEBUG 9177 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 9178 "unexpected_attention_sim_control", 9179 CTLFLAG_RW, &unexpected_attention_sim_control, 9180 0, "Debug control to simulate unexpected attentions"); 9181 9182 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9183 "unexpected_attention_sim_count", 9184 CTLFLAG_RW, &sc->unexpected_attention_sim_count, 9185 0, "Number of simulated unexpected attentions"); 9186#endif 9187 9188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9189 "unexpected_attention_count", 9190 CTLFLAG_RW, &sc->unexpected_attention_count, 9191 0, "Number of unexpected attentions"); 9192 9193#ifdef BCE_DEBUG 9194 SYSCTL_ADD_INT(ctx, children, 
OID_AUTO, 9195 "debug_bootcode_running_failure", 9196 CTLFLAG_RW, &bootcode_running_failure_sim_control, 9197 0, "Debug control to force bootcode running failures"); 9198 9199 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 9200 "rx_low_watermark", 9201 CTLFLAG_RD, &sc->rx_low_watermark, 9202 0, "Lowest level of free rx_bd's"); 9203 9204 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9205 "rx_empty_count", 9206 CTLFLAG_RD, &sc->rx_empty_count, 9207 "Number of times the RX chain was empty"); 9208 9209 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 9210 "tx_hi_watermark", 9211 CTLFLAG_RD, &sc->tx_hi_watermark, 9212 0, "Highest level of used tx_bd's"); 9213 9214 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9215 "tx_full_count", 9216 CTLFLAG_RD, &sc->tx_full_count, 9217 "Number of times the TX chain was full"); 9218 9219 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9220 "tso_frames_requested", 9221 CTLFLAG_RD, &sc->tso_frames_requested, 9222 "Number of TSO frames requested"); 9223 9224 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9225 "tso_frames_completed", 9226 CTLFLAG_RD, &sc->tso_frames_completed, 9227 "Number of TSO frames completed"); 9228 9229 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9230 "tso_frames_failed", 9231 CTLFLAG_RD, &sc->tso_frames_failed, 9232 "Number of TSO frames failed"); 9233 9234 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9235 "csum_offload_ip", 9236 CTLFLAG_RD, &sc->csum_offload_ip, 9237 "Number of IP checksum offload frames"); 9238 9239 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9240 "csum_offload_tcp_udp", 9241 CTLFLAG_RD, &sc->csum_offload_tcp_udp, 9242 "Number of TCP/UDP checksum offload frames"); 9243 9244 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9245 "vlan_tagged_frames_rcvd", 9246 CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd, 9247 "Number of VLAN tagged frames received"); 9248 9249 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9250 "vlan_tagged_frames_stripped", 9251 CTLFLAG_RD, &sc->vlan_tagged_frames_stripped, 9252 "Number of VLAN tagged frames stripped"); 9253 9254 
SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9255 "interrupts_rx", 9256 CTLFLAG_RD, &sc->interrupts_rx, 9257 "Number of RX interrupts"); 9258 9259 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9260 "interrupts_tx", 9261 CTLFLAG_RD, &sc->interrupts_tx, 9262 "Number of TX interrupts"); 9263 9264 if (bce_hdr_split == TRUE) { 9265 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9266 "split_header_frames_rcvd", 9267 CTLFLAG_RD, &sc->split_header_frames_rcvd, 9268 "Number of split header frames received"); 9269 9270 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9271 "split_header_tcp_frames_rcvd", 9272 CTLFLAG_RD, &sc->split_header_tcp_frames_rcvd, 9273 "Number of split header TCP frames received"); 9274 } 9275 9276 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9277 "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD, 9278 (void *)sc, 0, 9279 bce_sysctl_nvram_dump, "S", ""); 9280 9281#ifdef BCE_NVRAM_WRITE_SUPPORT 9282 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9283 "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR, 9284 (void *)sc, 0, 9285 bce_sysctl_nvram_write, "S", ""); 9286#endif 9287#endif /* BCE_DEBUG */ 9288 9289 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9290 "stat_IfHcInOctets", 9291 CTLFLAG_RD, &sc->stat_IfHCInOctets, 9292 "Bytes received"); 9293 9294 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9295 "stat_IfHCInBadOctets", 9296 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 9297 "Bad bytes received"); 9298 9299 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9300 "stat_IfHCOutOctets", 9301 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 9302 "Bytes sent"); 9303 9304 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9305 "stat_IfHCOutBadOctets", 9306 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 9307 "Bad bytes sent"); 9308 9309 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9310 "stat_IfHCInUcastPkts", 9311 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 9312 "Unicast packets received"); 9313 9314 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9315 "stat_IfHCInMulticastPkts", 9316 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 9317 "Multicast packets received"); 9318 9319 
SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9320 "stat_IfHCInBroadcastPkts", 9321 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 9322 "Broadcast packets received"); 9323 9324 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9325 "stat_IfHCOutUcastPkts", 9326 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 9327 "Unicast packets sent"); 9328 9329 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9330 "stat_IfHCOutMulticastPkts", 9331 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 9332 "Multicast packets sent"); 9333 9334 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 9335 "stat_IfHCOutBroadcastPkts", 9336 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 9337 "Broadcast packets sent"); 9338 9339 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9340 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 9341 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 9342 0, "Internal MAC transmit errors"); 9343 9344 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9345 "stat_Dot3StatsCarrierSenseErrors", 9346 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 9347 0, "Carrier sense errors"); 9348 9349 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9350 "stat_Dot3StatsFCSErrors", 9351 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 9352 0, "Frame check sequence errors"); 9353 9354 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9355 "stat_Dot3StatsAlignmentErrors", 9356 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 9357 0, "Alignment errors"); 9358 9359 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9360 "stat_Dot3StatsSingleCollisionFrames", 9361 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 9362 0, "Single Collision Frames"); 9363 9364 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9365 "stat_Dot3StatsMultipleCollisionFrames", 9366 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 9367 0, "Multiple Collision Frames"); 9368 9369 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9370 "stat_Dot3StatsDeferredTransmissions", 9371 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 9372 0, "Deferred Transmissions"); 9373 9374 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9375 "stat_Dot3StatsExcessiveCollisions", 9376 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 9377 0, "Excessive Collisions"); 9378 9379 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9380 "stat_Dot3StatsLateCollisions", 9381 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 9382 0, "Late Collisions"); 9383 9384 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9385 "stat_EtherStatsCollisions", 9386 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 9387 0, "Collisions"); 9388 9389 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9390 "stat_EtherStatsFragments", 9391 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 9392 0, "Fragments"); 9393 9394 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9395 "stat_EtherStatsJabbers", 9396 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 9397 0, "Jabbers"); 9398 9399 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9400 "stat_EtherStatsUndersizePkts", 9401 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 9402 0, "Undersize packets"); 9403 9404 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9405 "stat_EtherStatsOversizePkts", 9406 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts, 9407 0, "stat_EtherStatsOversizePkts"); 9408 9409 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9410 "stat_EtherStatsPktsRx64Octets", 9411 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 9412 0, "Bytes received in 64 byte packets"); 9413 9414 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9415 "stat_EtherStatsPktsRx65Octetsto127Octets", 9416 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 9417 0, "Bytes received in 65 to 127 byte packets"); 9418 9419 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9420 "stat_EtherStatsPktsRx128Octetsto255Octets", 9421 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 9422 0, "Bytes received in 128 to 255 byte packets"); 9423 9424 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9425 "stat_EtherStatsPktsRx256Octetsto511Octets", 9426 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 9427 0, "Bytes received in 256 to 511 byte packets"); 9428 
9429 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9430 "stat_EtherStatsPktsRx512Octetsto1023Octets", 9431 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 9432 0, "Bytes received in 512 to 1023 byte packets"); 9433 9434 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9435 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 9436 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 9437 0, "Bytes received in 1024 t0 1522 byte packets"); 9438 9439 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9440 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 9441 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 9442 0, "Bytes received in 1523 to 9022 byte packets"); 9443 9444 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9445 "stat_EtherStatsPktsTx64Octets", 9446 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 9447 0, "Bytes sent in 64 byte packets"); 9448 9449 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9450 "stat_EtherStatsPktsTx65Octetsto127Octets", 9451 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 9452 0, "Bytes sent in 65 to 127 byte packets"); 9453 9454 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9455 "stat_EtherStatsPktsTx128Octetsto255Octets", 9456 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 9457 0, "Bytes sent in 128 to 255 byte packets"); 9458 9459 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9460 "stat_EtherStatsPktsTx256Octetsto511Octets", 9461 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 9462 0, "Bytes sent in 256 to 511 byte packets"); 9463 9464 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9465 "stat_EtherStatsPktsTx512Octetsto1023Octets", 9466 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 9467 0, "Bytes sent in 512 to 1023 byte packets"); 9468 9469 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9470 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 9471 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 9472 0, "Bytes sent in 1024 to 1522 byte packets"); 9473 9474 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9475 
"stat_EtherStatsPktsTx1523Octetsto9022Octets", 9476 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 9477 0, "Bytes sent in 1523 to 9022 byte packets"); 9478 9479 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9480 "stat_XonPauseFramesReceived", 9481 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 9482 0, "XON pause frames receved"); 9483 9484 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9485 "stat_XoffPauseFramesReceived", 9486 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 9487 0, "XOFF pause frames received"); 9488 9489 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9490 "stat_OutXonSent", 9491 CTLFLAG_RD, &sc->stat_OutXonSent, 9492 0, "XON pause frames sent"); 9493 9494 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9495 "stat_OutXoffSent", 9496 CTLFLAG_RD, &sc->stat_OutXoffSent, 9497 0, "XOFF pause frames sent"); 9498 9499 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9500 "stat_FlowControlDone", 9501 CTLFLAG_RD, &sc->stat_FlowControlDone, 9502 0, "Flow control done"); 9503 9504 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9505 "stat_MacControlFramesReceived", 9506 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 9507 0, "MAC control frames received"); 9508 9509 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9510 "stat_XoffStateEntered", 9511 CTLFLAG_RD, &sc->stat_XoffStateEntered, 9512 0, "XOFF state entered"); 9513 9514 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9515 "stat_IfInFramesL2FilterDiscards", 9516 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 9517 0, "Received L2 packets discarded"); 9518 9519 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9520 "stat_IfInRuleCheckerDiscards", 9521 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 9522 0, "Received packets discarded by rule"); 9523 9524 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9525 "stat_IfInFTQDiscards", 9526 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 9527 0, "Received packet FTQ discards"); 9528 9529 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9530 "stat_IfInMBUFDiscards", 9531 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 9532 0, "Received 
packets discarded due to lack " 9533 "of controller buffer memory"); 9534 9535 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9536 "stat_IfInRuleCheckerP4Hit", 9537 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 9538 0, "Received packets rule checker hits"); 9539 9540 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9541 "stat_CatchupInRuleCheckerDiscards", 9542 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 9543 0, "Received packets discarded in Catchup path"); 9544 9545 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9546 "stat_CatchupInFTQDiscards", 9547 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 9548 0, "Received packets discarded in FTQ in Catchup path"); 9549 9550 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9551 "stat_CatchupInMBUFDiscards", 9552 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 9553 0, "Received packets discarded in controller " 9554 "buffer memory in Catchup path"); 9555 9556 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9557 "stat_CatchupInRuleCheckerP4Hit", 9558 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 9559 0, "Received packets rule checker hits in Catchup path"); 9560 9561 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 9562 "com_no_buffers", 9563 CTLFLAG_RD, &sc->com_no_buffers, 9564 0, "Valid packets received but no RX buffers available"); 9565 9566#ifdef BCE_DEBUG 9567 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9568 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 9569 (void *)sc, 0, 9570 bce_sysctl_driver_state, "I", "Drive state information"); 9571 9572 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9573 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 9574 (void *)sc, 0, 9575 bce_sysctl_hw_state, "I", "Hardware state information"); 9576 9577 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9578 "status_block", CTLTYPE_INT | CTLFLAG_RW, 9579 (void *)sc, 0, 9580 bce_sysctl_status_block, "I", "Dump status block"); 9581 9582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9583 "stats_block", CTLTYPE_INT | CTLFLAG_RW, 9584 (void *)sc, 0, 9585 bce_sysctl_stats_block, "I", "Dump statistics block"); 9586 9587 
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9588 "stats_clear", CTLTYPE_INT | CTLFLAG_RW, 9589 (void *)sc, 0, 9590 bce_sysctl_stats_clear, "I", "Clear statistics block"); 9591 9592 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9593 "shmem_state", CTLTYPE_INT | CTLFLAG_RW, 9594 (void *)sc, 0, 9595 bce_sysctl_shmem_state, "I", "Shared memory state information"); 9596 9597 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9598 "bc_state", CTLTYPE_INT | CTLFLAG_RW, 9599 (void *)sc, 0, 9600 bce_sysctl_bc_state, "I", "Bootcode state information"); 9601 9602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9603 "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW, 9604 (void *)sc, 0, 9605 bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain"); 9606 9607 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9608 "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW, 9609 (void *)sc, 0, 9610 bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain"); 9611 9612 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9613 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 9614 (void *)sc, 0, 9615 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 9616 9617 if (bce_hdr_split == TRUE) { 9618 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9619 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW, 9620 (void *)sc, 0, 9621 bce_sysctl_dump_pg_chain, "I", "Dump page chain"); 9622 } 9623 9624 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9625 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW, 9626 (void *)sc, 0, 9627 bce_sysctl_dump_ctx, "I", "Dump context memory"); 9628 9629 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9630 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 9631 (void *)sc, 0, 9632 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 9633 9634 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9635 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 9636 (void *)sc, 0, 9637 bce_sysctl_reg_read, "I", "Register read"); 9638 9639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 9640 "nvram_read", CTLTYPE_INT | CTLFLAG_RW, 9641 (void *)sc, 0, 9642 bce_sysctl_nvram_read, "I", "NVRAM read"); 9643 9644 SYSCTL_ADD_PROC(ctx, 
children, OID_AUTO, 9645 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 9646 (void *)sc, 0, 9647 bce_sysctl_phy_read, "I", "PHY register read"); 9648 9649#endif 9650 9651 DBEXIT(BCE_VERBOSE_MISC); 9652} 9653 9654 9655/****************************************************************************/ 9656/* BCE Debug Routines */ 9657/****************************************************************************/ 9658#ifdef BCE_DEBUG 9659 9660/****************************************************************************/ 9661/* Freezes the controller to allow for a cohesive state dump. */ 9662/* */ 9663/* Returns: */ 9664/* Nothing. */ 9665/****************************************************************************/ 9666static __attribute__ ((noinline)) void 9667bce_freeze_controller(struct bce_softc *sc) 9668{ 9669 u32 val; 9670 val = REG_RD(sc, BCE_MISC_COMMAND); 9671 val |= BCE_MISC_COMMAND_DISABLE_ALL; 9672 REG_WR(sc, BCE_MISC_COMMAND, val); 9673} 9674 9675 9676/****************************************************************************/ 9677/* Unfreezes the controller after a freeze operation. This may not always */ 9678/* work and the controller will require a reset! */ 9679/* */ 9680/* Returns: */ 9681/* Nothing. */ 9682/****************************************************************************/ 9683static __attribute__ ((noinline)) void 9684bce_unfreeze_controller(struct bce_softc *sc) 9685{ 9686 u32 val; 9687 val = REG_RD(sc, BCE_MISC_COMMAND); 9688 val |= BCE_MISC_COMMAND_ENABLE_ALL; 9689 REG_WR(sc, BCE_MISC_COMMAND, val); 9690} 9691 9692 9693/****************************************************************************/ 9694/* Prints out Ethernet frame information from an mbuf. */ 9695/* */ 9696/* Partially decode an Ethernet frame to look at some important headers. */ 9697/* */ 9698/* Returns: */ 9699/* Nothing. 
*/ 9700/****************************************************************************/ 9701static __attribute__ ((noinline)) void 9702bce_dump_enet(struct bce_softc *sc, struct mbuf *m) 9703{ 9704 struct ether_vlan_header *eh; 9705 u16 etype; 9706 int ehlen; 9707 struct ip *ip; 9708 struct tcphdr *th; 9709 struct udphdr *uh; 9710 struct arphdr *ah; 9711 9712 BCE_PRINTF( 9713 "-----------------------------" 9714 " Frame Decode " 9715 "-----------------------------\n"); 9716 9717 eh = mtod(m, struct ether_vlan_header *); 9718 9719 /* Handle VLAN encapsulation if present. */ 9720 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 9721 etype = ntohs(eh->evl_proto); 9722 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 9723 } else { 9724 etype = ntohs(eh->evl_encap_proto); 9725 ehlen = ETHER_HDR_LEN; 9726 } 9727 9728 /* ToDo: Add VLAN output. */ 9729 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n", 9730 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen); 9731 9732 switch (etype) { 9733 case ETHERTYPE_IP: 9734 ip = (struct ip *)(m->m_data + ehlen); 9735 BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, " 9736 "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n", 9737 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr), 9738 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum)); 9739 9740 switch (ip->ip_p) { 9741 case IPPROTO_TCP: 9742 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9743 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = " 9744 "%d bytes, flags = 0x%b, csum = 0x%04X\n", 9745 ntohs(th->th_dport), ntohs(th->th_sport), 9746 (th->th_off << 2), th->th_flags, 9747 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST" 9748 "\02SYN\01FIN", ntohs(th->th_sum)); 9749 break; 9750 case IPPROTO_UDP: 9751 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9752 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d " 9753 "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport), 9754 ntohs(uh->uh_sport), ntohs(uh->uh_ulen), 9755 ntohs(uh->uh_sum)); 9756 break; 9757 case 
IPPROTO_ICMP: 9758 BCE_PRINTF("icmp:\n"); 9759 break; 9760 default: 9761 BCE_PRINTF("----: Other IP protocol.\n"); 9762 } 9763 break; 9764 case ETHERTYPE_IPV6: 9765 BCE_PRINTF("ipv6: No decode supported.\n"); 9766 break; 9767 case ETHERTYPE_ARP: 9768 BCE_PRINTF("-arp: "); 9769 ah = (struct arphdr *) (m->m_data + ehlen); 9770 switch (ntohs(ah->ar_op)) { 9771 case ARPOP_REVREQUEST: 9772 printf("reverse ARP request\n"); 9773 break; 9774 case ARPOP_REVREPLY: 9775 printf("reverse ARP reply\n"); 9776 break; 9777 case ARPOP_REQUEST: 9778 printf("ARP request\n"); 9779 break; 9780 case ARPOP_REPLY: 9781 printf("ARP reply\n"); 9782 break; 9783 default: 9784 printf("other ARP operation\n"); 9785 } 9786 break; 9787 default: 9788 BCE_PRINTF("----: Other protocol.\n"); 9789 } 9790 9791 BCE_PRINTF( 9792 "-----------------------------" 9793 "--------------" 9794 "-----------------------------\n"); 9795} 9796 9797 9798/****************************************************************************/ 9799/* Prints out information about an mbuf. */ 9800/* */ 9801/* Returns: */ 9802/* Nothing. 
*/ 9803/****************************************************************************/ 9804static __attribute__ ((noinline)) void 9805bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 9806{ 9807 struct mbuf *mp = m; 9808 9809 if (m == NULL) { 9810 BCE_PRINTF("mbuf: null pointer\n"); 9811 return; 9812 } 9813 9814 while (mp) { 9815 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, " 9816 "m_data = %p\n", mp, mp->m_len, mp->m_flags, 9817 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data); 9818 9819 if (mp->m_flags & M_PKTHDR) { 9820 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, " 9821 "csum_flags = %b\n", mp->m_pkthdr.len, 9822 mp->m_flags, M_FLAG_PRINTF, 9823 mp->m_pkthdr.csum_flags, 9824 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP" 9825 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 9826 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 9827 "\14CSUM_PSEUDO_HDR"); 9828 } 9829 9830 if (mp->m_flags & M_EXT) { 9831 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", 9832 mp->m_ext.ext_buf, mp->m_ext.ext_size); 9833 switch (mp->m_ext.ext_type) { 9834 case EXT_CLUSTER: 9835 printf("EXT_CLUSTER\n"); break; 9836 case EXT_SFBUF: 9837 printf("EXT_SFBUF\n"); break; 9838 case EXT_JUMBO9: 9839 printf("EXT_JUMBO9\n"); break; 9840 case EXT_JUMBO16: 9841 printf("EXT_JUMBO16\n"); break; 9842 case EXT_PACKET: 9843 printf("EXT_PACKET\n"); break; 9844 case EXT_MBUF: 9845 printf("EXT_MBUF\n"); break; 9846 case EXT_NET_DRV: 9847 printf("EXT_NET_DRV\n"); break; 9848 case EXT_MOD_TYPE: 9849 printf("EXT_MDD_TYPE\n"); break; 9850 case EXT_DISPOSABLE: 9851 printf("EXT_DISPOSABLE\n"); break; 9852 case EXT_EXTREF: 9853 printf("EXT_EXTREF\n"); break; 9854 default: 9855 printf("UNKNOWN\n"); 9856 } 9857 } 9858 9859 mp = mp->m_next; 9860 } 9861} 9862 9863 9864/****************************************************************************/ 9865/* Prints out the mbufs in the TX mbuf chain. */ 9866/* */ 9867/* Returns: */ 9868/* Nothing. 
*/ 9869/****************************************************************************/ 9870static __attribute__ ((noinline)) void 9871bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9872{ 9873 struct mbuf *m; 9874 9875 BCE_PRINTF( 9876 "----------------------------" 9877 " tx mbuf data " 9878 "----------------------------\n"); 9879 9880 for (int i = 0; i < count; i++) { 9881 m = sc->tx_mbuf_ptr[chain_prod]; 9882 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); 9883 bce_dump_mbuf(sc, m); 9884 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 9885 } 9886 9887 BCE_PRINTF( 9888 "----------------------------" 9889 "----------------" 9890 "----------------------------\n"); 9891} 9892 9893 9894/****************************************************************************/ 9895/* Prints out the mbufs in the RX mbuf chain. */ 9896/* */ 9897/* Returns: */ 9898/* Nothing. */ 9899/****************************************************************************/ 9900static __attribute__ ((noinline)) void 9901bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9902{ 9903 struct mbuf *m; 9904 9905 BCE_PRINTF( 9906 "----------------------------" 9907 " rx mbuf data " 9908 "----------------------------\n"); 9909 9910 for (int i = 0; i < count; i++) { 9911 m = sc->rx_mbuf_ptr[chain_prod]; 9912 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); 9913 bce_dump_mbuf(sc, m); 9914 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 9915 } 9916 9917 9918 BCE_PRINTF( 9919 "----------------------------" 9920 "----------------" 9921 "----------------------------\n"); 9922} 9923 9924 9925/****************************************************************************/ 9926/* Prints out the mbufs in the mbuf page chain. */ 9927/* */ 9928/* Returns: */ 9929/* Nothing. 
*/ 9930/****************************************************************************/ 9931static __attribute__ ((noinline)) void 9932bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9933{ 9934 struct mbuf *m; 9935 9936 BCE_PRINTF( 9937 "----------------------------" 9938 " pg mbuf data " 9939 "----------------------------\n"); 9940 9941 for (int i = 0; i < count; i++) { 9942 m = sc->pg_mbuf_ptr[chain_prod]; 9943 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); 9944 bce_dump_mbuf(sc, m); 9945 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); 9946 } 9947 9948 9949 BCE_PRINTF( 9950 "----------------------------" 9951 "----------------" 9952 "----------------------------\n"); 9953} 9954 9955 9956/****************************************************************************/ 9957/* Prints out a tx_bd structure. */ 9958/* */ 9959/* Returns: */ 9960/* Nothing. */ 9961/****************************************************************************/ 9962static __attribute__ ((noinline)) void 9963bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 9964{ 9965 int i = 0; 9966 9967 if (idx > MAX_TX_BD_ALLOC) 9968 /* Index out of range. */ 9969 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 9970 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 9971 /* TX Chain page pointer. */ 9972 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9973 "pointer\n", idx, txbd->tx_bd_haddr_hi, 9974 txbd->tx_bd_haddr_lo); 9975 else { 9976 /* Normal tx_bd entry. 
*/ 9977 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 9978 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " 9979 "0x%04X (", idx, txbd->tx_bd_haddr_hi, 9980 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, 9981 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 9982 9983 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { 9984 if (i>0) 9985 printf("|"); 9986 printf("CONN_FAULT"); 9987 i++; 9988 } 9989 9990 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { 9991 if (i>0) 9992 printf("|"); 9993 printf("TCP_UDP_CKSUM"); 9994 i++; 9995 } 9996 9997 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { 9998 if (i>0) 9999 printf("|"); 10000 printf("IP_CKSUM"); 10001 i++; 10002 } 10003 10004 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { 10005 if (i>0) 10006 printf("|"); 10007 printf("VLAN"); 10008 i++; 10009 } 10010 10011 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { 10012 if (i>0) 10013 printf("|"); 10014 printf("COAL_NOW"); 10015 i++; 10016 } 10017 10018 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { 10019 if (i>0) 10020 printf("|"); 10021 printf("DONT_GEN_CRC"); 10022 i++; 10023 } 10024 10025 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { 10026 if (i>0) 10027 printf("|"); 10028 printf("START"); 10029 i++; 10030 } 10031 10032 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { 10033 if (i>0) 10034 printf("|"); 10035 printf("END"); 10036 i++; 10037 } 10038 10039 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) { 10040 if (i>0) 10041 printf("|"); 10042 printf("LSO"); 10043 i++; 10044 } 10045 10046 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { 10047 if (i>0) 10048 printf("|"); 10049 printf("SW_OPTION=%d", ((txbd->tx_bd_flags & 10050 TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; 10051 } 10052 10053 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { 10054 if (i>0) 10055 printf("|"); 10056 printf("SW_FLAGS"); 10057 i++; 10058 } 10059 10060 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { 10061 if (i>0) 10062 printf("|"); 10063 printf("SNAP)"); 10064 } else { 10065 printf(")\n"); 10066 } 10067 } 
10068} 10069 10070 10071/****************************************************************************/ 10072/* Prints out a rx_bd structure. */ 10073/* */ 10074/* Returns: */ 10075/* Nothing. */ 10076/****************************************************************************/ 10077static __attribute__ ((noinline)) void 10078bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 10079{ 10080 if (idx > MAX_RX_BD_ALLOC) 10081 /* Index out of range. */ 10082 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 10083 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 10084 /* RX Chain page pointer. */ 10085 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 10086 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 10087 rxbd->rx_bd_haddr_lo); 10088 else 10089 /* Normal rx_bd entry. */ 10090 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 10091 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, 10092 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, 10093 rxbd->rx_bd_flags); 10094} 10095 10096 10097/****************************************************************************/ 10098/* Prints out a rx_bd structure in the page chain. */ 10099/* */ 10100/* Returns: */ 10101/* Nothing. */ 10102/****************************************************************************/ 10103static __attribute__ ((noinline)) void 10104bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) 10105{ 10106 if (idx > MAX_PG_BD_ALLOC) 10107 /* Index out of range. */ 10108 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); 10109 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) 10110 /* Page Chain page pointer. */ 10111 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 10112 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); 10113 else 10114 /* Normal rx_bd entry. 
*/ 10115 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 10116 "flags = 0x%08X\n", idx, 10117 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, 10118 pgbd->rx_bd_len, pgbd->rx_bd_flags); 10119} 10120 10121 10122/****************************************************************************/ 10123/* Prints out a l2_fhdr structure. */ 10124/* */ 10125/* Returns: */ 10126/* Nothing. */ 10127/****************************************************************************/ 10128static __attribute__ ((noinline)) void 10129bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 10130{ 10131 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " 10132 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " 10133 "tcp_udp_xsum = 0x%04X\n", idx, 10134 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, 10135 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 10136 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 10137} 10138 10139 10140/****************************************************************************/ 10141/* Prints out context memory info. (Only useful for CID 0 to 16.) */ 10142/* */ 10143/* Returns: */ 10144/* Nothing. 
*/ 10145/****************************************************************************/ 10146static __attribute__ ((noinline)) void 10147bce_dump_ctx(struct bce_softc *sc, u16 cid) 10148{ 10149 if (cid > TX_CID) { 10150 BCE_PRINTF(" Unknown CID\n"); 10151 return; 10152 } 10153 10154 BCE_PRINTF( 10155 "----------------------------" 10156 " CTX Data " 10157 "----------------------------\n"); 10158 10159 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); 10160 10161 if (cid == RX_CID) { 10162 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " 10163 "producer index\n", 10164 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); 10165 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " 10166 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 10167 BCE_L2CTX_RX_HOST_BSEQ)); 10168 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", 10169 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); 10170 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " 10171 "descriptor address\n", 10172 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); 10173 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " 10174 "descriptor address\n", 10175 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); 10176 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " 10177 "index\n", CTX_RD(sc, GET_CID_ADDR(cid), 10178 BCE_L2CTX_RX_NX_BDIDX)); 10179 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " 10180 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 10181 BCE_L2CTX_RX_HOST_PG_BDIDX)); 10182 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " 10183 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), 10184 BCE_L2CTX_RX_PG_BUF_SIZE)); 10185 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " 10186 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 10187 BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); 10188 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " 10189 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 10190 
BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); 10191 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page " 10192 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 10193 BCE_L2CTX_RX_NX_PG_BDIDX)); 10194 } else if (cid == TX_CID) { 10195 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 10196 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", 10197 CTX_RD(sc, GET_CID_ADDR(cid), 10198 BCE_L2CTX_TX_TYPE_XI)); 10199 BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx " 10200 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), 10201 BCE_L2CTX_TX_CMD_TYPE_XI)); 10202 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) " 10203 "h/w buffer descriptor address\n", 10204 CTX_RD(sc, GET_CID_ADDR(cid), 10205 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); 10206 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " 10207 "h/w buffer descriptor address\n", 10208 CTX_RD(sc, GET_CID_ADDR(cid), 10209 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); 10210 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " 10211 "host producer index\n", 10212 CTX_RD(sc, GET_CID_ADDR(cid), 10213 BCE_L2CTX_TX_HOST_BIDX_XI)); 10214 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " 10215 "host byte sequence\n", 10216 CTX_RD(sc, GET_CID_ADDR(cid), 10217 BCE_L2CTX_TX_HOST_BSEQ_XI)); 10218 } else { 10219 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", 10220 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); 10221 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n", 10222 CTX_RD(sc, GET_CID_ADDR(cid), 10223 BCE_L2CTX_TX_CMD_TYPE)); 10224 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) " 10225 "h/w buffer descriptor address\n", 10226 CTX_RD(sc, GET_CID_ADDR(cid), 10227 BCE_L2CTX_TX_TBDR_BHADDR_HI)); 10228 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " 10229 "h/w buffer descriptor address\n", 10230 CTX_RD(sc, GET_CID_ADDR(cid), 10231 BCE_L2CTX_TX_TBDR_BHADDR_LO)); 10232 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " 10233 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 10234 BCE_L2CTX_TX_HOST_BIDX)); 10235 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " 
10236 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 10237 BCE_L2CTX_TX_HOST_BSEQ)); 10238 } 10239 } 10240 10241 BCE_PRINTF( 10242 "----------------------------" 10243 " Raw CTX " 10244 "----------------------------\n"); 10245 10246 for (int i = 0x0; i < 0x300; i += 0x10) { 10247 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 10248 CTX_RD(sc, GET_CID_ADDR(cid), i), 10249 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), 10250 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), 10251 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); 10252 } 10253 10254 10255 BCE_PRINTF( 10256 "----------------------------" 10257 "----------------" 10258 "----------------------------\n"); 10259} 10260 10261 10262/****************************************************************************/ 10263/* Prints out the FTQ data. */ 10264/* */ 10265/* Returns: */ 10266/* Nothing. */ 10267/****************************************************************************/ 10268static __attribute__ ((noinline)) void 10269bce_dump_ftqs(struct bce_softc *sc) 10270{ 10271 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; 10272 10273 BCE_PRINTF( 10274 "----------------------------" 10275 " FTQ Data " 10276 "----------------------------\n"); 10277 10278 BCE_PRINTF(" FTQ Command Control Depth_Now " 10279 "Max_Depth Valid_Cnt \n"); 10280 BCE_PRINTF(" ------- ---------- ---------- ---------- " 10281 "---------- ----------\n"); 10282 10283 /* Setup the generic statistic counters for the FTQ valid count. 
*/ 10284 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | 10285 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | 10286 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | 10287 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); 10288 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 10289 10290 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | 10291 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | 10292 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | 10293 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); 10294 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); 10295 10296 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | 10297 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | 10298 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | 10299 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); 10300 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); 10301 10302 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | 10303 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | 10304 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | 10305 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); 10306 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); 10307 10308 /* Input queue to the Receive Lookup state machine */ 10309 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); 10310 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); 10311 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; 10312 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; 10313 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 10314 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10315 cmd, ctl, cur_depth, max_depth, valid_cnt); 10316 10317 /* Input queue to the Receive Processor */ 10318 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); 10319 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); 10320 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; 10321 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; 10322 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 10323 BCE_PRINTF(" RXP 
0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10324 cmd, ctl, cur_depth, max_depth, valid_cnt); 10325 10326 /* Input queue to the Recevie Processor */ 10327 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); 10328 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); 10329 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; 10330 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; 10331 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 10332 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10333 cmd, ctl, cur_depth, max_depth, valid_cnt); 10334 10335 /* Input queue to the Receive Virtual to Physical state machine */ 10336 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); 10337 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); 10338 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; 10339 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; 10340 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 10341 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10342 cmd, ctl, cur_depth, max_depth, valid_cnt); 10343 10344 /* Input queue to the Recevie Virtual to Physical state machine */ 10345 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); 10346 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); 10347 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; 10348 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; 10349 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); 10350 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10351 cmd, ctl, cur_depth, max_depth, valid_cnt); 10352 10353 /* Input queue to the Receive Virtual to Physical state machine */ 10354 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); 10355 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); 10356 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; 10357 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; 10358 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); 10359 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10360 cmd, ctl, cur_depth, max_depth, valid_cnt); 10361 10362 /* Input queue to the Receive DMA state machine */ 10363 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); 10364 ctl 
= REG_RD(sc, BCE_RDMA_FTQ_CTL); 10365 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22; 10366 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; 10367 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); 10368 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10369 cmd, ctl, cur_depth, max_depth, valid_cnt); 10370 10371 /* Input queue to the Transmit Scheduler state machine */ 10372 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); 10373 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); 10374 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; 10375 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; 10376 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); 10377 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10378 cmd, ctl, cur_depth, max_depth, valid_cnt); 10379 10380 /* Input queue to the Transmit Buffer Descriptor state machine */ 10381 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); 10382 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); 10383 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; 10384 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; 10385 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); 10386 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10387 cmd, ctl, cur_depth, max_depth, valid_cnt); 10388 10389 /* Input queue to the Transmit Processor */ 10390 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); 10391 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); 10392 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; 10393 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; 10394 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); 10395 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10396 cmd, ctl, cur_depth, max_depth, valid_cnt); 10397 10398 /* Input queue to the Transmit DMA state machine */ 10399 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); 10400 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); 10401 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; 10402 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; 10403 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); 10404 BCE_PRINTF(" TDMA 0x%08X 0x%08X 
0x%08X 0x%08X 0x%08X\n", 10405 cmd, ctl, cur_depth, max_depth, valid_cnt); 10406 10407 /* Input queue to the Transmit Patch-Up Processor */ 10408 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); 10409 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); 10410 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; 10411 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; 10412 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); 10413 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10414 cmd, ctl, cur_depth, max_depth, valid_cnt); 10415 10416 /* Input queue to the Transmit Assembler state machine */ 10417 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); 10418 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); 10419 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; 10420 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; 10421 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); 10422 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10423 cmd, ctl, cur_depth, max_depth, valid_cnt); 10424 10425 /* Input queue to the Completion Processor */ 10426 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); 10427 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); 10428 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; 10429 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; 10430 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); 10431 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10432 cmd, ctl, cur_depth, max_depth, valid_cnt); 10433 10434 /* Input queue to the Completion Processor */ 10435 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); 10436 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); 10437 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; 10438 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; 10439 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); 10440 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10441 cmd, ctl, cur_depth, max_depth, valid_cnt); 10442 10443 /* Input queue to the Completion Processor */ 10444 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); 10445 ctl = 
REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); 10446 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; 10447 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; 10448 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); 10449 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10450 cmd, ctl, cur_depth, max_depth, valid_cnt); 10451 10452 /* Setup the generic statistic counters for the FTQ valid count. */ 10453 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | 10454 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | 10455 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); 10456 10457 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) 10458 val = val | 10459 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 10460 24); 10461 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 10462 10463 /* Input queue to the Management Control Processor */ 10464 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); 10465 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); 10466 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; 10467 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; 10468 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 10469 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10470 cmd, ctl, cur_depth, max_depth, valid_cnt); 10471 10472 /* Input queue to the Command Processor */ 10473 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); 10474 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); 10475 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; 10476 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; 10477 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 10478 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10479 cmd, ctl, cur_depth, max_depth, valid_cnt); 10480 10481 /* Input queue to the Completion Scheduler state machine */ 10482 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); 10483 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); 10484 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; 10485 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; 10486 valid_cnt = 
REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 10487 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10488 cmd, ctl, cur_depth, max_depth, valid_cnt); 10489 10490 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 10491 /* Input queue to the RV2P Command Scheduler */ 10492 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); 10493 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); 10494 cur_depth = (ctl & 0xFFC00000) >> 22; 10495 max_depth = (ctl & 0x003FF000) >> 12; 10496 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 10497 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 10498 cmd, ctl, cur_depth, max_depth, valid_cnt); 10499 } 10500 10501 BCE_PRINTF( 10502 "----------------------------" 10503 "----------------" 10504 "----------------------------\n"); 10505} 10506 10507 10508/****************************************************************************/ 10509/* Prints out the TX chain. */ 10510/* */ 10511/* Returns: */ 10512/* Nothing. */ 10513/****************************************************************************/ 10514static __attribute__ ((noinline)) void 10515bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) 10516{ 10517 struct tx_bd *txbd; 10518 10519 /* First some info about the tx_bd chain structure. */ 10520 BCE_PRINTF( 10521 "----------------------------" 10522 " tx_bd chain " 10523 "----------------------------\n"); 10524 10525 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", 10526 (u32) BCM_PAGE_SIZE, (u32) sc->tx_pages); 10527 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 10528 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); 10529 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD_ALLOC); 10530 10531 BCE_PRINTF( 10532 "----------------------------" 10533 " tx_bd data " 10534 "----------------------------\n"); 10535 10536 /* Now print out a decoded list of TX buffer descriptors. 
*/ 10537 for (int i = 0; i < count; i++) { 10538 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 10539 bce_dump_txbd(sc, tx_prod, txbd); 10540 tx_prod++; 10541 } 10542 10543 BCE_PRINTF( 10544 "----------------------------" 10545 "----------------" 10546 "----------------------------\n"); 10547} 10548 10549 10550/****************************************************************************/ 10551/* Prints out the RX chain. */ 10552/* */ 10553/* Returns: */ 10554/* Nothing. */ 10555/****************************************************************************/ 10556static __attribute__ ((noinline)) void 10557bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) 10558{ 10559 struct rx_bd *rxbd; 10560 10561 /* First some info about the rx_bd chain structure. */ 10562 BCE_PRINTF( 10563 "----------------------------" 10564 " rx_bd chain " 10565 "----------------------------\n"); 10566 10567 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", 10568 (u32) BCM_PAGE_SIZE, (u32) sc->rx_pages); 10569 10570 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 10571 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); 10572 10573 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD_ALLOC); 10574 10575 BCE_PRINTF( 10576 "----------------------------" 10577 " rx_bd data " 10578 "----------------------------\n"); 10579 10580 /* Now print out the rx_bd's themselves. */ 10581 for (int i = 0; i < count; i++) { 10582 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 10583 bce_dump_rxbd(sc, rx_prod, rxbd); 10584 rx_prod = RX_CHAIN_IDX(rx_prod + 1); 10585 } 10586 10587 BCE_PRINTF( 10588 "----------------------------" 10589 "----------------" 10590 "----------------------------\n"); 10591} 10592 10593 10594/****************************************************************************/ 10595/* Prints out the page chain. */ 10596/* */ 10597/* Returns: */ 10598/* Nothing. 
*/ 10599/****************************************************************************/ 10600static __attribute__ ((noinline)) void 10601bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) 10602{ 10603 struct rx_bd *pgbd; 10604 10605 /* First some info about the page chain structure. */ 10606 BCE_PRINTF( 10607 "----------------------------" 10608 " page chain " 10609 "----------------------------\n"); 10610 10611 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", 10612 (u32) BCM_PAGE_SIZE, (u32) sc->pg_pages); 10613 10614 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 10615 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); 10616 10617 BCE_PRINTF("total pg_bd = 0x%08X\n", (u32) TOTAL_PG_BD_ALLOC); 10618 10619 BCE_PRINTF( 10620 "----------------------------" 10621 " page data " 10622 "----------------------------\n"); 10623 10624 /* Now print out the rx_bd's themselves. */ 10625 for (int i = 0; i < count; i++) { 10626 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; 10627 bce_dump_pgbd(sc, pg_prod, pgbd); 10628 pg_prod = PG_CHAIN_IDX(pg_prod + 1); 10629 } 10630 10631 BCE_PRINTF( 10632 "----------------------------" 10633 "----------------" 10634 "----------------------------\n"); 10635} 10636 10637 10638#define BCE_PRINT_RX_CONS(arg) \ 10639if (sblk->status_rx_quick_consumer_index##arg) \ 10640 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ 10641 sblk->status_rx_quick_consumer_index##arg, (u16) \ 10642 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ 10643 arg); 10644 10645 10646#define BCE_PRINT_TX_CONS(arg) \ 10647if (sblk->status_tx_quick_consumer_index##arg) \ 10648 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ 10649 sblk->status_tx_quick_consumer_index##arg, (u16) \ 10650 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ 10651 arg); 10652 10653/****************************************************************************/ 10654/* Prints out the status 
block from host memory. */ 10655/* */ 10656/* Returns: */ 10657/* Nothing. */ 10658/****************************************************************************/ 10659static __attribute__ ((noinline)) void 10660bce_dump_status_block(struct bce_softc *sc) 10661{ 10662 struct status_block *sblk; 10663 10664 bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); 10665 10666 sblk = sc->status_block; 10667 10668 BCE_PRINTF( 10669 "----------------------------" 10670 " Status Block " 10671 "----------------------------\n"); 10672 10673 /* Theses indices are used for normal L2 drivers. */ 10674 BCE_PRINTF(" 0x%08X - attn_bits\n", 10675 sblk->status_attn_bits); 10676 10677 BCE_PRINTF(" 0x%08X - attn_bits_ack\n", 10678 sblk->status_attn_bits_ack); 10679 10680 BCE_PRINT_RX_CONS(0); 10681 BCE_PRINT_TX_CONS(0) 10682 10683 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); 10684 10685 /* Theses indices are not used for normal L2 drivers. */ 10686 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); 10687 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); 10688 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); 10689 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); 10690 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); 10691 10692 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); 10693 10694 if (sblk->status_completion_producer_index || 10695 sblk->status_cmd_consumer_index) 10696 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", 10697 sblk->status_completion_producer_index, 10698 sblk->status_cmd_consumer_index); 10699 10700 BCE_PRINTF( 10701 "----------------------------" 10702 "----------------" 10703 "----------------------------\n"); 10704} 10705 10706 10707#define BCE_PRINT_64BIT_STAT(arg) \ 10708if (sblk->arg##_lo || sblk->arg##_hi) \ 10709 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ 10710 sblk->arg##_lo, #arg); 10711 10712#define 
BCE_PRINT_32BIT_STAT(arg) \ 10713if (sblk->arg) \ 10714 BCE_PRINTF(" 0x%08X : %s\n", \ 10715 sblk->arg, #arg); 10716 10717/****************************************************************************/ 10718/* Prints out the statistics block from host memory. */ 10719/* */ 10720/* Returns: */ 10721/* Nothing. */ 10722/****************************************************************************/ 10723static __attribute__ ((noinline)) void 10724bce_dump_stats_block(struct bce_softc *sc) 10725{ 10726 struct statistics_block *sblk; 10727 10728 bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); 10729 10730 sblk = sc->stats_block; 10731 10732 BCE_PRINTF( 10733 "---------------" 10734 " Stats Block (All Stats Not Shown Are 0) " 10735 "---------------\n"); 10736 10737 BCE_PRINT_64BIT_STAT(stat_IfHCInOctets); 10738 BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets); 10739 BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets); 10740 BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets); 10741 BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts); 10742 BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts); 10743 BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts); 10744 BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts); 10745 BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts); 10746 BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts); 10747 BCE_PRINT_32BIT_STAT( 10748 stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 10749 BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors); 10750 BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors); 10751 BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors); 10752 BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames); 10753 BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames); 10754 BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions); 10755 BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions); 10756 BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions); 10757 BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions); 10758 BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments); 10759 
BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers); 10760 BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts); 10761 BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts); 10762 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets); 10763 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets); 10764 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets); 10765 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets); 10766 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets); 10767 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets); 10768 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets); 10769 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets); 10770 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets); 10771 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets); 10772 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets); 10773 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets); 10774 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets); 10775 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets); 10776 BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived); 10777 BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived); 10778 BCE_PRINT_32BIT_STAT(stat_OutXonSent); 10779 BCE_PRINT_32BIT_STAT(stat_OutXoffSent); 10780 BCE_PRINT_32BIT_STAT(stat_FlowControlDone); 10781 BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived); 10782 BCE_PRINT_32BIT_STAT(stat_XoffStateEntered); 10783 BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards); 10784 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards); 10785 BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards); 10786 BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards); 10787 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit); 10788 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards); 10789 BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards); 10790 BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards); 10791 
BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit); 10792 10793 BCE_PRINTF( 10794 "----------------------------" 10795 "----------------" 10796 "----------------------------\n"); 10797} 10798 10799 10800/****************************************************************************/ 10801/* Prints out a summary of the driver state. */ 10802/* */ 10803/* Returns: */ 10804/* Nothing. */ 10805/****************************************************************************/ 10806static __attribute__ ((noinline)) void 10807bce_dump_driver_state(struct bce_softc *sc) 10808{ 10809 u32 val_hi, val_lo; 10810 10811 BCE_PRINTF( 10812 "-----------------------------" 10813 " Driver State " 10814 "-----------------------------\n"); 10815 10816 val_hi = BCE_ADDR_HI(sc); 10817 val_lo = BCE_ADDR_LO(sc); 10818 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " 10819 "address\n", val_hi, val_lo); 10820 10821 val_hi = BCE_ADDR_HI(sc->bce_vhandle); 10822 val_lo = BCE_ADDR_LO(sc->bce_vhandle); 10823 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " 10824 "address\n", val_hi, val_lo); 10825 10826 val_hi = BCE_ADDR_HI(sc->status_block); 10827 val_lo = BCE_ADDR_LO(sc->status_block); 10828 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " 10829 "virtual address\n", val_hi, val_lo); 10830 10831 val_hi = BCE_ADDR_HI(sc->stats_block); 10832 val_lo = BCE_ADDR_LO(sc->stats_block); 10833 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " 10834 "virtual address\n", val_hi, val_lo); 10835 10836 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 10837 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 10838 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 10839 "virtual adddress\n", val_hi, val_lo); 10840 10841 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 10842 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 10843 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 10844 "virtual address\n", val_hi, val_lo); 10845 10846 if (bce_hdr_split == TRUE) { 10847 val_hi = 
BCE_ADDR_HI(sc->pg_bd_chain); 10848 val_lo = BCE_ADDR_LO(sc->pg_bd_chain); 10849 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " 10850 "virtual address\n", val_hi, val_lo); 10851 } 10852 10853 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 10854 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 10855 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 10856 "virtual address\n", val_hi, val_lo); 10857 10858 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 10859 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 10860 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 10861 "virtual address\n", val_hi, val_lo); 10862 10863 if (bce_hdr_split == TRUE) { 10864 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); 10865 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); 10866 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " 10867 "virtual address\n", val_hi, val_lo); 10868 } 10869 10870 BCE_PRINTF(" 0x%016llX - (sc->interrupts_generated) " 10871 "h/w intrs\n", 10872 (long long unsigned int) sc->interrupts_generated); 10873 10874 BCE_PRINTF(" 0x%016llX - (sc->interrupts_rx) " 10875 "rx interrupts handled\n", 10876 (long long unsigned int) sc->interrupts_rx); 10877 10878 BCE_PRINTF(" 0x%016llX - (sc->interrupts_tx) " 10879 "tx interrupts handled\n", 10880 (long long unsigned int) sc->interrupts_tx); 10881 10882 BCE_PRINTF(" 0x%016llX - (sc->phy_interrupts) " 10883 "phy interrupts handled\n", 10884 (long long unsigned int) sc->phy_interrupts); 10885 10886 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " 10887 "status block index\n", sc->last_status_idx); 10888 10889 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " 10890 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); 10891 10892 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " 10893 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); 10894 10895 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " 10896 "byte seq index\n", sc->tx_prod_bseq); 10897 10898 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " 10899 "mbufs 
allocated\n", sc->debug_tx_mbuf_alloc); 10900 10901 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " 10902 "tx_bd's\n", sc->used_tx_bd); 10903 10904 BCE_PRINTF(" 0x%04X/0x%04X - (sc->tx_hi_watermark)/" 10905 "(sc->max_tx_bd)\n", sc->tx_hi_watermark, sc->max_tx_bd); 10906 10907 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " 10908 "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); 10909 10910 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " 10911 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); 10912 10913 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " 10914 "byte seq index\n", sc->rx_prod_bseq); 10915 10916 BCE_PRINTF(" 0x%04X/0x%04X - (sc->rx_low_watermark)/" 10917 "(sc->max_rx_bd)\n", sc->rx_low_watermark, sc->max_rx_bd); 10918 10919 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " 10920 "mbufs allocated\n", sc->debug_rx_mbuf_alloc); 10921 10922 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " 10923 "rx_bd's\n", sc->free_rx_bd); 10924 10925 if (bce_hdr_split == TRUE) { 10926 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " 10927 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); 10928 10929 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " 10930 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); 10931 10932 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " 10933 "mbufs allocated\n", sc->debug_pg_mbuf_alloc); 10934 } 10935 10936 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " 10937 "rx_bd's\n", sc->free_pg_bd); 10938 10939 BCE_PRINTF(" 0x%04X/0x%04X - (sc->pg_low_watermark)/" 10940 "(sc->max_pg_bd)\n", sc->pg_low_watermark, sc->max_pg_bd); 10941 10942 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " 10943 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); 10944 10945 BCE_PRINTF(" 0x%08X - (sc->bce_flags) " 10946 "bce mac flags\n", sc->bce_flags); 10947 10948 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " 10949 "bce phy flags\n", sc->bce_phy_flags); 10950 10951 BCE_PRINTF( 
10952 "----------------------------" 10953 "----------------" 10954 "----------------------------\n"); 10955} 10956 10957 10958/****************************************************************************/ 10959/* Prints out the hardware state through a summary of important register, */ 10960/* followed by a complete register dump. */ 10961/* */ 10962/* Returns: */ 10963/* Nothing. */ 10964/****************************************************************************/ 10965static __attribute__ ((noinline)) void 10966bce_dump_hw_state(struct bce_softc *sc) 10967{ 10968 u32 val; 10969 10970 BCE_PRINTF( 10971 "----------------------------" 10972 " Hardware State " 10973 "----------------------------\n"); 10974 10975 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); 10976 10977 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 10978 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n", 10979 val, BCE_MISC_ENABLE_STATUS_BITS); 10980 10981 val = REG_RD(sc, BCE_DMA_STATUS); 10982 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", 10983 val, BCE_DMA_STATUS); 10984 10985 val = REG_RD(sc, BCE_CTX_STATUS); 10986 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", 10987 val, BCE_CTX_STATUS); 10988 10989 val = REG_RD(sc, BCE_EMAC_STATUS); 10990 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", 10991 val, BCE_EMAC_STATUS); 10992 10993 val = REG_RD(sc, BCE_RPM_STATUS); 10994 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", 10995 val, BCE_RPM_STATUS); 10996 10997 /* ToDo: Create a #define for this constant. */ 10998 val = REG_RD(sc, 0x2004); 10999 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", 11000 val, 0x2004); 11001 11002 val = REG_RD(sc, BCE_RV2P_STATUS); 11003 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", 11004 val, BCE_RV2P_STATUS); 11005 11006 /* ToDo: Create a #define for this constant. 
*/ 11007 val = REG_RD(sc, 0x2c04); 11008 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", 11009 val, 0x2c04); 11010 11011 val = REG_RD(sc, BCE_TBDR_STATUS); 11012 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", 11013 val, BCE_TBDR_STATUS); 11014 11015 val = REG_RD(sc, BCE_TDMA_STATUS); 11016 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", 11017 val, BCE_TDMA_STATUS); 11018 11019 val = REG_RD(sc, BCE_HC_STATUS); 11020 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", 11021 val, BCE_HC_STATUS); 11022 11023 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 11024 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", 11025 val, BCE_TXP_CPU_STATE); 11026 11027 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 11028 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", 11029 val, BCE_TPAT_CPU_STATE); 11030 11031 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 11032 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", 11033 val, BCE_RXP_CPU_STATE); 11034 11035 val = REG_RD_IND(sc, BCE_COM_CPU_STATE); 11036 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", 11037 val, BCE_COM_CPU_STATE); 11038 11039 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 11040 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", 11041 val, BCE_MCP_CPU_STATE); 11042 11043 val = REG_RD_IND(sc, BCE_CP_CPU_STATE); 11044 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", 11045 val, BCE_CP_CPU_STATE); 11046 11047 BCE_PRINTF( 11048 "----------------------------" 11049 "----------------" 11050 "----------------------------\n"); 11051 11052 BCE_PRINTF( 11053 "----------------------------" 11054 " Register Dump " 11055 "----------------------------\n"); 11056 11057 for (int i = 0x400; i < 0x8000; i += 0x10) { 11058 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 11059 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 11060 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 11061 } 11062 11063 BCE_PRINTF( 11064 "----------------------------" 11065 "----------------" 11066 "----------------------------\n"); 11067} 11068 11069 
11070/****************************************************************************/ 11071/* Prints out the contentst of shared memory which is used for host driver */ 11072/* to bootcode firmware communication. */ 11073/* */ 11074/* Returns: */ 11075/* Nothing. */ 11076/****************************************************************************/ 11077static __attribute__ ((noinline)) void 11078bce_dump_shmem_state(struct bce_softc *sc) 11079{ 11080 BCE_PRINTF( 11081 "----------------------------" 11082 " Hardware State " 11083 "----------------------------\n"); 11084 11085 BCE_PRINTF("0x%08X - Shared memory base address\n", 11086 sc->bce_shmem_base); 11087 BCE_PRINTF("%s - bootcode version\n", 11088 sc->bce_bc_ver); 11089 11090 BCE_PRINTF( 11091 "----------------------------" 11092 " Shared Mem " 11093 "----------------------------\n"); 11094 11095 for (int i = 0x0; i < 0x200; i += 0x10) { 11096 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 11097 i, bce_shmem_rd(sc, i), bce_shmem_rd(sc, i + 0x4), 11098 bce_shmem_rd(sc, i + 0x8), bce_shmem_rd(sc, i + 0xC)); 11099 } 11100 11101 BCE_PRINTF( 11102 "----------------------------" 11103 "----------------" 11104 "----------------------------\n"); 11105} 11106 11107 11108/****************************************************************************/ 11109/* Prints out the mailbox queue registers. */ 11110/* */ 11111/* Returns: */ 11112/* Nothing. 
*/ 11113/****************************************************************************/ 11114static __attribute__ ((noinline)) void 11115bce_dump_mq_regs(struct bce_softc *sc) 11116{ 11117 BCE_PRINTF( 11118 "----------------------------" 11119 " MQ Regs " 11120 "----------------------------\n"); 11121 11122 BCE_PRINTF( 11123 "----------------------------" 11124 "----------------" 11125 "----------------------------\n"); 11126 11127 for (int i = 0x3c00; i < 0x4000; i += 0x10) { 11128 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 11129 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 11130 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 11131 } 11132 11133 BCE_PRINTF( 11134 "----------------------------" 11135 "----------------" 11136 "----------------------------\n"); 11137} 11138 11139 11140/****************************************************************************/ 11141/* Prints out the bootcode state. */ 11142/* */ 11143/* Returns: */ 11144/* Nothing. */ 11145/****************************************************************************/ 11146static __attribute__ ((noinline)) void 11147bce_dump_bc_state(struct bce_softc *sc) 11148{ 11149 u32 val; 11150 11151 BCE_PRINTF( 11152 "----------------------------" 11153 " Bootcode State " 11154 "----------------------------\n"); 11155 11156 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); 11157 11158 val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE); 11159 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n", 11160 val, BCE_BC_RESET_TYPE); 11161 11162 val = bce_shmem_rd(sc, BCE_BC_STATE); 11163 BCE_PRINTF("0x%08X - (0x%06X) state\n", 11164 val, BCE_BC_STATE); 11165 11166 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 11167 BCE_PRINTF("0x%08X - (0x%06X) condition\n", 11168 val, BCE_BC_STATE_CONDITION); 11169 11170 val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD); 11171 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n", 11172 val, BCE_BC_STATE_DEBUG_CMD); 11173 11174 BCE_PRINTF( 11175 "----------------------------" 11176 "----------------" 11177 
"----------------------------\n"); 11178} 11179 11180 11181/****************************************************************************/ 11182/* Prints out the TXP processor state. */ 11183/* */ 11184/* Returns: */ 11185/* Nothing. */ 11186/****************************************************************************/ 11187static __attribute__ ((noinline)) void 11188bce_dump_txp_state(struct bce_softc *sc, int regs) 11189{ 11190 u32 val; 11191 u32 fw_version[3]; 11192 11193 BCE_PRINTF( 11194 "----------------------------" 11195 " TXP State " 11196 "----------------------------\n"); 11197 11198 for (int i = 0; i < 3; i++) 11199 fw_version[i] = htonl(REG_RD_IND(sc, 11200 (BCE_TXP_SCRATCH + 0x10 + i * 4))); 11201 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 11202 11203 val = REG_RD_IND(sc, BCE_TXP_CPU_MODE); 11204 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", 11205 val, BCE_TXP_CPU_MODE); 11206 11207 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 11208 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", 11209 val, BCE_TXP_CPU_STATE); 11210 11211 val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); 11212 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", 11213 val, BCE_TXP_CPU_EVENT_MASK); 11214 11215 if (regs) { 11216 BCE_PRINTF( 11217 "----------------------------" 11218 " Register Dump " 11219 "----------------------------\n"); 11220 11221 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { 11222 /* Skip the big blank spaces */ 11223 if (i < 0x454000 && i > 0x5ffff) 11224 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 11225 "0x%08X 0x%08X\n", i, 11226 REG_RD_IND(sc, i), 11227 REG_RD_IND(sc, i + 0x4), 11228 REG_RD_IND(sc, i + 0x8), 11229 REG_RD_IND(sc, i + 0xC)); 11230 } 11231 } 11232 11233 BCE_PRINTF( 11234 "----------------------------" 11235 "----------------" 11236 "----------------------------\n"); 11237} 11238 11239 11240/****************************************************************************/ 11241/* Prints out the RXP processor state. 
*/ 11242/* */ 11243/* Returns: */ 11244/* Nothing. */ 11245/****************************************************************************/ 11246static __attribute__ ((noinline)) void 11247bce_dump_rxp_state(struct bce_softc *sc, int regs) 11248{ 11249 u32 val; 11250 u32 fw_version[3]; 11251 11252 BCE_PRINTF( 11253 "----------------------------" 11254 " RXP State " 11255 "----------------------------\n"); 11256 11257 for (int i = 0; i < 3; i++) 11258 fw_version[i] = htonl(REG_RD_IND(sc, 11259 (BCE_RXP_SCRATCH + 0x10 + i * 4))); 11260 11261 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 11262 11263 val = REG_RD_IND(sc, BCE_RXP_CPU_MODE); 11264 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", 11265 val, BCE_RXP_CPU_MODE); 11266 11267 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 11268 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", 11269 val, BCE_RXP_CPU_STATE); 11270 11271 val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); 11272 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", 11273 val, BCE_RXP_CPU_EVENT_MASK); 11274 11275 if (regs) { 11276 BCE_PRINTF( 11277 "----------------------------" 11278 " Register Dump " 11279 "----------------------------\n"); 11280 11281 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { 11282 /* Skip the big blank sapces */ 11283 if (i < 0xc5400 && i > 0xdffff) 11284 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 11285 "0x%08X 0x%08X\n", i, 11286 REG_RD_IND(sc, i), 11287 REG_RD_IND(sc, i + 0x4), 11288 REG_RD_IND(sc, i + 0x8), 11289 REG_RD_IND(sc, i + 0xC)); 11290 } 11291 } 11292 11293 BCE_PRINTF( 11294 "----------------------------" 11295 "----------------" 11296 "----------------------------\n"); 11297} 11298 11299 11300/****************************************************************************/ 11301/* Prints out the TPAT processor state. */ 11302/* */ 11303/* Returns: */ 11304/* Nothing. 
*/ 11305/****************************************************************************/ 11306static __attribute__ ((noinline)) void 11307bce_dump_tpat_state(struct bce_softc *sc, int regs) 11308{ 11309 u32 val; 11310 u32 fw_version[3]; 11311 11312 BCE_PRINTF( 11313 "----------------------------" 11314 " TPAT State " 11315 "----------------------------\n"); 11316 11317 for (int i = 0; i < 3; i++) 11318 fw_version[i] = htonl(REG_RD_IND(sc, 11319 (BCE_TPAT_SCRATCH + 0x410 + i * 4))); 11320 11321 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 11322 11323 val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 11324 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", 11325 val, BCE_TPAT_CPU_MODE); 11326 11327 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 11328 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", 11329 val, BCE_TPAT_CPU_STATE); 11330 11331 val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 11332 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", 11333 val, BCE_TPAT_CPU_EVENT_MASK); 11334 11335 if (regs) { 11336 BCE_PRINTF( 11337 "----------------------------" 11338 " Register Dump " 11339 "----------------------------\n"); 11340 11341 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 11342 /* Skip the big blank spaces */ 11343 if (i < 0x854000 && i > 0x9ffff) 11344 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 11345 "0x%08X 0x%08X\n", i, 11346 REG_RD_IND(sc, i), 11347 REG_RD_IND(sc, i + 0x4), 11348 REG_RD_IND(sc, i + 0x8), 11349 REG_RD_IND(sc, i + 0xC)); 11350 } 11351 } 11352 11353 BCE_PRINTF( 11354 "----------------------------" 11355 "----------------" 11356 "----------------------------\n"); 11357} 11358 11359 11360/****************************************************************************/ 11361/* Prints out the Command Procesor (CP) state. */ 11362/* */ 11363/* Returns: */ 11364/* Nothing. 
*/ 11365/****************************************************************************/ 11366static __attribute__ ((noinline)) void 11367bce_dump_cp_state(struct bce_softc *sc, int regs) 11368{ 11369 u32 val; 11370 u32 fw_version[3]; 11371 11372 BCE_PRINTF( 11373 "----------------------------" 11374 " CP State " 11375 "----------------------------\n"); 11376 11377 for (int i = 0; i < 3; i++) 11378 fw_version[i] = htonl(REG_RD_IND(sc, 11379 (BCE_CP_SCRATCH + 0x10 + i * 4))); 11380 11381 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 11382 11383 val = REG_RD_IND(sc, BCE_CP_CPU_MODE); 11384 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", 11385 val, BCE_CP_CPU_MODE); 11386 11387 val = REG_RD_IND(sc, BCE_CP_CPU_STATE); 11388 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", 11389 val, BCE_CP_CPU_STATE); 11390 11391 val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK); 11392 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val, 11393 BCE_CP_CPU_EVENT_MASK); 11394 11395 if (regs) { 11396 BCE_PRINTF( 11397 "----------------------------" 11398 " Register Dump " 11399 "----------------------------\n"); 11400 11401 for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) { 11402 /* Skip the big blank spaces */ 11403 if (i < 0x185400 && i > 0x19ffff) 11404 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 11405 "0x%08X 0x%08X\n", i, 11406 REG_RD_IND(sc, i), 11407 REG_RD_IND(sc, i + 0x4), 11408 REG_RD_IND(sc, i + 0x8), 11409 REG_RD_IND(sc, i + 0xC)); 11410 } 11411 } 11412 11413 BCE_PRINTF( 11414 "----------------------------" 11415 "----------------" 11416 "----------------------------\n"); 11417} 11418 11419 11420/****************************************************************************/ 11421/* Prints out the Completion Procesor (COM) state. */ 11422/* */ 11423/* Returns: */ 11424/* Nothing. 
*/ 11425/****************************************************************************/ 11426static __attribute__ ((noinline)) void 11427bce_dump_com_state(struct bce_softc *sc, int regs) 11428{ 11429 u32 val; 11430 u32 fw_version[4]; 11431 11432 BCE_PRINTF( 11433 "----------------------------" 11434 " COM State " 11435 "----------------------------\n"); 11436 11437 for (int i = 0; i < 3; i++) 11438 fw_version[i] = htonl(REG_RD_IND(sc, 11439 (BCE_COM_SCRATCH + 0x10 + i * 4))); 11440 11441 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); 11442 11443 val = REG_RD_IND(sc, BCE_COM_CPU_MODE); 11444 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", 11445 val, BCE_COM_CPU_MODE); 11446 11447 val = REG_RD_IND(sc, BCE_COM_CPU_STATE); 11448 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", 11449 val, BCE_COM_CPU_STATE); 11450 11451 val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK); 11452 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val, 11453 BCE_COM_CPU_EVENT_MASK); 11454 11455 if (regs) { 11456 BCE_PRINTF( 11457 "----------------------------" 11458 " Register Dump " 11459 "----------------------------\n"); 11460 11461 for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) { 11462 BCE_PRINTF("0x%04X: 0x%08X 0x%08X " 11463 "0x%08X 0x%08X\n", i, 11464 REG_RD_IND(sc, i), 11465 REG_RD_IND(sc, i + 0x4), 11466 REG_RD_IND(sc, i + 0x8), 11467 REG_RD_IND(sc, i + 0xC)); 11468 } 11469 } 11470 11471 BCE_PRINTF( 11472 "----------------------------" 11473 "----------------" 11474 "----------------------------\n"); 11475} 11476 11477 11478/****************************************************************************/ 11479/* Prints out the Receive Virtual 2 Physical (RV2P) state. */ 11480/* */ 11481/* Returns: */ 11482/* Nothing. 
                                                                            */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rv2p_state(struct bce_softc *sc)
{
	u32 val, pc1, pc2, fw_ver_high, fw_ver_low;

	BCE_PRINTF(
	    "----------------------------"
	    " RV2P State "
	    "----------------------------\n");

	/*
	 * Stall the RV2P processors before touching the instruction
	 * memory read interface.
	 */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2;
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Read the firmware version.  NOTE(review): instruction word 1
	 * appears to hold the version words for each processor --
	 * confirm against the RV2P firmware layout.
	 */
	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	/* Resume the RV2P processors. */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Fetch the program counter value.  NOTE(review): 0x68007800 is a
	 * magic debug-vector select value; the low 16 bits of the peek
	 * register carry PROC1's PC, the high 16 bits PROC2's.
	 */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);

	/* Fetch the program counter value again to see if it is advancing. */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the driver state and then enters the debugger.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_breakpoint(struct bce_softc *sc)
{

	/*
	 * Unreachable code to silence compiler warnings
	 * about unused functions.  Every debug dump helper is referenced
	 * here so the compiler keeps them available for use from ddb.
	 */
	if (0) {
		bce_freeze_controller(sc);
		bce_unfreeze_controller(sc);
		bce_dump_enet(sc, NULL);
		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD_ALLOC);
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC);
		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD_ALLOC);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_ctx(sc, RX_CID);
		bce_dump_ftqs(sc);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD_ALLOC);
		bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD_ALLOC);
		bce_dump_pg_chain(sc, 0, USABLE_PG_BD_ALLOC);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
		bce_dump_bc_state(sc);
		bce_dump_txp_state(sc, 0);
		bce_dump_rxp_state(sc, 0);
		bce_dump_tpat_state(sc, 0);
		bce_dump_cp_state(sc, 0);
		bce_dump_com_state(sc, 0);
		bce_dump_rv2p_state(sc);
		bce_dump_pgbd(sc, 0, NULL);
	}

	bce_dump_status_block(sc);
	bce_dump_driver_state(sc);

	/* Call the debugger. */
	breakpoint();
}
#endif