/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/bxe/bxe.c 339884 2018-10-29 21:31:23Z davidcs $");

#define BXE_DRIVER_VERSION "1.78.91"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Redefine it as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        QLOGIC_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");
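
/*
 * Usage note (illustrative only): these tunables are normally set from
 * loader.conf(5) before the module loads. The values below are
 * hypothetical examples, not recommendations:
 *
 *   hw.bxe.debug=0
 *   hw.bxe.interrupt_mode=2    (prefer MSI-X)
 *   hw.bxe.queue_count=4
 *   hw.bxe.hc_rx_ticks=25
 */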
/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");

#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
    { STATS_OFFSET32(tx_queue_full_return),
      4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
      4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
    { STATS_OFFSET32(tx_request_link_down_failures),
      4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
    { STATS_OFFSET32(bd_avail_too_less_failures),
      4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
    { STATS_OFFSET32(tx_mq_not_empty),
      4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
    { STATS_OFFSET32(nsegs_path1_errors),
      4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
    { STATS_OFFSET32(nsegs_path2_errors),
      4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return),
      4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
      4, "bxe_tx_mq_sc_state_failures"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures),
      4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
      4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty),
      4, "tx_mq_not_empty"},
    { Q_STATS_OFFSET32(nsegs_path1_errors),
      4, "nsegs_path1_errors"},
    { Q_STATS_OFFSET32(nsegs_path2_errors),
      4, "nsegs_path2_errors"}
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)


static void bxe_cmng_fns_init(struct bxe_softc *sc,
                              uint8_t read_cfg,
                              uint8_t cmng_type);
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int  bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                 uint16_t prev_index,
                                 uint16_t index);
static int  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                  int queue);
static int  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                  uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf *m,
                                     uint8_t contents);
static int  bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int  bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int  bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int  bxe_init_locked(struct bxe_softc *sc);
static int  bxe_stop_locked(struct bxe_softc *sc);
static void bxe_sp_err_timeout_task(void *arg, int pending);
void bxe_parity_recover(struct bxe_softc *sc);
void bxe_handle_error(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
                                     uint8_t keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);

static int  bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
int bxe_grc_dump(struct bxe_softc *sc);
static int  bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);

/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0)) {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1) {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1) {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit))) {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }
    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement) {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;

    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);

    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;

    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);

    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;

    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);

    return (x);
}

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * On failure the callback zeroes the saved physical address and segment
 * count so the caller can detect the error.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
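
/*
 * Usage sketch (illustrative only, not part of the driver flow): callers
 * pair bxe_dma_alloc() with bxe_dma_free() and must hand in a zeroed
 * descriptor, since a non-zero size is treated as "already allocated":
 *
 *   struct bxe_dma dma;
 *
 *   memset(&dma, 0, sizeof(dma));
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example") != 0)
 *       return (ENOMEM);
 *   ... use dma.vaddr (KVA) and dma.paddr (bus address) ...
 *   bxe_dma_free(sc, &dma);
 */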
/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
          resource, resource_bit);
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
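
/*
 * Usage sketch (illustrative only): HW lock users bracket their register
 * access with the acquire/release pair; bxe_acquire_hw_lock() above polls
 * the per-function lock register every 5ms for up to 5 seconds before
 * reporting failure:
 *
 *   if (bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO) == 0) {
 *       ... touch the shared resource ...
 *       bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
 *   }
 */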
/*
 * The per-pf misc lock must be acquired before the per-port mcp lock.
 * Otherwise, had we done things the other way around, if two pfs from the
 * same port were to attempt to access nvram at the same time, we could run
 * into a scenario such as:
 *  pf A takes the port lock.
 *  pf B succeeds in taking the same lock since they are from the same port.
 *  pf A takes the per pf misc lock. Performs eeprom access.
 *  pf A finishes. Unlocks the per pf misc lock.
 *  pf B takes the lock and proceeds to perform its own access.
 *  pf A unlocks the per port lock, while pf B is still working (!).
 *  mcp takes the per port lock and corrupts pf B's access (and/or has its
 *  own access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;
    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes;
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes;
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }
    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_cmd *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
                      DMAE_CMD_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_CMD_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
               (dst_type << DMAE_CMD_DST_SHIFT));

    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_cmd *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_cmd));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
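
/*
 * Flow sketch (illustrative only): a DMAE transaction with a PCI
 * completion is prepared, patched with addresses/length, then posted and
 * polled by bxe_issue_dmae_with_comp() below:
 *
 *   struct dmae_cmd dmae;
 *
 *   bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *   dmae.src_addr_lo = U64_LO(dma_addr);
 *   dmae.src_addr_hi = U64_HI(dma_addr);
 *   dmae.dst_addr_lo = (dst_addr >> 2); // GRC addresses have dword resolution
 *   dmae.len         = len32;
 *   rc = bxe_issue_dmae_with_comp(sc, &dmae);
 */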
/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_cmd *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}

void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_cmd dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_cmd dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
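
/*
 * Worked example (hypothetical max value): if DMAE_LEN32_WR_MAX(sc) were
 * 1024 dwords, a 2500-dword write would be issued above as chunks of
 * 1024 + 1024 + 452 dwords. Note that 'len' counts dwords while 'offset'
 * advances in bytes, hence the '* 4' per chunk.
 */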
void
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
                       uint32_t cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t port,
                            uint8_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t port,
                            uint16_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t fw_sb_id,
                             uint8_t sb_index,
                             uint8_t disable,
                             uint16_t usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t reg_addr,
                   uint32_t val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t *wb_write,
                      uint16_t len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *wb_write,
                     uint16_t len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
1864{ 1865 /* XXX */ 1866 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1867} 1868 1869static int 1870bxe_set_spio(struct bxe_softc *sc, 1871 int spio, 1872 uint32_t mode) 1873{ 1874 uint32_t spio_reg; 1875 1876 /* Only 2 SPIOs are configurable */ 1877 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1878 BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); 1879 return (-1); 1880 } 1881 1882 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1883 1884 /* read SPIO and mask except the float bits */ 1885 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1886 1887 switch (mode) { 1888 case MISC_SPIO_OUTPUT_LOW: 1889 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1890 /* clear FLOAT and set CLR */ 1891 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1892 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1893 break; 1894 1895 case MISC_SPIO_OUTPUT_HIGH: 1896 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1897 /* clear FLOAT and set SET */ 1898 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1899 spio_reg |= (spio << MISC_SPIO_SET_POS); 1900 break; 1901 1902 case MISC_SPIO_INPUT_HI_Z: 1903 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1904 /* set FLOAT */ 1905 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1906 break; 1907 1908 default: 1909 break; 1910 } 1911 1912 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1913 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1914 1915 return (0); 1916} 1917 1918static int 1919bxe_gpio_read(struct bxe_softc *sc, 1920 int gpio_num, 1921 uint8_t port) 1922{ 1923 /* The GPIO should be swapped if swap register is set and active */ 1924 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 1925 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 1926 int gpio_shift = (gpio_num + 1927 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1928 uint32_t gpio_mask = (1 << gpio_shift); 1929 uint32_t gpio_reg; 1930 1931 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1932 BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" 1933 " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, 1934 gpio_mask); 1935 return (-1); 1936 } 1937 1938 /* read GPIO value */ 1939 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 1940 1941 /* get the requested pin value */ 1942 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 1943} 1944 1945static int 1946bxe_gpio_write(struct bxe_softc *sc, 1947 int gpio_num, 1948 uint32_t mode, 1949 uint8_t port) 1950{ 1951 /* The GPIO should be swapped if swap register is set and active */ 1952 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 1953 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 1954 int gpio_shift = (gpio_num + 1955 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1956 uint32_t gpio_mask = (1 << gpio_shift); 1957 uint32_t gpio_reg; 1958 1959 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1960 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" 1961 " gpio_shift %d gpio_mask 0x%x\n", 1962 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); 1963 return (-1); 1964 } 1965 1966 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1967 1968 /* read GPIO and mask except the float bits */ 1969 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1970 1971 switch (mode) { 1972 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1973 BLOGD(sc, DBG_PHY, 1974 "Set GPIO %d (shift %d) -> output low\n", 1975 gpio_num, gpio_shift); 1976 /* clear FLOAT and set CLR */ 1977 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1978 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 1979 break; 1980 1981 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1982 BLOGD(sc, DBG_PHY, 1983 "Set GPIO %d (shift %d) -> output high\n", 1984 gpio_num, gpio_shift); 1985 /* clear FLOAT and set SET */ 1986 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1987 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1988 break; 1989 1990 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1991 BLOGD(sc, DBG_PHY, 1992 "Set GPIO %d (shift %d) -> input\n", 1993 gpio_num, gpio_shift); 1994 /* set FLOAT */ 1995 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1996 break; 1997 1998 default: 1999 break; 2000 } 2001 2002 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2003 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2004 2005 return (0); 2006} 2007 2008static int 2009bxe_gpio_mult_write(struct bxe_softc *sc, 2010 uint8_t pins, 2011 uint32_t mode) 2012{ 2013 uint32_t gpio_reg; 2014 2015 /* any port swapping should be handled by caller */ 2016 2017 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2018 2019 /* read GPIO and mask except the float bits */ 2020 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2021 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2022 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2023 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2024 2025 switch (mode) { 2026 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2027 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2028 /* set CLR */ 2029 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2030 break; 2031 2032 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2033 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2034 /* set SET */ 2035 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2036 break; 2037 2038 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2039 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2040 /* set FLOAT */ 2041 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2042 break; 2043 2044 default: 2045 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" 2046 " gpio_reg 0x%x\n", pins, mode, gpio_reg); 2047 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2048 return (-1); 2049 } 2050 2051 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2052 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2053 2054 return (0); 2055} 2056 2057static int 2058bxe_gpio_int_write(struct bxe_softc *sc, 2059 int gpio_num, 2060 uint32_t mode, 2061 uint8_t port) 2062{ 2063 /* The GPIO should be swapped if swap register is set and active */ 2064 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2065 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2066 int gpio_shift = (gpio_num + 2067 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
                  " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}

uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t         gpio_num,
                   uint8_t          port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}

uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t         gpio_num,
                    uint8_t          mode, /* 0=low 1=high */
                    uint8_t          port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}

uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t          pins,
                         uint8_t          mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}

uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t         gpio_num,
                        uint8_t          mode, /* 0=low 1=high */
                        uint8_t          port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}

void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}

/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t         command,
                    uint32_t         param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          (cnt * delay), rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* Ruh-roh! */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}

static uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t         command,
               uint32_t         param)
{
    return (elink_cb_fw_command(sc, command, param));
}

static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t         addr,
                           bus_addr_t       mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t       mapping,
                      uint16_t         abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t         abs_fid,
                      uint16_t         pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}

static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t         abs_fid,
                     uint8_t          enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}

static void
storm_memset_eq_data(struct bxe_softc       *sc,
                     struct event_ring_data *eq_data,
                     uint16_t               pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}

static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t         eq_prod,
                     uint16_t         pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
}

/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 * Used to setup the leading connection on a port. Completes on the
 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 * Used to setup an additional connection on a port. Completes on the
 * RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 * Used to force the storm processors to update the statistics database
 * in host memory. This ramrod is sent on the leading connection CID and
 * completes as an index increment of the CSTORM on the default status
 * block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 * Used to update the state of the leading connection, usually to update
 * the RSS indirection table. Completes on the RCQ of the leading
 * connection. (Not currently used under FreeBSD until OS support becomes
 * available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 * Used when tearing down a connection prior to driver unload. Completes
 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
 * use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 * the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 * Used when tearing down a connection prior to driver unload. Completes
 * on the RCQ of the leading connection (since the current connection
 * has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 * Used to tear down the leading connection prior to driver unload,
 * typically fp[0]. Completes as an index increment of the CSTORM on the
 * default status block.
 *
 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
 * Used for connection offload. Completes on the RCQ of the multi-queue
 * RSS connection that is being offloaded. (Not currently used under
 * FreeBSD.)
 *
 * There can only be one command pending per function.
 *
 * Returns:
 * 0 = Success, !0 = Failure.
 */

/* must be called under the spq lock */
static inline
struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
{
    struct eth_spe *next_spe = sc->spq_prod_bd;

    if (sc->spq_prod_bd == sc->spq_last_bd) {
        /* wrap back to the first eth_spq */
        sc->spq_prod_bd = sc->spq;
        sc->spq_prod_idx = 0;
    } else {
        sc->spq_prod_bd++;
        sc->spq_prod_idx++;
    }

    return (next_spe);
}

/* must be called under the spq lock */
static inline
void bxe_sp_prod_update(struct bxe_softc *sc)
{
    int func = SC_FUNC(sc);

    /*
     * Make sure that BD data is updated before writing the producer.
     * BD data is written to the memory, the producer is read from the
     * memory, thus we need a full memory barrier to ensure the ordering.
     */
    mb();

    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
             sc->spq_prod_idx);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
}

/**
 * bxe_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:      command to check
 * @cmd_type: command type
 */
static inline
int bxe_is_contextless_ramrod(int cmd,
                              int cmd_type)
{
    if ((cmd_type == NONE_CONNECTION_TYPE) ||
        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
        return (TRUE);
    } else {
        return (FALSE);
    }
}

/**
 * bxe_sp_post - place a single command on an SP ring
 *
 * @sc: driver handle
 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid: SW CID the command is related to
 * @data_hi: command private data address (high 32 bits)
 * @data_lo: command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
 */
int
bxe_sp_post(struct bxe_softc *sc,
            int              command,
            int              cid,
            uint32_t         data_hi,
            uint32_t         data_lo,
            int              cmd_type)
{
    struct eth_spe *spe;
    uint16_t type;
    int common;

    common = bxe_is_contextless_ramrod(command, cmd_type);

    BXE_SP_LOCK(sc);

    if (common) {
        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
            BLOGE(sc, "EQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    } else {
        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
            BLOGE(sc, "SPQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    }

    spe = bxe_sp_get_next(sc);

    /* CID needs the port number to be encoded in it */
    spe->hdr.conn_and_cmd_data =
        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));

    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;

    /* TBD: Check if it works for VFs */
    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
             SPE_HDR_T_FUNCTION_ID);

    spe->hdr.type = htole16(type);

    spe->data.update_data_addr.hi = htole32(data_hi);
    spe->data.update_data_addr.lo = htole32(data_lo);

    /*
     * It's ok if the actual decrement is issued towards the memory
     * somewhere between the lock and unlock. Thus no more explicit
     * memory barrier is needed.
     */
    if (common) {
        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
    } else {
        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
    }

    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
    BLOGD(sc, DBG_SP,
          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x "
          "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
          sc->spq_prod_idx,
          (uint32_t)U64_HI(sc->spq_dma.paddr),
          (uint32_t)(U64_LO(sc->spq_dma.paddr) +
                     (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
          command,
          common,
          HW_CID(sc, cid),
          data_hi,
          data_lo,
          type,
          atomic_load_acq_long(&sc->cq_spq_left),
          atomic_load_acq_long(&sc->eq_spq_left));

    bxe_sp_prod_update(sc);

    BXE_SP_UNLOCK(sc);
    return (0);
}

/**
 * bxe_debug_print_ind_table - prints the indirection table configuration.
 *
 * @sc: driver handle
 * @p: pointer to rss configuration
 */

/*
 * FreeBSD Device probe function.
 *
 * Compares the device found to the driver's list of supported devices and
 * reports back to the BSD loader whether this is the right driver for the
 * device. This is the driver entry function called from the "kldload"
 * command.
 *
 * Returns:
 * BUS_PROBE_DEFAULT on success, positive value on failure.
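 *
 * For example (illustrative values, not taken from real hardware), probing
 * a BCM57810 with PCI revision 0x00 would yield the description
 * "QLogic NetXtreme II BCM57810 10GbE (A0) BXE v:1.78.91" per the
 * "%s (%c%d) BXE v:%s" format used below.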
2503 */ 2504static int 2505bxe_probe(device_t dev) 2506{ 2507 struct bxe_softc *sc; 2508 struct bxe_device_type *t; 2509 char *descbuf; 2510 uint16_t did, sdid, svid, vid; 2511 2512 /* Find our device structure */ 2513 sc = device_get_softc(dev); 2514 sc->dev = dev; 2515 t = bxe_devs; 2516 2517 /* Get the data for the device to be probed. */ 2518 vid = pci_get_vendor(dev); 2519 did = pci_get_device(dev); 2520 svid = pci_get_subvendor(dev); 2521 sdid = pci_get_subdevice(dev); 2522 2523 BLOGD(sc, DBG_LOAD, 2524 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2525 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2526 2527 /* Look through the list of known devices for a match. */ 2528 while (t->bxe_name != NULL) { 2529 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2530 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2531 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2532 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2533 if (descbuf == NULL) 2534 return (ENOMEM); 2535 2536 /* Print out the device identity. */ 2537 snprintf(descbuf, BXE_DEVDESC_MAX, 2538 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2539 (((pci_read_config(dev, PCIR_REVID, 4) & 2540 0xf0) >> 4) + 'A'), 2541 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2542 BXE_DRIVER_VERSION); 2543 2544 device_set_desc_copy(dev, descbuf); 2545 free(descbuf, M_TEMP); 2546 return (BUS_PROBE_DEFAULT); 2547 } 2548 t++; 2549 } 2550 2551 return (ENXIO); 2552} 2553 2554static void 2555bxe_init_mutexes(struct bxe_softc *sc) 2556{ 2557#ifdef BXE_CORE_LOCK_SX 2558 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2559 "bxe%d_core_lock", sc->unit); 2560 sx_init(&sc->core_sx, sc->core_sx_name); 2561#else 2562 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2563 "bxe%d_core_lock", sc->unit); 2564 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2565#endif 2566 2567 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2568 "bxe%d_sp_lock", sc->unit); 2569 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2570 2571 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2572 "bxe%d_dmae_lock", sc->unit); 2573 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2574 2575 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2576 "bxe%d_phy_lock", sc->unit); 2577 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2578 2579 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2580 "bxe%d_fwmb_lock", sc->unit); 2581 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2582 2583 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2584 "bxe%d_print_lock", sc->unit); 2585 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2586 2587 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2588 "bxe%d_stats_lock", sc->unit); 2589 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2590 2591 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2592 "bxe%d_mcast_lock", sc->unit); 2593 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2594} 2595 2596static void 2597bxe_release_mutexes(struct bxe_softc *sc) 2598{ 2599#ifdef BXE_CORE_LOCK_SX 2600 sx_destroy(&sc->core_sx); 2601#else 2602 if (mtx_initialized(&sc->core_mtx)) { 2603 mtx_destroy(&sc->core_mtx); 2604 } 2605#endif 2606 2607 if (mtx_initialized(&sc->sp_mtx)) { 2608 mtx_destroy(&sc->sp_mtx); 2609 } 2610 2611 if (mtx_initialized(&sc->dmae_mtx)) { 2612 mtx_destroy(&sc->dmae_mtx); 2613 } 2614 2615 if (mtx_initialized(&sc->port.phy_mtx)) { 2616 mtx_destroy(&sc->port.phy_mtx); 
2617 } 2618 2619 if (mtx_initialized(&sc->fwmb_mtx)) { 2620 mtx_destroy(&sc->fwmb_mtx); 2621 } 2622 2623 if (mtx_initialized(&sc->print_mtx)) { 2624 mtx_destroy(&sc->print_mtx); 2625 } 2626 2627 if (mtx_initialized(&sc->stats_mtx)) { 2628 mtx_destroy(&sc->stats_mtx); 2629 } 2630 2631 if (mtx_initialized(&sc->mcast_mtx)) { 2632 mtx_destroy(&sc->mcast_mtx); 2633 } 2634} 2635 2636static void 2637bxe_tx_disable(struct bxe_softc* sc) 2638{ 2639 struct ifnet *ifp = sc->ifnet; 2640 2641 /* tell the stack the driver is stopped and TX queue is full */ 2642 if (ifp != NULL) { 2643 ifp->if_drv_flags = 0; 2644 } 2645} 2646 2647static void 2648bxe_drv_pulse(struct bxe_softc *sc) 2649{ 2650 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2651 sc->fw_drv_pulse_wr_seq); 2652} 2653 2654static inline uint16_t 2655bxe_tx_avail(struct bxe_softc *sc, 2656 struct bxe_fastpath *fp) 2657{ 2658 int16_t used; 2659 uint16_t prod; 2660 uint16_t cons; 2661 2662 prod = fp->tx_bd_prod; 2663 cons = fp->tx_bd_cons; 2664 2665 used = SUB_S16(prod, cons); 2666 2667 return (int16_t)(sc->tx_ring_size) - used; 2668} 2669 2670static inline int 2671bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2672{ 2673 uint16_t hw_cons; 2674 2675 mb(); /* status block fields can change */ 2676 hw_cons = le16toh(*fp->tx_cons_sb); 2677 return (hw_cons != fp->tx_pkt_cons); 2678} 2679 2680static inline uint8_t 2681bxe_has_tx_work(struct bxe_fastpath *fp) 2682{ 2683 /* expand this for multi-cos if ever supported */ 2684 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2685} 2686 2687static inline int 2688bxe_has_rx_work(struct bxe_fastpath *fp) 2689{ 2690 uint16_t rx_cq_cons_sb; 2691 2692 mb(); /* status block fields can change */ 2693 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2694 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2695 rx_cq_cons_sb++; 2696 return (fp->rx_cq_cons != rx_cq_cons_sb); 2697} 2698 2699static void 2700bxe_sp_event(struct bxe_softc *sc, 2701 struct bxe_fastpath *fp, 2702 union eth_rx_cqe *rr_cqe) 2703{ 2704 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2705 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2706 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2707 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2708 2709 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2710 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2711 2712 switch (command) { 2713 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2714 BLOGD(sc, DBG_SP, "got UPDATE ramrod. 
CID %d\n", cid);
        drv_cmd = ECORE_Q_CMD_UPDATE;
        break;

    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP;
        break;

    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
        break;

    case (RAMROD_CMD_ID_ETH_HALT):
        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_HALT;
        break;

    case (RAMROD_CMD_ID_ETH_TERMINATE):
        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_TERMINATE;
        break;

    case (RAMROD_CMD_ID_ETH_EMPTY):
        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_EMPTY;
        break;

    default:
        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
              command, fp->index);
        return;
    }

    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
        /*
         * q_obj->complete_cmd() failure means that this was
         * an unexpected completion.
         *
         * In this case we don't want to increase the sc->spq_left
         * because apparently we haven't sent this command in the first
         * place.
         */
        // bxe_panic(sc, ("Unexpected SP completion\n"));
        return;
    }

    atomic_add_acq_long(&sc->cq_spq_left, 1);

    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
          atomic_load_acq_long(&sc->cq_spq_left));
}

/*
 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
 * the current aggregation queue as in-progress.
 */
static void
bxe_tpa_start(struct bxe_softc            *sc,
              struct bxe_fastpath         *fp,
              uint16_t                    queue,
              uint16_t                    cons,
              uint16_t                    prod,
              struct eth_fast_path_rx_cqe *cqe)
{
    struct bxe_sw_rx_bd tmp_bd;
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    int max_agg_queues;
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    uint16_t index;

    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START cons=%d prod=%d\n",
          fp->index, queue, cons, prod);

    max_agg_queues = MAX_AGG_QS(sc);

    KASSERT((queue < max_agg_queues),
            ("fp[%02d] invalid aggr queue (%d >= %d)!",
             fp->index, queue, max_agg_queues));

    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
             fp->index, queue));

    /* copy the existing mbuf and mapping from the TPA pool */
    tmp_bd = tpa_info->bd;

    if (tmp_bd.m == NULL) {
        uint32_t *tmp;

        tmp = (uint32_t *)cqe;

        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
              fp->index, queue, cons, prod);
        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x "
                  "0x%08x 0x%08x 0x%08x 0x%08x]\n",
              *tmp, *(tmp+1), *(tmp+2), *(tmp+3),
              *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));

        /* XXX Error handling?
*/ 2817 return; 2818 } 2819 2820 /* change the TPA queue to the start state */ 2821 tpa_info->state = BXE_TPA_STATE_START; 2822 tpa_info->placement_offset = cqe->placement_offset; 2823 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2824 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2825 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2826 2827 fp->rx_tpa_queue_used |= (1 << queue); 2828 2829 /* 2830 * If all the buffer descriptors are filled with mbufs then fill in 2831 * the current consumer index with a new BD. Else if a maximum Rx 2832 * buffer limit is imposed then fill in the next producer index. 2833 */ 2834 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 2835 prod : cons; 2836 2837 /* move the received mbuf and mapping to TPA pool */ 2838 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2839 2840 /* release any existing RX BD mbuf mappings */ 2841 if (cons != index) { 2842 rx_buf = &fp->rx_mbuf_chain[cons]; 2843 2844 if (rx_buf->m_map != NULL) { 2845 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2846 BUS_DMASYNC_POSTREAD); 2847 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2848 } 2849 2850 /* 2851 * We get here when the maximum number of rx buffers is less than 2852 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2853 * it out here without concern of a memory leak. 2854 */ 2855 fp->rx_mbuf_chain[cons].m = NULL; 2856 } 2857 2858 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2859 fp->rx_mbuf_chain[index] = tmp_bd; 2860 2861 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2862 rx_bd = &fp->rx_chain[index]; 2863 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2864 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2865} 2866 2867/* 2868 * When a TPA aggregation is completed, loop through the individual mbufs 2869 * of the aggregation, combining them into a single mbuf which will be sent 2870 * up the stack. Refill all freed SGEs with mbufs as we go along. 2871 */ 2872static int 2873bxe_fill_frag_mbuf(struct bxe_softc *sc, 2874 struct bxe_fastpath *fp, 2875 struct bxe_sw_tpa_info *tpa_info, 2876 uint16_t queue, 2877 uint16_t pages, 2878 struct mbuf *m, 2879 struct eth_end_agg_rx_cqe *cqe, 2880 uint16_t cqe_idx) 2881{ 2882 struct mbuf *m_frag; 2883 uint32_t frag_len, frag_size, i; 2884 uint16_t sge_idx; 2885 int rc = 0; 2886 int j; 2887 2888 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 2889 2890 BLOGD(sc, DBG_LRO, 2891 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 2892 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 2893 2894 /* make sure the aggregated frame is not too big to handle */ 2895 if (pages > 8 * PAGES_PER_SGE) { 2896 2897 uint32_t *tmp = (uint32_t *)cqe; 2898 2899 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 2900 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 2901 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 2902 tpa_info->len_on_bd, frag_size); 2903 2904 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", 2905 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); 2906 2907 bxe_panic(sc, ("sge page count error\n")); 2908 return (EINVAL); 2909 } 2910 2911 /* 2912 * Scan through the scatter gather list pulling individual mbufs into a 2913 * single mbuf for the host stack. 
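 *
 * Worked example (assuming 4KB pages and one page per SGE): a TPA frame
 * with pkt_len=9014 and len_on_bd=1514 leaves frag_size=7500 bytes in the
 * SGEs, which this loop consumes as min(frag_size, SGE_PAGES) chunks of
 * 4096 and then 3404 bytes across two SGE mbufs.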
2914 */ 2915 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 2916 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 2917 2918 /* 2919 * Firmware gives the indices of the SGE as if the ring is an array 2920 * (meaning that the "next" element will consume 2 indices). 2921 */ 2922 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 2923 2924 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 2925 "sge_idx=%d frag_size=%d frag_len=%d\n", 2926 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 2927 2928 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 2929 2930 /* allocate a new mbuf for the SGE */ 2931 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 2932 if (rc) { 2933 /* Leave all remaining SGEs in the ring! */ 2934 return (rc); 2935 } 2936 2937 /* update the fragment length */ 2938 m_frag->m_len = frag_len; 2939 2940 /* concatenate the fragment to the head mbuf */ 2941 m_cat(m, m_frag); 2942 fp->eth_q_stats.mbuf_alloc_sge--; 2943 2944 /* update the TPA mbuf size and remaining fragment size */ 2945 m->m_pkthdr.len += frag_len; 2946 frag_size -= frag_len; 2947 } 2948 2949 BLOGD(sc, DBG_LRO, 2950 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 2951 fp->index, queue, frag_size); 2952 2953 return (rc); 2954} 2955 2956static inline void 2957bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 2958{ 2959 int i, j; 2960 2961 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 2962 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 2963 2964 for (j = 0; j < 2; j++) { 2965 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 2966 idx--; 2967 } 2968 } 2969} 2970 2971static inline void 2972bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 2973{ 2974 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 2975 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 2976 2977 /* 2978 * Clear the two last indices in the page to 1. These are the indices that 2979 * correspond to the "next" element, hence will never be indicated and 2980 * should be removed from the calculations. 
     */
    bxe_clear_sge_mask_next_elems(fp);
}

static inline void
bxe_update_last_max_sge(struct bxe_fastpath *fp,
                        uint16_t            idx)
{
    uint16_t last_max = fp->last_max_sge;

    if (SUB_S16(idx, last_max) > 0) {
        fp->last_max_sge = idx;
    }
}

static inline void
bxe_update_sge_prod(struct bxe_softc          *sc,
                    struct bxe_fastpath       *fp,
                    uint16_t                  sge_len,
                    union eth_sgl_or_raw_data *cqe)
{
    uint16_t last_max, last_elem, first_elem;
    uint16_t delta = 0;
    uint16_t i;

    if (!sge_len) {
        return;
    }

    /* first mark all used pages */
    for (i = 0; i < sge_len; i++) {
        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                            RX_SGE(le16toh(cqe->sgl[i])));
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
          fp->index, sge_len - 1,
          le16toh(cqe->sgl[sge_len - 1]));

    /* assume that the last SGE index is the biggest */
    bxe_update_last_max_sge(fp,
                            le16toh(cqe->sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

    /* if ring is not full */
    if (last_elem + 1 != first_elem) {
        last_elem++;
    }

    /* now update the prod */
    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
        if (__predict_true(fp->sge_mask[i])) {
            break;
        }

        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
        delta += BIT_VEC64_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bxe_clear_sge_mask_next_elems(fp);
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
          fp->index, fp->last_max_sge, fp->rx_sge_prod);
}

/*
 * The aggregation on the current TPA queue has completed. Pull the individual
 * mbuf fragments together into a single mbuf, perform all necessary checksum
 * calculations, and send the resulting mbuf to the stack.
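 *
 * The page count is derived by the caller from the CQE, i.e.
 * pages = SGE_PAGE_ALIGN(pkt_len - len_on_bd) >> SGE_PAGE_SHIFT, so (for
 * illustration, assuming 4KB pages) a frame carrying 7500 bytes beyond
 * the BD arrives here with pages == 2.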
 */
static void
bxe_tpa_stop(struct bxe_softc          *sc,
             struct bxe_fastpath       *fp,
             struct bxe_sw_tpa_info    *tpa_info,
             uint16_t                  queue,
             uint16_t                  pages,
             struct eth_end_agg_rx_cqe *cqe,
             uint16_t                  cqe_idx)
{
    struct ifnet *ifp = sc->ifnet;
    struct mbuf *m;
    int rc = 0;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
          fp->index, queue, tpa_info->placement_offset,
          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);

    m = tpa_info->bd.m;

    /* allocate a replacement before modifying existing mbuf */
    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
    if (rc) {
        /* drop the frame and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        goto bxe_tpa_stop_exit;
    }

    /* we have a replacement, fixup the current mbuf */
    m_adj(m, tpa_info->placement_offset);
    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;

    /* mark the checksums valid (taken care of by the firmware) */
    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
    m->m_pkthdr.csum_data = 0xffff;
    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                               CSUM_IP_VALID   |
                               CSUM_DATA_VALID |
                               CSUM_PSEUDO_HDR);

    /* aggregate all of the SGEs into a single mbuf */
    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
    if (rc) {
        /* drop the packet and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        m_freem(m);
    } else {
        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* assign the packet to this interface */
        m->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        BXE_SET_FLOWID(m);
#endif

        ifp->if_ipackets++;
        fp->eth_q_stats.rx_tpa_pkts++;

        /* pass the frame to the stack */
        (*ifp->if_input)(ifp, m);
    }

    /* we passed an mbuf up the stack or dropped the frame */
    fp->eth_q_stats.mbuf_alloc_tpa--;

bxe_tpa_stop_exit:

    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
    fp->rx_tpa_queue_used &= ~(1 << queue);
}

static uint8_t
bxe_service_rxsgl(struct bxe_fastpath         *fp,
                  uint16_t                    len,
                  uint16_t                    lenonbd,
                  struct mbuf                 *m,
                  struct eth_fast_path_rx_cqe *cqe_fp)
{
    struct mbuf *m_frag;
    uint16_t frags, frag_len;
    uint16_t sge_idx = 0;
    uint16_t j;
    uint8_t i, rc = 0;
    uint32_t frag_size;

    /* adjust the mbuf */
    m->m_len = lenonbd;

    frag_size = len - lenonbd;
    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
        m_frag->m_len = frag_len;

        /* allocate a new mbuf for the SGE */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring!
*/ 3170 return (rc); 3171 } 3172 fp->eth_q_stats.mbuf_alloc_sge--; 3173 3174 /* concatenate the fragment to the head mbuf */ 3175 m_cat(m, m_frag); 3176 3177 frag_size -= frag_len; 3178 } 3179 3180 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3181 3182 return rc; 3183} 3184 3185static uint8_t 3186bxe_rxeof(struct bxe_softc *sc, 3187 struct bxe_fastpath *fp) 3188{ 3189 struct ifnet *ifp = sc->ifnet; 3190 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3191 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3192 int rx_pkts = 0; 3193 int rc = 0; 3194 3195 BXE_FP_RX_LOCK(fp); 3196 3197 /* CQ "next element" is of the size of the regular element */ 3198 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3199 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3200 hw_cq_cons++; 3201 } 3202 3203 bd_cons = fp->rx_bd_cons; 3204 bd_prod = fp->rx_bd_prod; 3205 bd_prod_fw = bd_prod; 3206 sw_cq_cons = fp->rx_cq_cons; 3207 sw_cq_prod = fp->rx_cq_prod; 3208 3209 /* 3210 * Memory barrier necessary as speculative reads of the rx 3211 * buffer can be ahead of the index in the status block 3212 */ 3213 rmb(); 3214 3215 BLOGD(sc, DBG_RX, 3216 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3217 fp->index, hw_cq_cons, sw_cq_cons); 3218 3219 while (sw_cq_cons != hw_cq_cons) { 3220 struct bxe_sw_rx_bd *rx_buf = NULL; 3221 union eth_rx_cqe *cqe; 3222 struct eth_fast_path_rx_cqe *cqe_fp; 3223 uint8_t cqe_fp_flags; 3224 enum eth_rx_cqe_type cqe_fp_type; 3225 uint16_t len, lenonbd, pad; 3226 struct mbuf *m = NULL; 3227 3228 comp_ring_cons = RCQ(sw_cq_cons); 3229 bd_prod = RX_BD(bd_prod); 3230 bd_cons = RX_BD(bd_cons); 3231 3232 cqe = &fp->rcq_chain[comp_ring_cons]; 3233 cqe_fp = &cqe->fast_path_cqe; 3234 cqe_fp_flags = cqe_fp->type_error_flags; 3235 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3236 3237 BLOGD(sc, DBG_RX, 3238 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3239 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3240 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3241 fp->index, 3242 hw_cq_cons, 3243 sw_cq_cons, 3244 bd_prod, 3245 bd_cons, 3246 CQE_TYPE(cqe_fp_flags), 3247 cqe_fp_flags, 3248 cqe_fp->status_flags, 3249 le32toh(cqe_fp->rss_hash_result), 3250 le16toh(cqe_fp->vlan_tag), 3251 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3252 le16toh(cqe_fp->len_on_bd)); 3253 3254 /* is this a slowpath msg? */ 3255 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3256 bxe_sp_event(sc, fp, cqe); 3257 goto next_cqe; 3258 } 3259 3260 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3261 3262 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3263 struct bxe_sw_tpa_info *tpa_info; 3264 uint16_t frag_size, pages; 3265 uint8_t queue; 3266 3267 if (CQE_TYPE_START(cqe_fp_type)) { 3268 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3269 bd_cons, bd_prod, cqe_fp); 3270 m = NULL; /* packet not ready yet */ 3271 goto next_rx; 3272 } 3273 3274 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3275 ("CQE type is not STOP! 
(0x%x)\n", cqe_fp_type));

            queue = cqe->end_agg_cqe.queue_index;
            tpa_info = &fp->rx_tpa_info[queue];

            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
                  fp->index, queue);

            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
                         tpa_info->len_on_bd);
            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
                         &cqe->end_agg_cqe, comp_ring_cons);

            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);

            goto next_cqe;
        }

        /* non TPA */

        /* is this an error packet? */
        if (__predict_false(cqe_fp_flags &
                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
            fp->eth_q_stats.rx_soft_errors++;
            goto next_rx;
        }

        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
        lenonbd = le16toh(cqe_fp->len_on_bd);
        pad = cqe_fp->placement_offset;

        m = rx_buf->m;

        if (__predict_false(m == NULL)) {
            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
                  bd_cons, fp->index);
            goto next_rx;
        }

        /* XXX double copy if packet length under a threshold */

        /*
         * If all the buffer descriptors are filled with mbufs then fill in
         * the current consumer index with a new BD. Else if a maximum Rx
         * buffer limit is imposed then fill in the next producer index.
         */
        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
                                      bd_prod : bd_cons);
        if (rc != 0) {

            /* we simply reuse the received mbuf and don't post it to the stack */
            m = NULL;

            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
                  fp->index, rc);
            fp->eth_q_stats.rx_soft_errors++;

            if (sc->max_rx_bufs != RX_BD_USABLE) {
                /* copy this consumer index to the producer index */
                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
                       sizeof(struct bxe_sw_rx_bd));
                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
            }

            goto next_rx;
        }

        /* current mbuf was detached from the bd */
        fp->eth_q_stats.mbuf_alloc_rx--;

        /* we allocated a replacement mbuf, fixup the current one */
        m_adj(m, pad);
        m->m_pkthdr.len = m->m_len = len;

        if ((len > 60) && (len > lenonbd)) {
            fp->eth_q_stats.rx_bxe_service_rxsgl++;
            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
            if (rc)
                break;
            fp->eth_q_stats.rx_jumbo_sge_pkts++;
        } else if (lenonbd < len) {
            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
        }

        /* assign the packet to this interface */
        m->m_pkthdr.rcvif = ifp;

        /* assume no hardware checksum has completed */
        m->m_pkthdr.csum_flags = 0;

        /* validate checksum if offload enabled */
        if (ifp->if_capenable & IFCAP_RXCSUM) {
            /* check for a valid IP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                }
            }

            /* check for a valid TCP/UDP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
                if (__predict_false(cqe_fp_flags &
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3389 fp->eth_q_stats.rx_hw_csum_errors++; 3390 } else { 3391 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3392 m->m_pkthdr.csum_data = 0xFFFF; 3393 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3394 CSUM_PSEUDO_HDR); 3395 } 3396 } 3397 } 3398 3399 /* if there is a VLAN tag then flag that info */ 3400 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { 3401 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3402 m->m_flags |= M_VLANTAG; 3403 } 3404 3405#if __FreeBSD_version >= 800000 3406 /* specify what RSS queue was used for this flow */ 3407 m->m_pkthdr.flowid = fp->index; 3408 BXE_SET_FLOWID(m); 3409#endif 3410 3411next_rx: 3412 3413 bd_cons = RX_BD_NEXT(bd_cons); 3414 bd_prod = RX_BD_NEXT(bd_prod); 3415 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3416 3417 /* pass the frame to the stack */ 3418 if (__predict_true(m != NULL)) { 3419 ifp->if_ipackets++; 3420 rx_pkts++; 3421 (*ifp->if_input)(ifp, m); 3422 } 3423 3424next_cqe: 3425 3426 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3427 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3428 3429 /* limit spinning on the queue */ 3430 if (rc != 0) 3431 break; 3432 3433 if (rx_pkts == sc->rx_budget) { 3434 fp->eth_q_stats.rx_budget_reached++; 3435 break; 3436 } 3437 } /* while work to do */ 3438 3439 fp->rx_bd_cons = bd_cons; 3440 fp->rx_bd_prod = bd_prod_fw; 3441 fp->rx_cq_cons = sw_cq_cons; 3442 fp->rx_cq_prod = sw_cq_prod; 3443 3444 /* Update producers */ 3445 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3446 3447 fp->eth_q_stats.rx_pkts += rx_pkts; 3448 fp->eth_q_stats.rx_calls++; 3449 3450 BXE_FP_RX_UNLOCK(fp); 3451 3452 return (sw_cq_cons != hw_cq_cons); 3453} 3454 3455static uint16_t 3456bxe_free_tx_pkt(struct bxe_softc *sc, 3457 struct bxe_fastpath *fp, 3458 uint16_t idx) 3459{ 3460 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3461 struct eth_tx_start_bd *tx_start_bd; 3462 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3463 uint16_t new_cons; 3464 int nbd; 3465 3466 /* unmap the mbuf from non-paged memory */ 3467 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3468 3469 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3470 nbd = le16toh(tx_start_bd->nbd) - 1; 3471 3472 new_cons = (tx_buf->first_bd + nbd); 3473 3474 /* free the mbuf */ 3475 if (__predict_true(tx_buf->m != NULL)) { 3476 m_freem(tx_buf->m); 3477 fp->eth_q_stats.mbuf_alloc_tx--; 3478 } else { 3479 fp->eth_q_stats.tx_chain_lost_mbuf++; 3480 } 3481 3482 tx_buf->m = NULL; 3483 tx_buf->first_bd = 0; 3484 3485 return (new_cons); 3486} 3487 3488/* transmit timeout watchdog */ 3489static int 3490bxe_watchdog(struct bxe_softc *sc, 3491 struct bxe_fastpath *fp) 3492{ 3493 BXE_FP_TX_LOCK(fp); 3494 3495 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3496 BXE_FP_TX_UNLOCK(fp); 3497 return (0); 3498 } 3499 3500 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3501 3502 BXE_FP_TX_UNLOCK(fp); 3503 BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK); 3504 taskqueue_enqueue_timeout(taskqueue_thread, 3505 &sc->sp_err_timeout_task, hz/10); 3506 3507 return (-1); 3508} 3509 3510/* processes transmit completions */ 3511static uint8_t 3512bxe_txeof(struct bxe_softc *sc, 3513 struct bxe_fastpath *fp) 3514{ 3515 struct ifnet *ifp = sc->ifnet; 3516 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3517 uint16_t tx_bd_avail; 3518 3519 BXE_FP_TX_LOCK_ASSERT(fp); 3520 3521 bd_cons = fp->tx_bd_cons; 3522 hw_cons = le16toh(*fp->tx_cons_sb); 3523 sw_cons = fp->tx_pkt_cons; 3524 3525 while (sw_cons != hw_cons) { 3526 
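        /*
         * hw_cons is the last packet index the chip reports as completed
         * (read from the status block above); advance the software
         * consumer toward it, reclaiming one transmitted packet per pass.
         */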
pkt_cons = TX_BD(sw_cons);

        BLOGD(sc, DBG_TX,
              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
              fp->index, hw_cons, sw_cons, pkt_cons);

        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);

        sw_cons++;
    }

    fp->tx_pkt_cons = sw_cons;
    fp->tx_bd_cons  = bd_cons;

    BLOGD(sc, DBG_TX,
          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);

    mb();

    tx_bd_avail = bxe_tx_avail(sc, fp);

    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
    } else {
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    }

    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
        /* reset the watchdog timer if there are pending transmits */
        fp->watchdog_timer = BXE_TX_TIMEOUT;
        return (TRUE);
    } else {
        /* clear watchdog when there are no pending transmits */
        fp->watchdog_timer = 0;
        return (FALSE);
    }
}

static void
bxe_drain_tx_queues(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i, count;

    /* wait until all TX fastpath tasks have completed */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        count = 1000;

        while (bxe_has_tx_work(fp)) {

            BXE_FP_TX_LOCK(fp);
            bxe_txeof(sc, fp);
            BXE_FP_TX_UNLOCK(fp);

            if (count == 0) {
                BLOGE(sc, "Timeout waiting for fp[%d] "
                          "transmits to complete!\n", i);
                bxe_panic(sc, ("tx drain failure\n"));
                return;
            }

            count--;
            DELAY(1000);
            rmb();
        }
    }

    return;
}

static int
bxe_del_all_macs(struct bxe_softc          *sc,
                 struct ecore_vlan_mac_obj *mac_obj,
                 int                       mac_type,
                 uint8_t                   wait_for_comp)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    int rc;

    /* wait for completion of the request, if asked to */
    if (wait_for_comp) {
        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    }

    /* Set the mac type of addresses we want to clear */
    bxe_set_bit(mac_type, &vlan_mac_flags);

    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
              rc, mac_type, wait_for_comp);
    }

    return (rc);
}

static int
bxe_fill_accept_flags(struct bxe_softc *sc,
                      uint32_t         rx_mode,
                      unsigned long    *rx_accept_flags,
                      unsigned long    *tx_accept_flags)
{
    /* Clear the flags first */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    switch (rx_mode) {
    case BXE_RX_MODE_NONE:
        /*
         * 'drop all' supersedes any accept flags that may have been
         * passed to the function.
         */
        break;

    case BXE_RX_MODE_NORMAL:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_ALLMULTI:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_PROMISC:
        /*
         * According to the definition of SI mode, an iface in promisc
         * mode should receive matched and unmatched (in resolution of
         * port) unicast packets.
         */
        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        if (IS_MF_SI(sc)) {
            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
        } else {
            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        }

        break;

    default:
        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
        return (-1);
    }

    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
    if (rx_mode != BXE_RX_MODE_NONE) {
        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
    }

    return (0);
}

static int
bxe_set_q_rx_mode(struct bxe_softc *sc,
                  uint8_t          cl_id,
                  unsigned long    rx_mode_flags,
                  unsigned long    rx_accept_flags,
                  unsigned long    tx_accept_flags,
                  unsigned long    ramrod_flags)
{
    struct ecore_rx_mode_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* Prepare ramrod parameters */
    ramrod_param.cid = 0;
    ramrod_param.cl_id = cl_id;
    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
    ramrod_param.func_id = SC_FUNC(sc);

    ramrod_param.pstate = &sc->sp_state;
    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;

    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);

    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    ramrod_param.ramrod_flags = ramrod_flags;
    ramrod_param.rx_mode_flags = rx_mode_flags;

    ramrod_param.rx_accept_flags = rx_accept_flags;
    ramrod_param.tx_accept_flags = tx_accept_flags;

    rc = ecore_config_rx_mode(sc, &ramrod_param);
    if (rc < 0) {
        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
                  "rx_accept_flags 0x%x tx_accept_flags 0x%x "
                  "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
              (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
              (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
        return (rc);
    }

    return (0);
}

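/*
 * The usual flow for (re)programming the RX filters is: translate the
 * driver-level mode into accept flags with bxe_fill_accept_flags(), then
 * hand those to bxe_set_q_rx_mode(), which wraps them in an ecore rx_mode
 * ramrod. bxe_set_storm_rx_mode() below does exactly that for the default
 * client.
 */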
3750static int 3751bxe_set_storm_rx_mode(struct bxe_softc *sc) 3752{ 3753 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3754 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3755 int rc; 3756 3757 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3758 &tx_accept_flags); 3759 if (rc) { 3760 return (rc); 3761 } 3762 3763 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3764 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3765 3766 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3767 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3768 rx_accept_flags, tx_accept_flags, 3769 ramrod_flags)); 3770} 3771 3772/* returns the "mcp load_code" according to global load_count array */ 3773static int 3774bxe_nic_load_no_mcp(struct bxe_softc *sc) 3775{ 3776 int path = SC_PATH(sc); 3777 int port = SC_PORT(sc); 3778 3779 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3780 path, load_count[path][0], load_count[path][1], 3781 load_count[path][2]); 3782 load_count[path][0]++; 3783 load_count[path][1 + port]++; 3784 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3785 path, load_count[path][0], load_count[path][1], 3786 load_count[path][2]); 3787 if (load_count[path][0] == 1) { 3788 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3789 } else if (load_count[path][1 + port] == 1) { 3790 return (FW_MSG_CODE_DRV_LOAD_PORT); 3791 } else { 3792 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3793 } 3794} 3795 3796/* returns the "mcp load_code" according to global load_count array */ 3797static int 3798bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3799{ 3800 int port = SC_PORT(sc); 3801 int path = SC_PATH(sc); 3802 3803 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3804 path, load_count[path][0], load_count[path][1], 3805 load_count[path][2]); 3806 load_count[path][0]--; 3807 load_count[path][1 + port]--; 3808 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3809 path, load_count[path][0], load_count[path][1], 3810 load_count[path][2]); 3811 if (load_count[path][0] == 0) { 3812 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3813 } else if (load_count[path][1 + port] == 0) { 3814 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3815 } else { 3816 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3817 } 3818} 3819 3820/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3821static uint32_t 3822bxe_send_unload_req(struct bxe_softc *sc, 3823 int unload_mode) 3824{ 3825 uint32_t reset_code = 0; 3826 3827 /* Select the UNLOAD request mode */ 3828 if (unload_mode == UNLOAD_NORMAL) { 3829 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3830 } else { 3831 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3832 } 3833 3834 /* Send the request to the MCP */ 3835 if (!BXE_NOMCP(sc)) { 3836 reset_code = bxe_fw_command(sc, reset_code, 0); 3837 } else { 3838 reset_code = bxe_nic_unload_no_mcp(sc); 3839 } 3840 3841 return (reset_code); 3842} 3843 3844/* send UNLOAD_DONE command to the MCP */ 3845static void 3846bxe_send_unload_done(struct bxe_softc *sc, 3847 uint8_t keep_link) 3848{ 3849 uint32_t reset_param = 3850 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 3851 3852 /* Report UNLOAD_DONE to MCP */ 3853 if (!BXE_NOMCP(sc)) { 3854 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 3855 } 3856} 3857 3858static int 3859bxe_func_wait_started(struct bxe_softc *sc) 3860{ 3861 int tout = 50; 3862 3863 if (!sc->port.pmf) { 3864 return (0); 3865 } 3866 3867 /* 3868 * (assumption: No Attention from MCP at this stage) 3869 * PMF probably in the middle of TX disable/enable transaction 3870 * 1. 
Sync the ISR for the default SB
3871  *    2. Sync the SP queue - this guarantees that attention handling has started
3872  *    3. Wait until the TX disable/enable transaction completes
3873  *
3874  * Steps 1+2 guarantee that if a DCBX attention was scheduled, it has
3875  * already changed the pending bit of the transaction from STARTED-->TX_STOPPED;
3876  * if we have already received the completion for the transaction the state
3877  * is TX_STOPPED. The state returns to STARTED after the TX_STOPPED-->STARTED
3878  * transaction completes.
3879  */
3880
3881     /* XXX make sure default SB ISR is done */
3882     /* need a way to synchronize an irq (intr_mtx?) */
3883
3884     /* XXX flush any work queues */
3885
3886     while (ecore_func_get_state(sc, &sc->func_obj) !=
3887            ECORE_F_STATE_STARTED && tout--) {
3888         DELAY(20000);
3889     }
3890
3891     if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3892         /*
3893          * Failed to complete the transaction in a "good way".
3894          * Force both transactions with the CLR bit.
3895          */
3896         struct ecore_func_state_params func_params = { NULL };
3897
3898         BLOGE(sc, "Unexpected function state! "
3899                   "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3900
3901         func_params.f_obj = &sc->func_obj;
3902         bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3903
3904         /* STARTED-->TX_STOPPED */
3905         func_params.cmd = ECORE_F_CMD_TX_STOP;
3906         ecore_func_state_change(sc, &func_params);
3907
3908         /* TX_STOPPED-->STARTED */
3909         func_params.cmd = ECORE_F_CMD_TX_START;
3910         return (ecore_func_state_change(sc, &func_params));
3911     }
3912
3913     return (0);
3914 }
3915
3916 static int
3917 bxe_stop_queue(struct bxe_softc *sc,
3918                int              index)
3919 {
3920     struct bxe_fastpath *fp = &sc->fp[index];
3921     struct ecore_queue_state_params q_params = { NULL };
3922     int rc;
3923
3924     BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3925
3926     q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3927     /* We want to wait for completion in this context */
3928     bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3929
3930     /* Stop the primary connection: */
3931
3932     /* ...halt the connection */
3933     q_params.cmd = ECORE_Q_CMD_HALT;
3934     rc = ecore_queue_state_change(sc, &q_params);
3935     if (rc) {
3936         return (rc);
3937     }
3938
3939     /* ...terminate the connection */
3940     q_params.cmd = ECORE_Q_CMD_TERMINATE;
3941     memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3942     q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3943     rc = ecore_queue_state_change(sc, &q_params);
3944     if (rc) {
3945         return (rc);
3946     }
3947
3948     /* ...delete cfc entry */
3949     q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3950     memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3951     q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3952     return (ecore_queue_state_change(sc, &q_params));
3953 }
3954
3955 /* wait for the outstanding SP commands */
3956 static inline uint8_t
3957 bxe_wait_sp_comp(struct bxe_softc *sc,
3958                  unsigned long    mask)
3959 {
3960     unsigned long tmp;
3961     int tout = 5000; /* wait for 5 secs tops */
3962
3963     while (tout--) {
3964         mb();
3965         if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3966             return (TRUE);
3967         }
3968
3969         DELAY(1000);
3970     }
3971
3972     mb();
3973
3974     tmp = atomic_load_acq_long(&sc->sp_state);
3975     if (tmp & mask) {
3976         BLOGE(sc, "Filtering completion timed out: "
3977                   "sp_state 0x%lx, mask 0x%lx\n",
3978               tmp, mask);
3979         return (FALSE);
3980     }
3981
3982     return (TRUE);
3983 }
3984
3985 static int
3986 bxe_func_stop(struct bxe_softc *sc)
3987 {
3988     struct ecore_func_state_params
func_params = { NULL }; 3989 int rc; 3990 3991 /* prepare parameters for function state transitions */ 3992 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 3993 func_params.f_obj = &sc->func_obj; 3994 func_params.cmd = ECORE_F_CMD_STOP; 3995 3996 /* 3997 * Try to stop the function the 'good way'. If it fails (in case 3998 * of a parity error during bxe_chip_cleanup()) and we are 3999 * not in a debug mode, perform a state transaction in order to 4000 * enable further HW_RESET transaction. 4001 */ 4002 rc = ecore_func_state_change(sc, &func_params); 4003 if (rc) { 4004 BLOGE(sc, "FUNC_STOP ramrod failed. " 4005 "Running a dry transaction (%d)\n", rc); 4006 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4007 return (ecore_func_state_change(sc, &func_params)); 4008 } 4009 4010 return (0); 4011} 4012 4013static int 4014bxe_reset_hw(struct bxe_softc *sc, 4015 uint32_t load_code) 4016{ 4017 struct ecore_func_state_params func_params = { NULL }; 4018 4019 /* Prepare parameters for function state transitions */ 4020 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4021 4022 func_params.f_obj = &sc->func_obj; 4023 func_params.cmd = ECORE_F_CMD_HW_RESET; 4024 4025 func_params.params.hw_init.load_phase = load_code; 4026 4027 return (ecore_func_state_change(sc, &func_params)); 4028} 4029 4030static void 4031bxe_int_disable_sync(struct bxe_softc *sc, 4032 int disable_hw) 4033{ 4034 if (disable_hw) { 4035 /* prevent the HW from sending interrupts */ 4036 bxe_int_disable(sc); 4037 } 4038 4039 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4040 /* make sure all ISRs are done */ 4041 4042 /* XXX make sure sp_task is not running */ 4043 /* cancel and flush work queues */ 4044} 4045 4046static void 4047bxe_chip_cleanup(struct bxe_softc *sc, 4048 uint32_t unload_mode, 4049 uint8_t keep_link) 4050{ 4051 int port = SC_PORT(sc); 4052 struct ecore_mcast_ramrod_params rparam = { NULL }; 4053 uint32_t reset_code; 4054 int i, rc = 0; 4055 4056 bxe_drain_tx_queues(sc); 4057 4058 /* give HW time to discard old tx messages */ 4059 DELAY(1000); 4060 4061 /* Clean all ETH MACs */ 4062 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4063 if (rc < 0) { 4064 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4065 } 4066 4067 /* Clean up UC list */ 4068 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4069 if (rc < 0) { 4070 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4071 } 4072 4073 /* Disable LLH */ 4074 if (!CHIP_IS_E1(sc)) { 4075 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4076 } 4077 4078 /* Set "drop all" to stop Rx */ 4079 4080 /* 4081 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4082 * a race between the completion code and this code. 4083 */ 4084 BXE_MCAST_LOCK(sc); 4085 4086 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4087 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4088 } else { 4089 bxe_set_storm_rx_mode(sc); 4090 } 4091 4092 /* Clean up multicast configuration */ 4093 rparam.mcast_obj = &sc->mcast_obj; 4094 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4095 if (rc < 0) { 4096 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4097 } 4098 4099 BXE_MCAST_UNLOCK(sc); 4100 4101 // XXX bxe_iov_chip_cleanup(sc); 4102 4103 /* 4104 * Send the UNLOAD_REQUEST to the MCP. This will return if 4105 * this function should perform FUNCTION, PORT, or COMMON HW 4106 * reset. 
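 * In the no-MCP case the same decision is made locally from the global
 * load_count[] bookkeeping (see bxe_nic_unload_no_mcp() above): the last
 * function on the path triggers the COMMON reset, the last one on a port
 * the PORT reset, and any other function only a FUNCTION reset.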
4107  */
4108     reset_code = bxe_send_unload_req(sc, unload_mode);
4109
4110     /*
4111      * (assumption: No Attention from MCP at this stage)
4112      * PMF probably in the middle of TX disable/enable transaction
4113      */
4114     rc = bxe_func_wait_started(sc);
4115     if (rc) {
4116         BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4117     }
4118
4119     /*
4120      * Close multi and leading connections.
4121      * Completions for ramrods are collected in a synchronous way.
4122      */
4123     for (i = 0; i < sc->num_queues; i++) {
4124         if (bxe_stop_queue(sc, i)) {
4125             goto unload_error;
4126         }
4127     }
4128
4129     /*
4130      * If the SP settings did not get completed so far then something
4131      * has gone very wrong.
4132      */
4133     if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4134         BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4135     }
4136
4137 unload_error:
4138
4139     rc = bxe_func_stop(sc);
4140     if (rc) {
4141         BLOGE(sc, "Function stop failed!(%d)\n", rc);
4142     }
4143
4144     /* disable HW interrupts */
4145     bxe_int_disable_sync(sc, TRUE);
4146
4147     /* detach interrupts */
4148     bxe_interrupt_detach(sc);
4149
4150     /* Reset the chip */
4151     rc = bxe_reset_hw(sc, reset_code);
4152     if (rc) {
4153         BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4154     }
4155
4156     /* Report UNLOAD_DONE to MCP */
4157     bxe_send_unload_done(sc, keep_link);
4158 }
4159
4160 static void
4161 bxe_disable_close_the_gate(struct bxe_softc *sc)
4162 {
4163     uint32_t val;
4164     int port = SC_PORT(sc);
4165
4166     BLOGD(sc, DBG_LOAD,
4167           "Disabling 'close the gates'\n");
4168
4169     if (CHIP_IS_E1(sc)) {
4170         uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4171                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
4172         val = REG_RD(sc, addr);
4173         val &= ~(0x300);
4174         REG_WR(sc, addr, val);
4175     } else {
4176         val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4177         val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4178                  MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4179         REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4180     }
4181 }
4182
4183 /*
4184  * Cleans objects that have internal lists without sending
4185  * ramrods. Should be run when interrupts are disabled.
4186  */
4187 static void
4188 bxe_squeeze_objects(struct bxe_softc *sc)
4189 {
4190     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4191     struct ecore_mcast_ramrod_params rparam = { NULL };
4192     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4193     int rc;
4194
4195     /* Cleanup MACs' object first... */
4196
4197     /* Wait for completion of the requested commands */
4198     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4199     /* Perform a dry cleanup */
4200     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4201
4202     /* Clean ETH primary MAC */
4203     bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4204     rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4205                              &ramrod_flags);
4206     if (rc != 0) {
4207         BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4208     }
4209
4210     /* Cleanup UC list */
4211     vlan_mac_flags = 0;
4212     bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4213     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4214                              &ramrod_flags);
4215     if (rc != 0) {
4216         BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4217     }
4218
4219     /* Now clean the mcast object... */
4220
4221     rparam.mcast_obj = &sc->mcast_obj;
4222     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4223
4224     /* Add a DEL command...
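 * Since RAMROD_DRV_CLR_ONLY is set in rparam above, the DEL is executed
 * as a driver-only cleanup (no ramrod is sent to the chip); the CONT
 * commands below then drain whatever the DEL left on the pending list.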
 */
4225     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4226     if (rc < 0) {
4227         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4228     }
4229
4230     /* now wait until all pending commands are cleared */
4231
4232     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4233     while (rc != 0) {
4234         if (rc < 0) {
4235             BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4236             return;
4237         }
4238
4239         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4240     }
4241 }
4242
4243 /* stop the controller */
4244 static __noinline int
4245 bxe_nic_unload(struct bxe_softc *sc,
4246                uint32_t         unload_mode,
4247                uint8_t          keep_link)
4248 {
4249     uint8_t global = FALSE;
4250     uint32_t val;
4251     int i;
4252
4253     BXE_CORE_LOCK_ASSERT(sc);
4254
4255     sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
4256
4257     for (i = 0; i < sc->num_queues; i++) {
4258         struct bxe_fastpath *fp;
4259
4260         fp = &sc->fp[i];
4261         fp->watchdog_timer = 0;
4262         BXE_FP_TX_LOCK(fp);
4263         BXE_FP_TX_UNLOCK(fp);
4264     }
4265
4266     BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4267
4268     /* mark driver as unloaded in shmem2 */
4269     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4270         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4271         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4272                   val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4273     }
4274
4275     if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4276         (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4277
4278         if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4279             /*
4280              * We can get here if the driver has been unloaded
4281              * during parity error recovery and is either waiting for a
4282              * leader to complete or for other functions to unload and
4283              * then ifconfig down has been issued. In this case we want to
4284              * unload and let the other functions complete the recovery
4285              * process.
4286              */
4287             sc->recovery_state = BXE_RECOVERY_DONE;
4288             sc->is_leader = 0;
4289             bxe_release_leader_lock(sc);
4290             mb();
4291             BLOGD(sc, DBG_LOAD, "Releasing leadership...\n");
4292         }
4293         BLOGE(sc, "Can't unload in closed or error state recovery_state 0x%x"
4294               " state = 0x%x\n", sc->recovery_state, sc->state);
4295         return (-1);
4296     }
4297
4298     /*
4299      * Nothing to do during unload if the previous bxe_nic_load()
4300      * did not complete successfully - all resources are released.
4301      */
4302     if ((sc->state == BXE_STATE_CLOSED) ||
4303         (sc->state == BXE_STATE_ERROR)) {
4304         return (0);
4305     }
4306
4307     sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4308     mb();
4309
4310     /* stop tx */
4311     bxe_tx_disable(sc);
4312
4313     sc->rx_mode = BXE_RX_MODE_NONE;
4314     /* XXX set rx mode ???
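 * (rx_mode is forced to BXE_RX_MODE_NONE above but is not pushed to the
 * chip at this point; on a normal unload bxe_chip_cleanup() installs the
 * "drop all" filter via bxe_set_storm_rx_mode() instead)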
 */
4315
4316     if (IS_PF(sc) && !sc->grcdump_done) {
4317         /* set ALWAYS_ALIVE bit in shmem */
4318         sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4319
4320         bxe_drv_pulse(sc);
4321
4322         bxe_stats_handle(sc, STATS_EVENT_STOP);
4323         bxe_save_statistics(sc);
4324     }
4325
4326     /* wait till consumers catch up with producers in all queues */
4327     bxe_drain_tx_queues(sc);
4328
4329     /* if we are a VF, indicate to the PF that this function is going down
4330      * (the PF will delete the sp elements and clear the initializations)
4331      */
4332     if (IS_VF(sc)) {
4333         ; /* bxe_vfpf_close_vf(sc); */
4334     } else if (unload_mode != UNLOAD_RECOVERY) {
4335         /* if this is a normal/close unload need to clean up chip */
4336         if (!sc->grcdump_done)
4337             bxe_chip_cleanup(sc, unload_mode, keep_link);
4338     } else {
4339         /* Send the UNLOAD_REQUEST to the MCP */
4340         bxe_send_unload_req(sc, unload_mode);
4341
4342         /*
4343          * Prevent transactions to the host from the functions on the
4344          * engine that doesn't reset global blocks in case of a global
4345          * attention once the global blocks are reset and the gates are
4346          * opened (the engine whose leader will perform the recovery
4347          * last).
4348          */
4349         if (!CHIP_IS_E1x(sc)) {
4350             bxe_pf_disable(sc);
4351         }
4352
4353         /* disable HW interrupts */
4354         bxe_int_disable_sync(sc, TRUE);
4355
4356         /* detach interrupts */
4357         bxe_interrupt_detach(sc);
4358
4359         /* Report UNLOAD_DONE to MCP */
4360         bxe_send_unload_done(sc, FALSE);
4361     }
4362
4363     /*
4364      * At this stage no more interrupts will arrive, so we may safely clean
4365      * the queueable objects here in case they failed to get cleaned so far.
4366      */
4367     if (IS_PF(sc)) {
4368         bxe_squeeze_objects(sc);
4369     }
4370
4371     /* There should be no more pending SP commands at this stage */
4372     sc->sp_state = 0;
4373
4374     sc->port.pmf = 0;
4375
4376     bxe_free_fp_buffers(sc);
4377
4378     if (IS_PF(sc)) {
4379         bxe_free_mem(sc);
4380     }
4381
4382     bxe_free_fw_stats_mem(sc);
4383
4384     sc->state = BXE_STATE_CLOSED;
4385
4386     /*
4387      * Check if there are pending parity attentions. If there are, set
4388      * RECOVERY_IN_PROGRESS.
4389      */
4390     if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4391         bxe_set_reset_in_progress(sc);
4392
4393         /* Set RESET_IS_GLOBAL if needed */
4394         if (global) {
4395             bxe_set_reset_global(sc);
4396         }
4397     }
4398
4399     /*
4400      * The last driver must disable a "close the gate" if there is no
4401      * parity attention or "process kill" pending.
4402      */
4403     if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4404         bxe_reset_is_done(sc, SC_PATH(sc))) {
4405         bxe_disable_close_the_gate(sc);
4406     }
4407
4408     BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4409
4410     bxe_link_report(sc);
4411
4412     return (0);
4413 }
4414
4415 /*
4416  * Called by the OS to set various media options (i.e. link, speed, etc.) when
4417  * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4418  */
4419 static int
4420 bxe_ifmedia_update(struct ifnet *ifp)
4421 {
4422     struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4423     struct ifmedia *ifm;
4424
4425     ifm = &sc->ifmedia;
4426
4427     /* We only support Ethernet media type. */
4428     if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4429         return (EINVAL);
4430     }
4431
4432     switch (IFM_SUBTYPE(ifm->ifm_media)) {
4433     case IFM_AUTO:
4434         break;
4435     case IFM_10G_CX4:
4436     case IFM_10G_SR:
4437     case IFM_10G_T:
4438     case IFM_10G_TWINAX:
4439     default:
4440         /* We don't support changing the media type.
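 * (IFM_AUTO is the only subtype accepted above; the fixed 10G subtypes
 * deliberately fall through to this error path)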
 */
4441         BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4442               IFM_SUBTYPE(ifm->ifm_media));
4443         return (EINVAL);
4444     }
4445
4446     return (0);
4447 }
4448
4449 /*
4450  * Called by the OS to get the current media status (i.e. link, speed, etc.).
4451  */
4452 static void
4453 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4454 {
4455     struct bxe_softc *sc = ifp->if_softc;
4456
4457     /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4458        line if the IFM_AVALID flag is *NOT* set. So we need to set this
4459        flag unconditionally (irrespective of the administrative
4460        'up/down' state of the interface) to ensure that the line is always
4461        displayed.
4462      */
4463     ifmr->ifm_status = IFM_AVALID;
4464
4465     /* Setup the default interface info. */
4466     ifmr->ifm_active = IFM_ETHER;
4467
4468     /* Report link down if the driver isn't running. */
4469     if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4470         ifmr->ifm_active |= IFM_NONE;
4471         BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4472         BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4473               __func__, sc->link_vars.link_up);
4474         return;
4475     }
4476
4477
4478     if (sc->link_vars.link_up) {
4479         ifmr->ifm_status |= IFM_ACTIVE;
4480         ifmr->ifm_active |= IFM_FDX;
4481     } else {
4482         ifmr->ifm_active |= IFM_NONE;
4483         BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4484               __func__);
4485         return;
4486     }
4487
4488     ifmr->ifm_active |= sc->media;
4489     return;
4490 }
4491
4492 static void
4493 bxe_handle_chip_tq(void *context,
4494                    int  pending)
4495 {
4496     struct bxe_softc *sc = (struct bxe_softc *)context;
4497     long work = atomic_load_acq_long(&sc->chip_tq_flags);
4498
4499     switch (work)
4500     {
4501     case CHIP_TQ_REINIT:
4502         if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4503             /* restart the interface */
4504             BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4505             bxe_periodic_stop(sc);
4506             BXE_CORE_LOCK(sc);
4507             bxe_stop_locked(sc);
4508             bxe_init_locked(sc);
4509             BXE_CORE_UNLOCK(sc);
4510         }
4511         break;
4512
4513     default:
4514         break;
4515     }
4516 }
4517
4518 /*
4519  * Handles any IOCTL calls from the operating system.
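 * Handled commands are SIOCSIFMTU, SIOCSIFFLAGS, SIOCADDMULTI/SIOCDELMULTI,
 * SIOCSIFCAP, and SIOCSIFMEDIA/SIOCGIFMEDIA; anything else is passed
 * through to ether_ioctl().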
4520 * 4521 * Returns: 4522 * 0 = Success, >0 Failure 4523 */ 4524static int 4525bxe_ioctl(struct ifnet *ifp, 4526 u_long command, 4527 caddr_t data) 4528{ 4529 struct bxe_softc *sc = ifp->if_softc; 4530 struct ifreq *ifr = (struct ifreq *)data; 4531 int mask = 0; 4532 int reinit = 0; 4533 int error = 0; 4534 4535 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4536 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4537 4538 switch (command) 4539 { 4540 case SIOCSIFMTU: 4541 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4542 ifr->ifr_mtu); 4543 4544 if (sc->mtu == ifr->ifr_mtu) { 4545 /* nothing to change */ 4546 break; 4547 } 4548 4549 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4550 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4551 ifr->ifr_mtu, mtu_min, mtu_max); 4552 error = EINVAL; 4553 break; 4554 } 4555 4556 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4557 (unsigned long)ifr->ifr_mtu); 4558 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu, 4559 (unsigned long)ifr->ifr_mtu); 4560 4561 reinit = 1; 4562 break; 4563 4564 case SIOCSIFFLAGS: 4565 /* toggle the interface state up or down */ 4566 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4567 4568 BXE_CORE_LOCK(sc); 4569 /* check if the interface is up */ 4570 if (ifp->if_flags & IFF_UP) { 4571 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4572 /* set the receive mode flags */ 4573 bxe_set_rx_mode(sc); 4574 } else if(sc->state != BXE_STATE_DISABLED) { 4575 bxe_init_locked(sc); 4576 } 4577 } else { 4578 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4579 bxe_periodic_stop(sc); 4580 bxe_stop_locked(sc); 4581 } 4582 } 4583 BXE_CORE_UNLOCK(sc); 4584 4585 break; 4586 4587 case SIOCADDMULTI: 4588 case SIOCDELMULTI: 4589 /* add/delete multicast addresses */ 4590 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4591 4592 /* check if the interface is up */ 4593 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4594 /* set the receive mode flags */ 4595 BXE_CORE_LOCK(sc); 4596 bxe_set_rx_mode(sc); 4597 BXE_CORE_UNLOCK(sc); 4598 } 4599 4600 break; 4601 4602 case SIOCSIFCAP: 4603 /* find out which capabilities have changed */ 4604 mask = (ifr->ifr_reqcap ^ ifp->if_capenable); 4605 4606 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4607 mask); 4608 4609 /* toggle the LRO capabilites enable flag */ 4610 if (mask & IFCAP_LRO) { 4611 ifp->if_capenable ^= IFCAP_LRO; 4612 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4613 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF"); 4614 reinit = 1; 4615 } 4616 4617 /* toggle the TXCSUM checksum capabilites enable flag */ 4618 if (mask & IFCAP_TXCSUM) { 4619 ifp->if_capenable ^= IFCAP_TXCSUM; 4620 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4621 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF"); 4622 if (ifp->if_capenable & IFCAP_TXCSUM) { 4623 ifp->if_hwassist = (CSUM_IP | 4624 CSUM_TCP | 4625 CSUM_UDP | 4626 CSUM_TSO | 4627 CSUM_TCP_IPV6 | 4628 CSUM_UDP_IPV6); 4629 } else { 4630 ifp->if_hwassist = 0; 4631 } 4632 } 4633 4634 /* toggle the RXCSUM checksum capabilities enable flag */ 4635 if (mask & IFCAP_RXCSUM) { 4636 ifp->if_capenable ^= IFCAP_RXCSUM; 4637 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4638 (ifp->if_capenable & IFCAP_RXCSUM) ? 
"ON" : "OFF"); 4639 if (ifp->if_capenable & IFCAP_RXCSUM) { 4640 ifp->if_hwassist = (CSUM_IP | 4641 CSUM_TCP | 4642 CSUM_UDP | 4643 CSUM_TSO | 4644 CSUM_TCP_IPV6 | 4645 CSUM_UDP_IPV6); 4646 } else { 4647 ifp->if_hwassist = 0; 4648 } 4649 } 4650 4651 /* toggle TSO4 capabilities enabled flag */ 4652 if (mask & IFCAP_TSO4) { 4653 ifp->if_capenable ^= IFCAP_TSO4; 4654 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4655 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF"); 4656 } 4657 4658 /* toggle TSO6 capabilities enabled flag */ 4659 if (mask & IFCAP_TSO6) { 4660 ifp->if_capenable ^= IFCAP_TSO6; 4661 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4662 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF"); 4663 } 4664 4665 /* toggle VLAN_HWTSO capabilities enabled flag */ 4666 if (mask & IFCAP_VLAN_HWTSO) { 4667 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 4668 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4669 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4670 } 4671 4672 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4673 if (mask & IFCAP_VLAN_HWCSUM) { 4674 /* XXX investigate this... */ 4675 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4676 error = EINVAL; 4677 } 4678 4679 /* toggle VLAN_MTU capabilities enable flag */ 4680 if (mask & IFCAP_VLAN_MTU) { 4681 /* XXX investigate this... */ 4682 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4683 error = EINVAL; 4684 } 4685 4686 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4687 if (mask & IFCAP_VLAN_HWTAGGING) { 4688 /* XXX investigate this... */ 4689 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4690 error = EINVAL; 4691 } 4692 4693 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4694 if (mask & IFCAP_VLAN_HWFILTER) { 4695 /* XXX investigate this... */ 4696 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4697 error = EINVAL; 4698 } 4699 4700 /* XXX not yet... 
4701 * IFCAP_WOL_MAGIC 4702 */ 4703 4704 break; 4705 4706 case SIOCSIFMEDIA: 4707 case SIOCGIFMEDIA: 4708 /* set/get interface media */ 4709 BLOGD(sc, DBG_IOCTL, 4710 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 4711 (command & 0xff)); 4712 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 4713 break; 4714 4715 default: 4716 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 4717 (command & 0xff)); 4718 error = ether_ioctl(ifp, command, data); 4719 break; 4720 } 4721 4722 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4723 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 4724 "Re-initializing hardware from IOCTL change\n"); 4725 bxe_periodic_stop(sc); 4726 BXE_CORE_LOCK(sc); 4727 bxe_stop_locked(sc); 4728 bxe_init_locked(sc); 4729 BXE_CORE_UNLOCK(sc); 4730 } 4731 4732 return (error); 4733} 4734 4735static __noinline void 4736bxe_dump_mbuf(struct bxe_softc *sc, 4737 struct mbuf *m, 4738 uint8_t contents) 4739{ 4740 char * type; 4741 int i = 0; 4742 4743 if (!(sc->debug & DBG_MBUF)) { 4744 return; 4745 } 4746 4747 if (m == NULL) { 4748 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 4749 return; 4750 } 4751 4752 while (m) { 4753 4754#if __FreeBSD_version >= 1000000 4755 BLOGD(sc, DBG_MBUF, 4756 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 4757 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 4758 4759 if (m->m_flags & M_PKTHDR) { 4760 BLOGD(sc, DBG_MBUF, 4761 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 4762 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 4763 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 4764 } 4765#else 4766 BLOGD(sc, DBG_MBUF, 4767 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 4768 i, m, m->m_len, m->m_flags, 4769 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data); 4770 4771 if (m->m_flags & M_PKTHDR) { 4772 BLOGD(sc, DBG_MBUF, 4773 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 4774 i, m->m_pkthdr.len, m->m_flags, 4775 "\20\12M_BCAST\13M_MCAST\14M_FRAG" 4776 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" 4777 "\22M_PROMISC\23M_NOFREE", 4778 (int)m->m_pkthdr.csum_flags, 4779 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 4780 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 4781 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 4782 "\14CSUM_PSEUDO_HDR"); 4783 } 4784#endif /* #if __FreeBSD_version >= 1000000 */ 4785 4786 if (m->m_flags & M_EXT) { 4787 switch (m->m_ext.ext_type) { 4788 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 4789 case EXT_SFBUF: type = "EXT_SFBUF"; break; 4790 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 4791 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 4792 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 4793 case EXT_PACKET: type = "EXT_PACKET"; break; 4794 case EXT_MBUF: type = "EXT_MBUF"; break; 4795 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 4796 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 4797 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 4798 case EXT_EXTREF: type = "EXT_EXTREF"; break; 4799 default: type = "UNKNOWN"; break; 4800 } 4801 4802 BLOGD(sc, DBG_MBUF, 4803 "%02d: - m_ext: %p ext_size=%d type=%s\n", 4804 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 4805 } 4806 4807 if (contents) { 4808 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 4809 } 4810 4811 m = m->m_next; 4812 i++; 4813 } 4814} 4815 4816/* 4817 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 4818 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 4819 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD 4820 * The headers comes in a seperate bd in FreeBSD so 13-3=10. 
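 * Worked example (illustrative): with lso_mss = 1448 and a chain of data
 * segments of 150 bytes each, every 10-segment window sums to 1500 >= MSS
 * and the chain may be sent as-is; with 140-byte segments every window
 * sums to 1400 < 1448 and the chain is handed back for defragmentation.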
4821 * Returns: 0 if OK to send, 1 if packet needs further defragmentation 4822 */ 4823static int 4824bxe_chktso_window(struct bxe_softc *sc, 4825 int nsegs, 4826 bus_dma_segment_t *segs, 4827 struct mbuf *m) 4828{ 4829 uint32_t num_wnds, wnd_size, wnd_sum; 4830 int32_t frag_idx, wnd_idx; 4831 unsigned short lso_mss; 4832 int defrag; 4833 4834 defrag = 0; 4835 wnd_sum = 0; 4836 wnd_size = 10; 4837 num_wnds = nsegs - wnd_size; 4838 lso_mss = htole16(m->m_pkthdr.tso_segsz); 4839 4840 /* 4841 * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the 4842 * first window sum of data while skipping the first assuming it is the 4843 * header in FreeBSD. 4844 */ 4845 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 4846 wnd_sum += htole16(segs[frag_idx].ds_len); 4847 } 4848 4849 /* check the first 10 bd window size */ 4850 if (wnd_sum < lso_mss) { 4851 return (1); 4852 } 4853 4854 /* run through the windows */ 4855 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 4856 /* subtract the first mbuf->m_len of the last wndw(-header) */ 4857 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 4858 /* add the next mbuf len to the len of our new window */ 4859 wnd_sum += htole16(segs[frag_idx].ds_len); 4860 if (wnd_sum < lso_mss) { 4861 return (1); 4862 } 4863 } 4864 4865 return (0); 4866} 4867 4868static uint8_t 4869bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 4870 struct mbuf *m, 4871 uint32_t *parsing_data) 4872{ 4873 struct ether_vlan_header *eh = NULL; 4874 struct ip *ip4 = NULL; 4875 struct ip6_hdr *ip6 = NULL; 4876 caddr_t ip = NULL; 4877 struct tcphdr *th = NULL; 4878 int e_hlen, ip_hlen, l4_off; 4879 uint16_t proto; 4880 4881 if (m->m_pkthdr.csum_flags == CSUM_IP) { 4882 /* no L4 checksum offload needed */ 4883 return (0); 4884 } 4885 4886 /* get the Ethernet header */ 4887 eh = mtod(m, struct ether_vlan_header *); 4888 4889 /* handle VLAN encapsulation if present */ 4890 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4891 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4892 proto = ntohs(eh->evl_proto); 4893 } else { 4894 e_hlen = ETHER_HDR_LEN; 4895 proto = ntohs(eh->evl_encap_proto); 4896 } 4897 4898 switch (proto) { 4899 case ETHERTYPE_IP: 4900 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 4901 ip4 = (m->m_len < sizeof(struct ip)) ? 4902 (struct ip *)m->m_next->m_data : 4903 (struct ip *)(m->m_data + e_hlen); 4904 /* ip_hl is number of 32-bit words */ 4905 ip_hlen = (ip4->ip_hl << 2); 4906 ip = (caddr_t)ip4; 4907 break; 4908 case ETHERTYPE_IPV6: 4909 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 4910 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 4911 (struct ip6_hdr *)m->m_next->m_data : 4912 (struct ip6_hdr *)(m->m_data + e_hlen); 4913 /* XXX cannot support offload with IPv6 extensions */ 4914 ip_hlen = sizeof(struct ip6_hdr); 4915 ip = (caddr_t)ip6; 4916 break; 4917 default: 4918 /* We can't offload in this case... */ 4919 /* XXX error stat ??? 
*/ 4920 return (0); 4921 } 4922 4923 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 4924 l4_off = (e_hlen + ip_hlen); 4925 4926 *parsing_data |= 4927 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 4928 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 4929 4930 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 4931 CSUM_TSO | 4932 CSUM_TCP_IPV6)) { 4933 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 4934 th = (struct tcphdr *)(ip + ip_hlen); 4935 /* th_off is number of 32-bit words */ 4936 *parsing_data |= ((th->th_off << 4937 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 4938 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 4939 return (l4_off + (th->th_off << 2)); /* entire header length */ 4940 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 4941 CSUM_UDP_IPV6)) { 4942 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 4943 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 4944 } else { 4945 /* XXX error stat ??? */ 4946 return (0); 4947 } 4948} 4949 4950static uint8_t 4951bxe_set_pbd_csum(struct bxe_fastpath *fp, 4952 struct mbuf *m, 4953 struct eth_tx_parse_bd_e1x *pbd) 4954{ 4955 struct ether_vlan_header *eh = NULL; 4956 struct ip *ip4 = NULL; 4957 struct ip6_hdr *ip6 = NULL; 4958 caddr_t ip = NULL; 4959 struct tcphdr *th = NULL; 4960 struct udphdr *uh = NULL; 4961 int e_hlen, ip_hlen; 4962 uint16_t proto; 4963 uint8_t hlen; 4964 uint16_t tmp_csum; 4965 uint32_t *tmp_uh; 4966 4967 /* get the Ethernet header */ 4968 eh = mtod(m, struct ether_vlan_header *); 4969 4970 /* handle VLAN encapsulation if present */ 4971 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4972 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4973 proto = ntohs(eh->evl_proto); 4974 } else { 4975 e_hlen = ETHER_HDR_LEN; 4976 proto = ntohs(eh->evl_encap_proto); 4977 } 4978 4979 switch (proto) { 4980 case ETHERTYPE_IP: 4981 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 4982 ip4 = (m->m_len < sizeof(struct ip)) ? 4983 (struct ip *)m->m_next->m_data : 4984 (struct ip *)(m->m_data + e_hlen); 4985 /* ip_hl is number of 32-bit words */ 4986 ip_hlen = (ip4->ip_hl << 1); 4987 ip = (caddr_t)ip4; 4988 break; 4989 case ETHERTYPE_IPV6: 4990 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 4991 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 4992 (struct ip6_hdr *)m->m_next->m_data : 4993 (struct ip6_hdr *)(m->m_data + e_hlen); 4994 /* XXX cannot support offload with IPv6 extensions */ 4995 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 4996 ip = (caddr_t)ip6; 4997 break; 4998 default: 4999 /* We can't offload in this case... */ 5000 /* XXX error stat ??? 
*/ 5001 return (0); 5002 } 5003 5004 hlen = (e_hlen >> 1); 5005 5006 /* note that rest of global_data is indirectly zeroed here */ 5007 if (m->m_flags & M_VLANTAG) { 5008 pbd->global_data = 5009 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5010 } else { 5011 pbd->global_data = htole16(hlen); 5012 } 5013 5014 pbd->ip_hlen_w = ip_hlen; 5015 5016 hlen += pbd->ip_hlen_w; 5017 5018 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5019 5020 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5021 CSUM_TSO | 5022 CSUM_TCP_IPV6)) { 5023 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5024 /* th_off is number of 32-bit words */ 5025 hlen += (uint16_t)(th->th_off << 1); 5026 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5027 CSUM_UDP_IPV6)) { 5028 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5029 hlen += (sizeof(struct udphdr) / 2); 5030 } else { 5031 /* valid case as only CSUM_IP was set */ 5032 return (0); 5033 } 5034 5035 pbd->total_hlen_w = htole16(hlen); 5036 5037 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5038 CSUM_TSO | 5039 CSUM_TCP_IPV6)) { 5040 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5041 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5042 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5043 CSUM_UDP_IPV6)) { 5044 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5045 5046 /* 5047 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5048 * checksums and does not know anything about the UDP header and where 5049 * the checksum field is located. It only knows about TCP. Therefore 5050 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5051 * offload. Since the checksum field offset for TCP is 16 bytes and 5052 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5053 * bytes less than the start of the UDP header. This allows the 5054 * hardware to write the checksum in the correct spot. But the 5055 * hardware will compute a checksum which includes the last 10 bytes 5056 * of the IP header. To correct this we tweak the stack computed 5057 * pseudo checksum by folding in the calculation of the inverse 5058 * checksum for those final 10 bytes of the IP header. This allows 5059 * the correct checksum to be computed by the hardware. 5060 */ 5061 5062 /* set pointer 10 bytes before UDP header */ 5063 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5064 5065 /* calculate a pseudo header checksum over the first 10 bytes */ 5066 tmp_csum = in_pseudo(*tmp_uh, 5067 *(tmp_uh + 1), 5068 *(uint16_t *)(tmp_uh + 2)); 5069 5070 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5071 } 5072 5073 return (hlen * 2); /* entire header length, number of bytes */ 5074} 5075 5076static void 5077bxe_set_pbd_lso_e2(struct mbuf *m, 5078 uint32_t *parsing_data) 5079{ 5080 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5081 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5082 ETH_TX_PARSE_BD_E2_LSO_MSS); 5083 5084 /* XXX test for IPv6 with extension header... */ 5085} 5086 5087static void 5088bxe_set_pbd_lso(struct mbuf *m, 5089 struct eth_tx_parse_bd_e1x *pbd) 5090{ 5091 struct ether_vlan_header *eh = NULL; 5092 struct ip *ip = NULL; 5093 struct tcphdr *th = NULL; 5094 int e_hlen; 5095 5096 /* get the Ethernet header */ 5097 eh = mtod(m, struct ether_vlan_header *); 5098 5099 /* handle VLAN encapsulation if present */ 5100 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5101 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; 5102 5103 /* get the IP and TCP header, with LSO entire header in first mbuf */ 5104 /* XXX assuming IPv4 */ 5105 ip = (struct ip *)(m->m_data + e_hlen); 5106 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 5107 5108 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); 5109 pbd->tcp_send_seq = ntohl(th->th_seq); 5110 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); 5111 5112#if 1 5113 /* XXX IPv4 */ 5114 pbd->ip_id = ntohs(ip->ip_id); 5115 pbd->tcp_pseudo_csum = 5116 ntohs(in_pseudo(ip->ip_src.s_addr, 5117 ip->ip_dst.s_addr, 5118 htons(IPPROTO_TCP))); 5119#else 5120 /* XXX IPv6 */ 5121 pbd->tcp_pseudo_csum = 5122 ntohs(in_pseudo(&ip6->ip6_src, 5123 &ip6->ip6_dst, 5124 htons(IPPROTO_TCP))); 5125#endif 5126 5127 pbd->global_data |= 5128 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 5129} 5130 5131/* 5132 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory 5133 * visible to the controller. 5134 * 5135 * If an mbuf is submitted to this routine and cannot be given to the 5136 * controller (e.g. it has too many fragments) then the function may free 5137 * the mbuf and return to the caller. 5138 * 5139 * Returns: 5140 * 0 = Success, !0 = Failure 5141 * Note the side effect that an mbuf may be freed if it causes a problem. 5142 */ 5143static int 5144bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) 5145{ 5146 bus_dma_segment_t segs[32]; 5147 struct mbuf *m0; 5148 struct bxe_sw_tx_bd *tx_buf; 5149 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 5150 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 5151 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ 5152 struct eth_tx_bd *tx_data_bd; 5153 struct eth_tx_bd *tx_total_pkt_size_bd; 5154 struct eth_tx_start_bd *tx_start_bd; 5155 uint16_t bd_prod, pkt_prod, total_pkt_size; 5156 uint8_t mac_type; 5157 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; 5158 struct bxe_softc *sc; 5159 uint16_t tx_bd_avail; 5160 struct ether_vlan_header *eh; 5161 uint32_t pbd_e2_parsing_data = 0; 5162 uint8_t hlen = 0; 5163 int tmp_bd; 5164 int i; 5165 5166 sc = fp->sc; 5167 5168#if __FreeBSD_version >= 800000 5169 M_ASSERTPKTHDR(*m_head); 5170#endif /* #if __FreeBSD_version >= 800000 */ 5171 5172 m0 = *m_head; 5173 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; 5174 tx_start_bd = NULL; 5175 tx_data_bd = NULL; 5176 tx_total_pkt_size_bd = NULL; 5177 5178 /* get the H/W pointer for packets and BDs */ 5179 pkt_prod = fp->tx_pkt_prod; 5180 bd_prod = fp->tx_bd_prod; 5181 5182 mac_type = UNICAST_ADDRESS; 5183 5184 /* map the mbuf into the next open DMAable memory */ 5185 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; 5186 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5187 tx_buf->m_map, m0, 5188 segs, &nsegs, BUS_DMA_NOWAIT); 5189 5190 /* mapping errors */ 5191 if(__predict_false(error != 0)) { 5192 fp->eth_q_stats.tx_dma_mapping_failure++; 5193 if (error == ENOMEM) { 5194 /* resource issue, try again later */ 5195 rc = ENOMEM; 5196 } else if (error == EFBIG) { 5197 /* possibly recoverable with defragmentation */ 5198 fp->eth_q_stats.mbuf_defrag_attempts++; 5199 m0 = m_defrag(*m_head, M_DONTWAIT); 5200 if (m0 == NULL) { 5201 fp->eth_q_stats.mbuf_defrag_failures++; 5202 rc = ENOBUFS; 5203 } else { 5204 /* defrag successful, try mapping again */ 5205 *m_head = m0; 5206 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5207 tx_buf->m_map, m0, 5208 segs, &nsegs, BUS_DMA_NOWAIT); 5209 if (error) { 5210 fp->eth_q_stats.tx_dma_mapping_failure++; 5211 rc = error; 5212 } 
5213             }
5214         } else {
5215             /* unknown, unrecoverable mapping error */
5216             BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5217             bxe_dump_mbuf(sc, m0, FALSE);
5218             rc = error;
5219         }
5220
5221         goto bxe_tx_encap_continue;
5222     }
5223
5224     tx_bd_avail = bxe_tx_avail(sc, fp);
5225
5226     /* make sure there is enough room in the send queue */
5227     if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5228         /* Recoverable, try again later. */
5229         fp->eth_q_stats.tx_hw_queue_full++;
5230         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5231         rc = ENOMEM;
5232         goto bxe_tx_encap_continue;
5233     }
5234
5235     /* capture the current H/W TX chain high watermark */
5236     if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5237                         (TX_BD_USABLE - tx_bd_avail))) {
5238         fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5239     }
5240
5241     /* make sure it fits in the packet window */
5242     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5243         /*
5244          * The mbuf may be too big for the controller to handle. If the frame
5245          * is a TSO frame we'll need to do an additional check.
5246          */
5247         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5248             if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5249                 goto bxe_tx_encap_continue; /* OK to send */
5250             } else {
5251                 fp->eth_q_stats.tx_window_violation_tso++;
5252             }
5253         } else {
5254             fp->eth_q_stats.tx_window_violation_std++;
5255         }
5256
5257         /* let's try to defragment this mbuf and remap it */
5258         fp->eth_q_stats.mbuf_defrag_attempts++;
5259         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5260
5261         m0 = m_defrag(*m_head, M_DONTWAIT);
5262         if (m0 == NULL) {
5263             fp->eth_q_stats.mbuf_defrag_failures++;
5264             /* Ugh, just drop the frame... :( */
5265             rc = ENOBUFS;
5266         } else {
5267             /* defrag successful, try mapping again */
5268             *m_head = m0;
5269             error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5270                                             tx_buf->m_map, m0,
5271                                             segs, &nsegs, BUS_DMA_NOWAIT);
5272             if (error) {
5273                 fp->eth_q_stats.tx_dma_mapping_failure++;
5274                 /* No sense in trying to defrag/copy chain, drop it.
:( */ 5275 rc = error; 5276 } else { 5277 /* if the chain is still too long then drop it */ 5278 if(m0->m_pkthdr.csum_flags & CSUM_TSO) { 5279 /* 5280 * in case TSO is enabled nsegs should be checked against 5281 * BXE_TSO_MAX_SEGMENTS 5282 */ 5283 if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) { 5284 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5285 fp->eth_q_stats.nsegs_path1_errors++; 5286 rc = ENODEV; 5287 } 5288 } else { 5289 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5290 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5291 fp->eth_q_stats.nsegs_path2_errors++; 5292 rc = ENODEV; 5293 } 5294 } 5295 } 5296 } 5297 } 5298 5299bxe_tx_encap_continue: 5300 5301 /* Check for errors */ 5302 if (rc) { 5303 if (rc == ENOMEM) { 5304 /* recoverable try again later */ 5305 } else { 5306 fp->eth_q_stats.tx_soft_errors++; 5307 fp->eth_q_stats.mbuf_alloc_tx--; 5308 m_freem(*m_head); 5309 *m_head = NULL; 5310 } 5311 5312 return (rc); 5313 } 5314 5315 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5316 if (m0->m_flags & M_BCAST) { 5317 mac_type = BROADCAST_ADDRESS; 5318 } else if (m0->m_flags & M_MCAST) { 5319 mac_type = MULTICAST_ADDRESS; 5320 } 5321 5322 /* store the mbuf into the mbuf ring */ 5323 tx_buf->m = m0; 5324 tx_buf->first_bd = fp->tx_bd_prod; 5325 tx_buf->flags = 0; 5326 5327 /* prepare the first transmit (start) BD for the mbuf */ 5328 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5329 5330 BLOGD(sc, DBG_TX, 5331 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5332 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5333 5334 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5335 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5336 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5337 total_pkt_size += tx_start_bd->nbytes; 5338 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5339 5340 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5341 5342 /* all frames have at least Start BD + Parsing BD */ 5343 nbds = nsegs + 1; 5344 tx_start_bd->nbd = htole16(nbds); 5345 5346 if (m0->m_flags & M_VLANTAG) { 5347 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5348 tx_start_bd->bd_flags.as_bitfield |= 5349 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5350 } else { 5351 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5352 if (IS_VF(sc)) { 5353 /* map ethernet header to find type and header length */ 5354 eh = mtod(m0, struct ether_vlan_header *); 5355 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5356 } else { 5357 /* used by FW for packet accounting */ 5358 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5359 } 5360 } 5361 5362 /* 5363 * add a parsing BD from the chain. 
The parsing BD is always added 5364 * though it is only used for TSO and chksum 5365 */ 5366 bd_prod = TX_BD_NEXT(bd_prod); 5367 5368 if (m0->m_pkthdr.csum_flags) { 5369 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5370 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5371 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5372 } 5373 5374 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5375 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5376 ETH_TX_BD_FLAGS_L4_CSUM); 5377 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5378 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5379 ETH_TX_BD_FLAGS_IS_UDP | 5380 ETH_TX_BD_FLAGS_L4_CSUM); 5381 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5382 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5383 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5384 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5385 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5386 ETH_TX_BD_FLAGS_IS_UDP); 5387 } 5388 } 5389 5390 if (!CHIP_IS_E1x(sc)) { 5391 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5392 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5393 5394 if (m0->m_pkthdr.csum_flags) { 5395 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5396 } 5397 5398 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5399 mac_type); 5400 } else { 5401 uint16_t global_data = 0; 5402 5403 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5404 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5405 5406 if (m0->m_pkthdr.csum_flags) { 5407 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5408 } 5409 5410 SET_FLAG(global_data, 5411 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5412 pbd_e1x->global_data |= htole16(global_data); 5413 } 5414 5415 /* setup the parsing BD with TSO specific info */ 5416 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5417 fp->eth_q_stats.tx_ofld_frames_lso++; 5418 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5419 5420 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5421 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5422 5423 /* split the first BD into header/data making the fw job easy */ 5424 nbds++; 5425 tx_start_bd->nbd = htole16(nbds); 5426 tx_start_bd->nbytes = htole16(hlen); 5427 5428 bd_prod = TX_BD_NEXT(bd_prod); 5429 5430 /* new transmit BD after the tx_parse_bd */ 5431 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5432 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5433 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5434 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5435 if (tx_total_pkt_size_bd == NULL) { 5436 tx_total_pkt_size_bd = tx_data_bd; 5437 } 5438 5439 BLOGD(sc, DBG_TX, 5440 "TSO split header size is %d (%x:%x) nbds %d\n", 5441 le16toh(tx_start_bd->nbytes), 5442 le32toh(tx_start_bd->addr_hi), 5443 le32toh(tx_start_bd->addr_lo), 5444 nbds); 5445 } 5446 5447 if (!CHIP_IS_E1x(sc)) { 5448 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5449 } else { 5450 bxe_set_pbd_lso(m0, pbd_e1x); 5451 } 5452 } 5453 5454 if (pbd_e2_parsing_data) { 5455 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5456 } 5457 5458 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5459 for (i = 1; i < nsegs ; i++) { 5460 bd_prod = TX_BD_NEXT(bd_prod); 5461 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5462 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5463 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5464 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5465 if 
(tx_total_pkt_size_bd == NULL) { 5466 tx_total_pkt_size_bd = tx_data_bd; 5467 } 5468 total_pkt_size += tx_data_bd->nbytes; 5469 } 5470 5471 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5472 5473 if (tx_total_pkt_size_bd != NULL) { 5474 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5475 } 5476 5477 if (__predict_false(sc->debug & DBG_TX)) { 5478 tmp_bd = tx_buf->first_bd; 5479 for (i = 0; i < nbds; i++) 5480 { 5481 if (i == 0) { 5482 BLOGD(sc, DBG_TX, 5483 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5484 "bd_flags=0x%x hdr_nbds=%d\n", 5485 tx_start_bd, 5486 tmp_bd, 5487 le16toh(tx_start_bd->nbd), 5488 le16toh(tx_start_bd->vlan_or_ethertype), 5489 tx_start_bd->bd_flags.as_bitfield, 5490 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5491 } else if (i == 1) { 5492 if (pbd_e1x) { 5493 BLOGD(sc, DBG_TX, 5494 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5495 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5496 "tcp_seq=%u total_hlen_w=%u\n", 5497 pbd_e1x, 5498 tmp_bd, 5499 pbd_e1x->global_data, 5500 pbd_e1x->ip_hlen_w, 5501 pbd_e1x->ip_id, 5502 pbd_e1x->lso_mss, 5503 pbd_e1x->tcp_flags, 5504 pbd_e1x->tcp_pseudo_csum, 5505 pbd_e1x->tcp_send_seq, 5506 le16toh(pbd_e1x->total_hlen_w)); 5507 } else { /* if (pbd_e2) */ 5508 BLOGD(sc, DBG_TX, 5509 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5510 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5511 pbd_e2, 5512 tmp_bd, 5513 pbd_e2->data.mac_addr.dst_hi, 5514 pbd_e2->data.mac_addr.dst_mid, 5515 pbd_e2->data.mac_addr.dst_lo, 5516 pbd_e2->data.mac_addr.src_hi, 5517 pbd_e2->data.mac_addr.src_mid, 5518 pbd_e2->data.mac_addr.src_lo, 5519 pbd_e2->parsing_data); 5520 } 5521 } 5522 5523 if (i != 1) { /* skip parse db as it doesn't hold data */ 5524 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5525 BLOGD(sc, DBG_TX, 5526 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5527 tx_data_bd, 5528 tmp_bd, 5529 le16toh(tx_data_bd->nbytes), 5530 le32toh(tx_data_bd->addr_hi), 5531 le32toh(tx_data_bd->addr_lo)); 5532 } 5533 5534 tmp_bd = TX_BD_NEXT(tmp_bd); 5535 } 5536 } 5537 5538 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5539 5540 /* update TX BD producer index value for next TX */ 5541 bd_prod = TX_BD_NEXT(bd_prod); 5542 5543 /* 5544 * If the chain of tx_bd's describing this frame is adjacent to or spans 5545 * an eth_tx_next_bd element then we need to increment the nbds value. 5546 */ 5547 if (TX_BD_IDX(bd_prod) < nbds) { 5548 nbds++; 5549 } 5550 5551 /* don't allow reordering of writes for nbd and packets */ 5552 mb(); 5553 5554 fp->tx_db.data.prod += nbds; 5555 5556 /* producer points to the next free tx_bd at this point */ 5557 fp->tx_pkt_prod++; 5558 fp->tx_bd_prod = bd_prod; 5559 5560 DOORBELL(sc, fp->index, fp->tx_db.raw); 5561 5562 fp->eth_q_stats.tx_pkts++; 5563 5564 /* Prevent speculative reads from getting ahead of the status block. */ 5565 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5566 0, 0, BUS_SPACE_BARRIER_READ); 5567 5568 /* Prevent speculative reads from getting ahead of the doorbell. 
*/ 5569 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5570 0, 0, BUS_SPACE_BARRIER_READ); 5571 5572 return (0); 5573} 5574 5575static void 5576bxe_tx_start_locked(struct bxe_softc *sc, 5577 struct ifnet *ifp, 5578 struct bxe_fastpath *fp) 5579{ 5580 struct mbuf *m = NULL; 5581 int tx_count = 0; 5582 uint16_t tx_bd_avail; 5583 5584 BXE_FP_TX_LOCK_ASSERT(fp); 5585 5586 /* keep adding entries while there are frames to send */ 5587 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 5588 5589 /* 5590 * check for any frames to send 5591 * dequeue can still be NULL even if queue is not empty 5592 */ 5593 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 5594 if (__predict_false(m == NULL)) { 5595 break; 5596 } 5597 5598 /* the mbuf now belongs to us */ 5599 fp->eth_q_stats.mbuf_alloc_tx++; 5600 5601 /* 5602 * Put the frame into the transmit ring. If we don't have room, 5603 * place the mbuf back at the head of the TX queue, set the 5604 * OACTIVE flag, and wait for the NIC to drain the chain. 5605 */ 5606 if (__predict_false(bxe_tx_encap(fp, &m))) { 5607 fp->eth_q_stats.tx_encap_failures++; 5608 if (m != NULL) { 5609 /* mark the TX queue as full and return the frame */ 5610 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 5611 IFQ_DRV_PREPEND(&ifp->if_snd, m); 5612 fp->eth_q_stats.mbuf_alloc_tx--; 5613 fp->eth_q_stats.tx_queue_xoff++; 5614 } 5615 5616 /* stop looking for more work */ 5617 break; 5618 } 5619 5620 /* the frame was enqueued successfully */ 5621 tx_count++; 5622 5623 /* send a copy of the frame to any BPF listeners. */ 5624 BPF_MTAP(ifp, m); 5625 5626 tx_bd_avail = bxe_tx_avail(sc, fp); 5627 5628 /* handle any completions if we're running low */ 5629 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5630 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5631 bxe_txeof(sc, fp); 5632 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5633 break; 5634 } 5635 } 5636 } 5637 5638 /* all TX packets were dequeued and/or the tx ring is full */ 5639 if (tx_count > 0) { 5640 /* reset the TX watchdog timeout timer */ 5641 fp->watchdog_timer = BXE_TX_TIMEOUT; 5642 } 5643} 5644 5645/* Legacy (non-RSS) dispatch routine */ 5646static void 5647bxe_tx_start(struct ifnet *ifp) 5648{ 5649 struct bxe_softc *sc; 5650 struct bxe_fastpath *fp; 5651 5652 sc = ifp->if_softc; 5653 5654 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5655 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5656 return; 5657 } 5658 5659 if (!sc->link_vars.link_up) { 5660 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5661 return; 5662 } 5663 5664 fp = &sc->fp[0]; 5665 5666 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5667 fp->eth_q_stats.tx_queue_full_return++; 5668 return; 5669 } 5670 5671 BXE_FP_TX_LOCK(fp); 5672 bxe_tx_start_locked(sc, ifp, fp); 5673 BXE_FP_TX_UNLOCK(fp); 5674} 5675 5676#if __FreeBSD_version >= 901504 5677 5678static int 5679bxe_tx_mq_start_locked(struct bxe_softc *sc, 5680 struct ifnet *ifp, 5681 struct bxe_fastpath *fp, 5682 struct mbuf *m) 5683{ 5684 struct buf_ring *tx_br = fp->tx_br; 5685 struct mbuf *next; 5686 int depth, rc, tx_count; 5687 uint16_t tx_bd_avail; 5688 5689 rc = tx_count = 0; 5690 5691 BXE_FP_TX_LOCK_ASSERT(fp); 5692 5693 if (sc->state != BXE_STATE_OPEN) { 5694 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; 5695 return ENETDOWN; 5696 } 5697 5698 if (!tx_br) { 5699 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 5700 return (EINVAL); 5701 } 5702 5703 if (m != NULL) { 5704 rc = drbr_enqueue(ifp, tx_br, m); 5705 if (rc != 0) { 5706 fp->eth_q_stats.tx_soft_errors++; 5707 goto 
bxe_tx_mq_start_locked_exit; 5708 } 5709 } 5710 5711 if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5712 fp->eth_q_stats.tx_request_link_down_failures++; 5713 goto bxe_tx_mq_start_locked_exit; 5714 } 5715 5716 /* fetch the depth of the driver queue */ 5717 depth = drbr_inuse(ifp, tx_br); 5718 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 5719 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 5720 } 5721 5722 /* keep adding entries while there are frames to send */ 5723 while ((next = drbr_peek(ifp, tx_br)) != NULL) { 5724 /* handle any completions if we're running low */ 5725 tx_bd_avail = bxe_tx_avail(sc, fp); 5726 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5727 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5728 bxe_txeof(sc, fp); 5729 tx_bd_avail = bxe_tx_avail(sc, fp); 5730 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { 5731 fp->eth_q_stats.bd_avail_too_less_failures++; 5732 m_freem(next); 5733 drbr_advance(ifp, tx_br); 5734 rc = ENOBUFS; 5735 break; 5736 } 5737 } 5738 5739 /* the mbuf now belongs to us */ 5740 fp->eth_q_stats.mbuf_alloc_tx++; 5741 5742 /* 5743 * Put the frame into the transmit ring. If we don't have room, 5744 * place the mbuf back at the head of the TX queue, set the 5745 * OACTIVE flag, and wait for the NIC to drain the chain. 5746 */ 5747 rc = bxe_tx_encap(fp, &next); 5748 if (__predict_false(rc != 0)) { 5749 fp->eth_q_stats.tx_encap_failures++; 5750 if (next != NULL) { 5751 /* mark the TX queue as full and save the frame */ 5752 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 5753 drbr_putback(ifp, tx_br, next); 5754 fp->eth_q_stats.mbuf_alloc_tx--; 5755 fp->eth_q_stats.tx_frames_deferred++; 5756 } else 5757 drbr_advance(ifp, tx_br); 5758 5759 /* stop looking for more work */ 5760 break; 5761 } 5762 5763 /* the transmit frame was enqueued successfully */ 5764 tx_count++; 5765 5766 /* send a copy of the frame to any BPF listeners */ 5767 BPF_MTAP(ifp, next); 5768 5769 drbr_advance(ifp, tx_br); 5770 } 5771 5772 /* all TX packets were dequeued and/or the tx ring is full */ 5773 if (tx_count > 0) { 5774 /* reset the TX watchdog timeout timer */ 5775 fp->watchdog_timer = BXE_TX_TIMEOUT; 5776 } 5777 5778bxe_tx_mq_start_locked_exit: 5779 /* If we didn't drain the drbr, enqueue a task in the future to do it. */ 5780 if (!drbr_empty(ifp, tx_br)) { 5781 fp->eth_q_stats.tx_mq_not_empty++; 5782 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); 5783 } 5784 5785 return (rc); 5786} 5787 5788static void 5789bxe_tx_mq_start_deferred(void *arg, 5790 int pending) 5791{ 5792 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; 5793 struct bxe_softc *sc = fp->sc; 5794 struct ifnet *ifp = sc->ifnet; 5795 5796 BXE_FP_TX_LOCK(fp); 5797 bxe_tx_mq_start_locked(sc, ifp, fp, NULL); 5798 BXE_FP_TX_UNLOCK(fp); 5799} 5800 5801/* Multiqueue (TSS) dispatch routine. 
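 * The TX queue is selected from the mbuf's flowid (the RSS hash) modulo
 * the number of active queues when the flowid is valid, otherwise queue 0
 * is used. If the per-queue TX lock is contended the frame is enqueued on
 * the queue's buf_ring and the tx_task is scheduled to service it later.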
*/ 5802static int 5803bxe_tx_mq_start(struct ifnet *ifp, 5804 struct mbuf *m) 5805{ 5806 struct bxe_softc *sc = ifp->if_softc; 5807 struct bxe_fastpath *fp; 5808 int fp_index, rc; 5809 5810 fp_index = 0; /* default is the first queue */ 5811 5812 /* check if flowid is set */ 5813 5814 if (BXE_VALID_FLOWID(m)) 5815 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 5816 5817 fp = &sc->fp[fp_index]; 5818 5819 if (sc->state != BXE_STATE_OPEN) { 5820 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; 5821 return ENETDOWN; 5822 } 5823 5824 if (BXE_FP_TX_TRYLOCK(fp)) { 5825 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 5826 BXE_FP_TX_UNLOCK(fp); 5827 } else { 5828 rc = drbr_enqueue(ifp, fp->tx_br, m); 5829 taskqueue_enqueue(fp->tq, &fp->tx_task); 5830 } 5831 5832 return (rc); 5833} 5834 5835static void 5836bxe_mq_flush(struct ifnet *ifp) 5837{ 5838 struct bxe_softc *sc = ifp->if_softc; 5839 struct bxe_fastpath *fp; 5840 struct mbuf *m; 5841 int i; 5842 5843 for (i = 0; i < sc->num_queues; i++) { 5844 fp = &sc->fp[i]; 5845 5846 if (fp->state != BXE_FP_STATE_IRQ) { 5847 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 5848 fp->index, fp->state); 5849 continue; 5850 } 5851 5852 if (fp->tx_br != NULL) { 5853 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 5854 BXE_FP_TX_LOCK(fp); 5855 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 5856 m_freem(m); 5857 } 5858 BXE_FP_TX_UNLOCK(fp); 5859 } 5860 } 5861 5862 if_qflush(ifp); 5863} 5864 5865#endif /* FreeBSD_version >= 901504 */ 5866 5867static uint16_t 5868bxe_cid_ilt_lines(struct bxe_softc *sc) 5869{ 5870 if (IS_SRIOV(sc)) { 5871 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 5872 } 5873 return (L2_ILT_LINES(sc)); 5874} 5875 5876static void 5877bxe_ilt_set_info(struct bxe_softc *sc) 5878{ 5879 struct ilt_client_info *ilt_client; 5880 struct ecore_ilt *ilt = sc->ilt; 5881 uint16_t line = 0; 5882 5883 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 5884 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 5885 5886 /* CDU */ 5887 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 5888 ilt_client->client_num = ILT_CLIENT_CDU; 5889 ilt_client->page_size = CDU_ILT_PAGE_SZ; 5890 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 5891 ilt_client->start = line; 5892 line += bxe_cid_ilt_lines(sc); 5893 5894 if (CNIC_SUPPORT(sc)) { 5895 line += CNIC_ILT_LINES; 5896 } 5897 5898 ilt_client->end = (line - 1); 5899 5900 BLOGD(sc, DBG_LOAD, 5901 "ilt client[CDU]: start %d, end %d, " 5902 "psz 0x%x, flags 0x%x, hw psz %d\n", 5903 ilt_client->start, ilt_client->end, 5904 ilt_client->page_size, 5905 ilt_client->flags, 5906 ilog2(ilt_client->page_size >> 12)); 5907 5908 /* QM */ 5909 if (QM_INIT(sc->qm_cid_count)) { 5910 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 5911 ilt_client->client_num = ILT_CLIENT_QM; 5912 ilt_client->page_size = QM_ILT_PAGE_SZ; 5913 ilt_client->flags = 0; 5914 ilt_client->start = line; 5915 5916 /* 4 bytes for each cid */ 5917 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 5918 QM_ILT_PAGE_SZ); 5919 5920 ilt_client->end = (line - 1); 5921 5922 BLOGD(sc, DBG_LOAD, 5923 "ilt client[QM]: start %d, end %d, " 5924 "psz 0x%x, flags 0x%x, hw psz %d\n", 5925 ilt_client->start, ilt_client->end, 5926 ilt_client->page_size, ilt_client->flags, 5927 ilog2(ilt_client->page_size >> 12)); 5928 } 5929 5930 if (CNIC_SUPPORT(sc)) { 5931 /* SRC */ 5932 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 5933 ilt_client->client_num = ILT_CLIENT_SRC; 5934 ilt_client->page_size = SRC_ILT_PAGE_SZ; 5935 ilt_client->flags = 
0; 5936 ilt_client->start = line; 5937 line += SRC_ILT_LINES; 5938 ilt_client->end = (line - 1); 5939 5940 BLOGD(sc, DBG_LOAD, 5941 "ilt client[SRC]: start %d, end %d, " 5942 "psz 0x%x, flags 0x%x, hw psz %d\n", 5943 ilt_client->start, ilt_client->end, 5944 ilt_client->page_size, ilt_client->flags, 5945 ilog2(ilt_client->page_size >> 12)); 5946 5947 /* TM */ 5948 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 5949 ilt_client->client_num = ILT_CLIENT_TM; 5950 ilt_client->page_size = TM_ILT_PAGE_SZ; 5951 ilt_client->flags = 0; 5952 ilt_client->start = line; 5953 line += TM_ILT_LINES; 5954 ilt_client->end = (line - 1); 5955 5956 BLOGD(sc, DBG_LOAD, 5957 "ilt client[TM]: start %d, end %d, " 5958 "psz 0x%x, flags 0x%x, hw psz %d\n", 5959 ilt_client->start, ilt_client->end, 5960 ilt_client->page_size, ilt_client->flags, 5961 ilog2(ilt_client->page_size >> 12)); 5962 } 5963 5964 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 5965} 5966 5967static void 5968bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 5969{ 5970 int i; 5971 uint32_t rx_buf_size; 5972 5973 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 5974 5975 for (i = 0; i < sc->num_queues; i++) { 5976 if(rx_buf_size <= MCLBYTES){ 5977 sc->fp[i].rx_buf_size = rx_buf_size; 5978 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5979 }else if (rx_buf_size <= MJUMPAGESIZE){ 5980 sc->fp[i].rx_buf_size = rx_buf_size; 5981 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5982 }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ 5983 sc->fp[i].rx_buf_size = MCLBYTES; 5984 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5985 }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ 5986 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 5987 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5988 }else { 5989 sc->fp[i].rx_buf_size = MCLBYTES; 5990 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5991 } 5992 } 5993} 5994 5995static int 5996bxe_alloc_ilt_mem(struct bxe_softc *sc) 5997{ 5998 int rc = 0; 5999 6000 if ((sc->ilt = 6001 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6002 M_BXE_ILT, 6003 (M_NOWAIT | M_ZERO))) == NULL) { 6004 rc = 1; 6005 } 6006 6007 return (rc); 6008} 6009 6010static int 6011bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6012{ 6013 int rc = 0; 6014 6015 if ((sc->ilt->lines = 6016 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6017 M_BXE_ILT, 6018 (M_NOWAIT | M_ZERO))) == NULL) { 6019 rc = 1; 6020 } 6021 6022 return (rc); 6023} 6024 6025static void 6026bxe_free_ilt_mem(struct bxe_softc *sc) 6027{ 6028 if (sc->ilt != NULL) { 6029 free(sc->ilt, M_BXE_ILT); 6030 sc->ilt = NULL; 6031 } 6032} 6033 6034static void 6035bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6036{ 6037 if (sc->ilt->lines != NULL) { 6038 free(sc->ilt->lines, M_BXE_ILT); 6039 sc->ilt->lines = NULL; 6040 } 6041} 6042 6043static void 6044bxe_free_mem(struct bxe_softc *sc) 6045{ 6046 int i; 6047 6048 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6049 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6050 sc->context[i].vcxt = NULL; 6051 sc->context[i].size = 0; 6052 } 6053 6054 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6055 6056 bxe_free_ilt_lines_mem(sc); 6057 6058} 6059 6060static int 6061bxe_alloc_mem(struct bxe_softc *sc) 6062{ 6063 6064 int context_size; 6065 int allocated; 6066 int i; 6067 6068 /* 6069 * Allocate memory for CDU context: 6070 * This memory is allocated separately and not in the generic ILT 6071 * functions because CDU differs in few aspects: 6072 * 1. There can be multiple entities allocating memory for context - 6073 * regular L2, CNIC, and SRIOV drivers. 
Each separately controls 6074 * its own ILT lines. 6075 * 2. Since CDU page-size is not a single 4KB page (which is the case 6076 * for the other ILT clients), to be efficient we want to support 6077 * allocation of sub-page-size in the last entry. 6078 * 3. Context pointers are used by the driver to pass to FW / update 6079 * the context (for the other ILT clients the pointers are used just to 6080 * free the memory during unload). 6081 */ 6082 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6083 for (i = 0, allocated = 0; allocated < context_size; i++) { 6084 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6085 (context_size - allocated)); 6086 6087 if (bxe_dma_alloc(sc, sc->context[i].size, 6088 &sc->context[i].vcxt_dma, 6089 "cdu context") != 0) { 6090 bxe_free_mem(sc); 6091 return (-1); 6092 } 6093 6094 sc->context[i].vcxt = 6095 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6096 6097 allocated += sc->context[i].size; 6098 } 6099 6100 bxe_alloc_ilt_lines_mem(sc); 6101 6102 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6103 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6104 { 6105 for (i = 0; i < 4; i++) { 6106 BLOGD(sc, DBG_LOAD, 6107 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6108 i, 6109 sc->ilt->clients[i].page_size, 6110 sc->ilt->clients[i].start, 6111 sc->ilt->clients[i].end, 6112 sc->ilt->clients[i].client_num, 6113 sc->ilt->clients[i].flags); 6114 } 6115 } 6116 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6117 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6118 bxe_free_mem(sc); 6119 return (-1); 6120 } 6121 6122 return (0); 6123} 6124 6125static void 6126bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6127{ 6128 struct bxe_softc *sc; 6129 int i; 6130 6131 sc = fp->sc; 6132 6133 if (fp->rx_mbuf_tag == NULL) { 6134 return; 6135 } 6136 6137 /* free all mbufs and unload all maps */ 6138 for (i = 0; i < RX_BD_TOTAL; i++) { 6139 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6140 bus_dmamap_sync(fp->rx_mbuf_tag, 6141 fp->rx_mbuf_chain[i].m_map, 6142 BUS_DMASYNC_POSTREAD); 6143 bus_dmamap_unload(fp->rx_mbuf_tag, 6144 fp->rx_mbuf_chain[i].m_map); 6145 } 6146 6147 if (fp->rx_mbuf_chain[i].m != NULL) { 6148 m_freem(fp->rx_mbuf_chain[i].m); 6149 fp->rx_mbuf_chain[i].m = NULL; 6150 fp->eth_q_stats.mbuf_alloc_rx--; 6151 } 6152 } 6153} 6154 6155static void 6156bxe_free_tpa_pool(struct bxe_fastpath *fp) 6157{ 6158 struct bxe_softc *sc; 6159 int i, max_agg_queues; 6160 6161 sc = fp->sc; 6162 6163 if (fp->rx_mbuf_tag == NULL) { 6164 return; 6165 } 6166 6167 max_agg_queues = MAX_AGG_QS(sc); 6168 6169 /* release all mbufs and unload all DMA maps in the TPA pool */ 6170 for (i = 0; i < max_agg_queues; i++) { 6171 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6172 bus_dmamap_sync(fp->rx_mbuf_tag, 6173 fp->rx_tpa_info[i].bd.m_map, 6174 BUS_DMASYNC_POSTREAD); 6175 bus_dmamap_unload(fp->rx_mbuf_tag, 6176 fp->rx_tpa_info[i].bd.m_map); 6177 } 6178 6179 if (fp->rx_tpa_info[i].bd.m != NULL) { 6180 m_freem(fp->rx_tpa_info[i].bd.m); 6181 fp->rx_tpa_info[i].bd.m = NULL; 6182 fp->eth_q_stats.mbuf_alloc_tpa--; 6183 } 6184 } 6185} 6186 6187static void 6188bxe_free_sge_chain(struct bxe_fastpath *fp) 6189{ 6190 struct bxe_softc *sc; 6191 int i; 6192 6193 sc = fp->sc; 6194 6195 if (fp->rx_sge_mbuf_tag == NULL) { 6196 return; 6197 } 6198 6199 /* free all mbufs and unload all maps */ 6200 for (i = 0; i < RX_SGE_TOTAL; i++) { 6201 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6202 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6203 fp->rx_sge_mbuf_chain[i].m_map, 6204 
BUS_DMASYNC_POSTREAD); 6205 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6206 fp->rx_sge_mbuf_chain[i].m_map); 6207 } 6208 6209 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6210 m_freem(fp->rx_sge_mbuf_chain[i].m); 6211 fp->rx_sge_mbuf_chain[i].m = NULL; 6212 fp->eth_q_stats.mbuf_alloc_sge--; 6213 } 6214 } 6215} 6216 6217static void 6218bxe_free_fp_buffers(struct bxe_softc *sc) 6219{ 6220 struct bxe_fastpath *fp; 6221 int i; 6222 6223 for (i = 0; i < sc->num_queues; i++) { 6224 fp = &sc->fp[i]; 6225 6226#if __FreeBSD_version >= 901504 6227 if (fp->tx_br != NULL) { 6228 /* just in case bxe_mq_flush() wasn't called */ 6229 if (mtx_initialized(&fp->tx_mtx)) { 6230 struct mbuf *m; 6231 6232 BXE_FP_TX_LOCK(fp); 6233 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6234 m_freem(m); 6235 BXE_FP_TX_UNLOCK(fp); 6236 } 6237 } 6238#endif 6239 6240 /* free all RX buffers */ 6241 bxe_free_rx_bd_chain(fp); 6242 bxe_free_tpa_pool(fp); 6243 bxe_free_sge_chain(fp); 6244 6245 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6246 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6247 fp->eth_q_stats.mbuf_alloc_rx); 6248 } 6249 6250 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6251 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6252 fp->eth_q_stats.mbuf_alloc_sge); 6253 } 6254 6255 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6256 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6257 fp->eth_q_stats.mbuf_alloc_tpa); 6258 } 6259 6260 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6261 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6262 fp->eth_q_stats.mbuf_alloc_tx); 6263 } 6264 6265 /* XXX verify all mbufs were reclaimed */ 6266 } 6267} 6268 6269static int 6270bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6271 uint16_t prev_index, 6272 uint16_t index) 6273{ 6274 struct bxe_sw_rx_bd *rx_buf; 6275 struct eth_rx_bd *rx_bd; 6276 bus_dma_segment_t segs[1]; 6277 bus_dmamap_t map; 6278 struct mbuf *m; 6279 int nsegs, rc; 6280 6281 rc = 0; 6282 6283 /* allocate the new RX BD mbuf */ 6284 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6285 if (__predict_false(m == NULL)) { 6286 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6287 return (ENOBUFS); 6288 } 6289 6290 fp->eth_q_stats.mbuf_alloc_rx++; 6291 6292 /* initialize the mbuf buffer length */ 6293 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6294 6295 /* map the mbuf into non-paged pool */ 6296 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6297 fp->rx_mbuf_spare_map, 6298 m, segs, &nsegs, BUS_DMA_NOWAIT); 6299 if (__predict_false(rc != 0)) { 6300 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6301 m_freem(m); 6302 fp->eth_q_stats.mbuf_alloc_rx--; 6303 return (rc); 6304 } 6305 6306 /* all mbufs must map to a single segment */ 6307 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6308 6309 /* release any existing RX BD mbuf mappings */ 6310 6311 if (prev_index != index) { 6312 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6313 6314 if (rx_buf->m_map != NULL) { 6315 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6316 BUS_DMASYNC_POSTREAD); 6317 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6318 } 6319 6320 /* 6321 * We only get here from bxe_rxeof() when the maximum number 6322 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6323 * holds the mbuf in the prev_index so it's OK to NULL it out 6324 * here without concern of a memory leak. 
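 * Note that the new mbuf above was deliberately loaded through rx_mbuf_spare_map; only after the load succeeds is the spare map swapped with the ring entry's map below, so a failed DMA load never leaves a live ring entry pointing at an unloaded map.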
6325 */ 6326 fp->rx_mbuf_chain[prev_index].m = NULL; 6327 } 6328 6329 rx_buf = &fp->rx_mbuf_chain[index]; 6330 6331 if (rx_buf->m_map != NULL) { 6332 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6333 BUS_DMASYNC_POSTREAD); 6334 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6335 } 6336 6337 /* save the mbuf and mapping info for a future packet */ 6338 map = (prev_index != index) ? 6339 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6340 rx_buf->m_map = fp->rx_mbuf_spare_map; 6341 fp->rx_mbuf_spare_map = map; 6342 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6343 BUS_DMASYNC_PREREAD); 6344 rx_buf->m = m; 6345 6346 rx_bd = &fp->rx_chain[index]; 6347 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6348 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6349 6350 return (rc); 6351} 6352 6353static int 6354bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6355 int queue) 6356{ 6357 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6358 bus_dma_segment_t segs[1]; 6359 bus_dmamap_t map; 6360 struct mbuf *m; 6361 int nsegs; 6362 int rc = 0; 6363 6364 /* allocate the new TPA mbuf */ 6365 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6366 if (__predict_false(m == NULL)) { 6367 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6368 return (ENOBUFS); 6369 } 6370 6371 fp->eth_q_stats.mbuf_alloc_tpa++; 6372 6373 /* initialize the mbuf buffer length */ 6374 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6375 6376 /* map the mbuf into non-paged pool */ 6377 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6378 fp->rx_tpa_info_mbuf_spare_map, 6379 m, segs, &nsegs, BUS_DMA_NOWAIT); 6380 if (__predict_false(rc != 0)) { 6381 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6382 m_free(m); 6383 fp->eth_q_stats.mbuf_alloc_tpa--; 6384 return (rc); 6385 } 6386 6387 /* all mbufs must map to a single segment */ 6388 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6389 6390 /* release any existing TPA mbuf mapping */ 6391 if (tpa_info->bd.m_map != NULL) { 6392 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6393 BUS_DMASYNC_POSTREAD); 6394 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6395 } 6396 6397 /* save the mbuf and mapping info for the TPA mbuf */ 6398 map = tpa_info->bd.m_map; 6399 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6400 fp->rx_tpa_info_mbuf_spare_map = map; 6401 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6402 BUS_DMASYNC_PREREAD); 6403 tpa_info->bd.m = m; 6404 tpa_info->seg = segs[0]; 6405 6406 return (rc); 6407} 6408 6409/* 6410 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6411 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6412 * chain. 
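 * SGE buffers are page-sized (SGE_PAGE_SIZE) and are consumed by TPA aggregations that spill past the initial RX buffer; as in the RX BD path above, the mbuf is loaded through a dedicated spare DMA map that is swapped with the ring entry's map only on success.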
6413 */ 6414static int 6415bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6416 uint16_t index) 6417{ 6418 struct bxe_sw_rx_bd *sge_buf; 6419 struct eth_rx_sge *sge; 6420 bus_dma_segment_t segs[1]; 6421 bus_dmamap_t map; 6422 struct mbuf *m; 6423 int nsegs; 6424 int rc = 0; 6425 6426 /* allocate a new SGE mbuf */ 6427 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6428 if (__predict_false(m == NULL)) { 6429 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6430 return (ENOMEM); 6431 } 6432 6433 fp->eth_q_stats.mbuf_alloc_sge++; 6434 6435 /* initialize the mbuf buffer length */ 6436 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6437 6438 /* map the SGE mbuf into non-paged pool */ 6439 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6440 fp->rx_sge_mbuf_spare_map, 6441 m, segs, &nsegs, BUS_DMA_NOWAIT); 6442 if (__predict_false(rc != 0)) { 6443 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6444 m_freem(m); 6445 fp->eth_q_stats.mbuf_alloc_sge--; 6446 return (rc); 6447 } 6448 6449 /* all mbufs must map to a single segment */ 6450 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6451 6452 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6453 6454 /* release any existing SGE mbuf mapping */ 6455 if (sge_buf->m_map != NULL) { 6456 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6457 BUS_DMASYNC_POSTREAD); 6458 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6459 } 6460 6461 /* save the mbuf and mapping info for a future packet */ 6462 map = sge_buf->m_map; 6463 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6464 fp->rx_sge_mbuf_spare_map = map; 6465 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6466 BUS_DMASYNC_PREREAD); 6467 sge_buf->m = m; 6468 6469 sge = &fp->rx_sge_chain[index]; 6470 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6471 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6472 6473 return (rc); 6474} 6475 6476static __noinline int 6477bxe_alloc_fp_buffers(struct bxe_softc *sc) 6478{ 6479 struct bxe_fastpath *fp; 6480 int i, j, rc = 0; 6481 int ring_prod, cqe_ring_prod; 6482 int max_agg_queues; 6483 6484 for (i = 0; i < sc->num_queues; i++) { 6485 fp = &sc->fp[i]; 6486 6487 ring_prod = cqe_ring_prod = 0; 6488 fp->rx_bd_cons = 0; 6489 fp->rx_cq_cons = 0; 6490 6491 /* allocate buffers for the RX BDs in RX BD chain */ 6492 for (j = 0; j < sc->max_rx_bufs; j++) { 6493 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6494 if (rc != 0) { 6495 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6496 i, rc); 6497 goto bxe_alloc_fp_buffers_error; 6498 } 6499 6500 ring_prod = RX_BD_NEXT(ring_prod); 6501 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6502 } 6503 6504 fp->rx_bd_prod = ring_prod; 6505 fp->rx_cq_prod = cqe_ring_prod; 6506 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6507 6508 max_agg_queues = MAX_AGG_QS(sc); 6509 6510 fp->tpa_enable = TRUE; 6511 6512 /* fill the TPA pool */ 6513 for (j = 0; j < max_agg_queues; j++) { 6514 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6515 if (rc != 0) { 6516 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6517 i, j); 6518 fp->tpa_enable = FALSE; 6519 goto bxe_alloc_fp_buffers_error; 6520 } 6521 6522 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6523 } 6524 6525 if (fp->tpa_enable) { 6526 /* fill the RX SGE chain */ 6527 ring_prod = 0; 6528 for (j = 0; j < RX_SGE_USABLE; j++) { 6529 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6530 if (rc != 0) { 6531 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6532 i, ring_prod); 6533 fp->tpa_enable = FALSE; 6534 ring_prod = 0; 6535 goto 
bxe_alloc_fp_buffers_error; 6536 } 6537 6538 ring_prod = RX_SGE_NEXT(ring_prod); 6539 } 6540 6541 fp->rx_sge_prod = ring_prod; 6542 } 6543 } 6544 6545 return (0); 6546 6547bxe_alloc_fp_buffers_error: 6548 6549 /* unwind what was already allocated */ 6550 bxe_free_rx_bd_chain(fp); 6551 bxe_free_tpa_pool(fp); 6552 bxe_free_sge_chain(fp); 6553 6554 return (ENOBUFS); 6555} 6556 6557static void 6558bxe_free_fw_stats_mem(struct bxe_softc *sc) 6559{ 6560 bxe_dma_free(sc, &sc->fw_stats_dma); 6561 6562 sc->fw_stats_num = 0; 6563 6564 sc->fw_stats_req_size = 0; 6565 sc->fw_stats_req = NULL; 6566 sc->fw_stats_req_mapping = 0; 6567 6568 sc->fw_stats_data_size = 0; 6569 sc->fw_stats_data = NULL; 6570 sc->fw_stats_data_mapping = 0; 6571} 6572 6573static int 6574bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6575{ 6576 uint8_t num_queue_stats; 6577 int num_groups; 6578 6579 /* number of queues for statistics is number of eth queues */ 6580 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6581 6582 /* 6583 * Total number of FW statistics requests = 6584 * 1 for port stats + 1 for PF stats + num of queues 6585 */ 6586 sc->fw_stats_num = (2 + num_queue_stats); 6587 6588 /* 6589 * Request is built from stats_query_header and an array of 6590 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6591 * rules. The real number of requests is configured in the 6592 * stats_query_header. 6593 */ 6594 num_groups = 6595 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6596 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6597 6598 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6599 sc->fw_stats_num, num_groups); 6600 6601 sc->fw_stats_req_size = 6602 (sizeof(struct stats_query_header) + 6603 (num_groups * sizeof(struct stats_query_cmd_group))); 6604 6605 /* 6606 * Data for statistics requests + stats_counter. 6607 * stats_counter holds per-STORM counters that are incremented when 6608 * STORM has finished with the current request. Memory for FCoE 6609 * offloaded statistics is counted anyway, even if those statistics will not be sent. 6610 * VF stats are not accounted for here as the data of VF stats is stored 6611 * in memory allocated by the VF, not here. 6612 */ 6613 sc->fw_stats_data_size = 6614 (sizeof(struct stats_counter) + 6615 sizeof(struct per_port_stats) + 6616 sizeof(struct per_pf_stats) + 6617 /* sizeof(struct fcoe_statistics_params) + */ 6618 (sizeof(struct per_queue_stats) * num_queue_stats)); 6619 6620 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6621 &sc->fw_stats_dma, "fw stats") != 0) { 6622 bxe_free_fw_stats_mem(sc); 6623 return (-1); 6624 } 6625 6626 /* set up the shortcuts */ 6627 6628 sc->fw_stats_req = 6629 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6630 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6631 6632 sc->fw_stats_data = 6633 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6634 sc->fw_stats_req_size); 6635 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6636 sc->fw_stats_req_size); 6637 6638 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6639 (uintmax_t)sc->fw_stats_req_mapping); 6640 6641 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6642 (uintmax_t)sc->fw_stats_data_mapping); 6643 6644 return (0); 6645} 6646 6647/* 6648 * Bits map: 6649 * 0-7 - Engine0 load counter. 6650 * 8-15 - Engine1 load counter. 6651 * 16 - Engine0 RESET_IN_PROGRESS bit. 6652 * 17 - Engine1 RESET_IN_PROGRESS bit. 6653 * 18 - Engine0 ONE_IS_LOADED. 
Set when there is at least one active 6654 * function on the engine. 6655 * 19 - Engine1 ONE_IS_LOADED. 6656 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines' 6657 * leaders to complete (check for both RESET_IN_PROGRESS bits and not 6658 * for just the one belonging to its engine). 6659 */ 6660#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 6661#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 6662#define BXE_PATH0_LOAD_CNT_SHIFT 0 6663#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 6664#define BXE_PATH1_LOAD_CNT_SHIFT 8 6665#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 6666#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 6667#define BXE_GLOBAL_RESET_BIT 0x00040000 6668 6669/* set the GLOBAL_RESET bit, should be run under rtnl lock */ 6670static void 6671bxe_set_reset_global(struct bxe_softc *sc) 6672{ 6673 uint32_t val; 6674 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6675 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6676 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 6677 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6678} 6679 6680/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 6681static void 6682bxe_clear_reset_global(struct bxe_softc *sc) 6683{ 6684 uint32_t val; 6685 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6686 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6687 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 6688 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6689} 6690 6691/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 6692static uint8_t 6693bxe_reset_is_global(struct bxe_softc *sc) 6694{ 6695 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6696 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 6697 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 6698} 6699 6700/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 6701static void 6702bxe_set_reset_done(struct bxe_softc *sc) 6703{ 6704 uint32_t val; 6705 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6706 BXE_PATH0_RST_IN_PROG_BIT; 6707 6708 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6709 6710 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6711 /* Clear the bit */ 6712 val &= ~bit; 6713 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6714 6715 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6716} 6717 6718/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 6719static void 6720bxe_set_reset_in_progress(struct bxe_softc *sc) 6721{ 6722 uint32_t val; 6723 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6724 BXE_PATH0_RST_IN_PROG_BIT; 6725 6726 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6727 6728 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6729 /* Set the bit */ 6730 val |= bit; 6731 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6732 6733 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6734} 6735 6736/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 6737static uint8_t 6738bxe_reset_is_done(struct bxe_softc *sc, 6739 int engine) 6740{ 6741 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6742 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 6743 BXE_PATH0_RST_IN_PROG_BIT; 6744 6745 /* return false if bit is set */ 6746 return (val & bit) ? FALSE : TRUE; 6747} 6748 6749/* get the load status for an engine, should be run under rtnl lock */ 6750static uint8_t 6751bxe_get_load_status(struct bxe_softc *sc, 6752 int engine) 6753{ 6754 uint32_t mask = engine ? 
BXE_PATH1_LOAD_CNT_MASK : 6755 BXE_PATH0_LOAD_CNT_MASK; 6756 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 6757 BXE_PATH0_LOAD_CNT_SHIFT; 6758 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6759 6760 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6761 6762 val = ((val & mask) >> shift); 6763 6764 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 6765 6766 return (val != 0); 6767} 6768 6769/* set pf load mark */ 6770/* XXX needs to be under rtnl lock */ 6771static void 6772bxe_set_pf_load(struct bxe_softc *sc) 6773{ 6774 uint32_t val; 6775 uint32_t val1; 6776 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6777 BXE_PATH0_LOAD_CNT_MASK; 6778 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6779 BXE_PATH0_LOAD_CNT_SHIFT; 6780 6781 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6782 6783 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6784 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6785 6786 /* get the current counter value */ 6787 val1 = ((val & mask) >> shift); 6788 6789 /* set bit of this PF */ 6790 val1 |= (1 << SC_ABS_FUNC(sc)); 6791 6792 /* clear the old value */ 6793 val &= ~mask; 6794 6795 /* set the new one */ 6796 val |= ((val1 << shift) & mask); 6797 6798 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6799 6800 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6801} 6802 6803/* clear pf load mark */ 6804/* XXX needs to be under rtnl lock */ 6805static uint8_t 6806bxe_clear_pf_load(struct bxe_softc *sc) 6807{ 6808 uint32_t val1, val; 6809 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6810 BXE_PATH0_LOAD_CNT_MASK; 6811 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6812 BXE_PATH0_LOAD_CNT_SHIFT; 6813 6814 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6815 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6816 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 6817 6818 /* get the current counter value */ 6819 val1 = (val & mask) >> shift; 6820 6821 /* clear bit of that PF */ 6822 val1 &= ~(1 << SC_ABS_FUNC(sc)); 6823 6824 /* clear the old value */ 6825 val &= ~mask; 6826 6827 /* set the new one */ 6828 val |= ((val1 << shift) & mask); 6829 6830 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6831 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6832 return (val1 != 0); 6833} 6834 6835/* send load request to mcp and analyze response */ 6836static int 6837bxe_nic_load_request(struct bxe_softc *sc, 6838 uint32_t *load_code) 6839{ 6840 /* init fw_seq */ 6841 sc->fw_seq = 6842 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 6843 DRV_MSG_SEQ_NUMBER_MASK); 6844 6845 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 6846 6847 /* get the current FW pulse sequence */ 6848 sc->fw_drv_pulse_wr_seq = 6849 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 6850 DRV_PULSE_SEQ_MASK); 6851 6852 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 6853 sc->fw_drv_pulse_wr_seq); 6854 6855 /* load request */ 6856 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6857 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6858 6859 /* if the MCP fails to respond we must abort */ 6860 if (!(*load_code)) { 6861 BLOGE(sc, "MCP response failure!\n"); 6862 return (-1); 6863 } 6864 6865 /* if MCP refused then we must abort */ 6866 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 6867 BLOGE(sc, "MCP refused load request\n"); 6868 return (-1); 6869 } 6870 6871 return (0); 6872} 6873 6874/* 6875 * Check whether another PF has already loaded FW to chip. 
In virtualized 6876 * environments a pf from another VM may have already initialized the device 6877 * including loading FW. 6878 */ 6879static int 6880bxe_nic_load_analyze_req(struct bxe_softc *sc, 6881 uint32_t load_code) 6882{ 6883 uint32_t my_fw, loaded_fw; 6884 6885 /* is another pf loaded on this engine? */ 6886 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6887 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6888 /* build my FW version dword */ 6889 my_fw = (BCM_5710_FW_MAJOR_VERSION + 6890 (BCM_5710_FW_MINOR_VERSION << 8 ) + 6891 (BCM_5710_FW_REVISION_VERSION << 16) + 6892 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 6893 6894 /* read loaded FW from chip */ 6895 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 6896 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 6897 loaded_fw, my_fw); 6898 6899 /* abort nic load if version mismatch */ 6900 if (my_fw != loaded_fw) { 6901 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n", 6902 loaded_fw, my_fw); 6903 return (-1); 6904 } 6905 } 6906 6907 return (0); 6908} 6909 6910/* mark PMF if applicable */ 6911static void 6912bxe_nic_load_pmf(struct bxe_softc *sc, 6913 uint32_t load_code) 6914{ 6915 uint32_t ncsi_oem_data_addr; 6916 6917 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 6918 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 6919 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 6920 /* 6921 * Barrier here for ordering between the writing to sc->port.pmf here 6922 * and reading it from the periodic task. 6923 */ 6924 sc->port.pmf = 1; 6925 mb(); 6926 } else { 6927 sc->port.pmf = 0; 6928 } 6929 6930 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 6931 6932 /* XXX needed? */ 6933 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 6934 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 6935 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 6936 if (ncsi_oem_data_addr) { 6937 REG_WR(sc, 6938 (ncsi_oem_data_addr + 6939 offsetof(struct glob_ncsi_oem_data, driver_version)), 6940 0); 6941 } 6942 } 6943 } 6944} 6945 6946static void 6947bxe_read_mf_cfg(struct bxe_softc *sc) 6948{ 6949 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 6950 int abs_func; 6951 int vn; 6952 6953 if (BXE_NOMCP(sc)) { 6954 return; /* what should be the default value in this case */ 6955 } 6956 6957 /* 6958 * The formula for computing the absolute function number is... 
6959 * For 2 port configuration (4 functions per port): 6960 * abs_func = 2 * vn + SC_PORT + SC_PATH 6961 * For 4 port configuration (2 functions per port): 6962 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 6963 */ 6964 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 6965 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 6966 if (abs_func >= E1H_FUNC_MAX) { 6967 break; 6968 } 6969 sc->devinfo.mf_info.mf_config[vn] = 6970 MFCFG_RD(sc, func_mf_config[abs_func].config); 6971 } 6972 6973 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 6974 FUNC_MF_CFG_FUNC_DISABLED) { 6975 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 6976 sc->flags |= BXE_MF_FUNC_DIS; 6977 } else { 6978 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 6979 sc->flags &= ~BXE_MF_FUNC_DIS; 6980 } 6981} 6982 6983/* acquire split MCP access lock register */ 6984static int bxe_acquire_alr(struct bxe_softc *sc) 6985{ 6986 uint32_t j, val; 6987 6988 for (j = 0; j < 1000; j++) { 6989 val = (1UL << 31); 6990 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 6991 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 6992 if (val & (1L << 31)) 6993 break; 6994 6995 DELAY(5000); 6996 } 6997 6998 if (!(val & (1L << 31))) { 6999 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 7000 return (-1); 7001 } 7002 7003 return (0); 7004} 7005 7006/* release split MCP access lock register */ 7007static void bxe_release_alr(struct bxe_softc *sc) 7008{ 7009 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7010} 7011 7012static void 7013bxe_fan_failure(struct bxe_softc *sc) 7014{ 7015 int port = SC_PORT(sc); 7016 uint32_t ext_phy_config; 7017 7018 /* mark the failure */ 7019 ext_phy_config = 7020 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7021 7022 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7023 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7024 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7025 ext_phy_config); 7026 7027 /* log the failure */ 7028 BLOGW(sc, "Fan Failure has caused the driver to shut down " 7029 "the card to prevent permanent damage. " 7030 "Please contact OEM Support for assistance\n"); 7031 7032 /* XXX */ 7033#if 1 7034 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7035#else 7036 /* 7037 * Schedule device reset (unload). 7038 * Some boards consume enough power while the driver is up to 7039 * overheat if the fan fails. 
7040 */ 7041 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7042 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7043#endif 7044} 7045 7046/* this function is called upon a link interrupt */ 7047static void 7048bxe_link_attn(struct bxe_softc *sc) 7049{ 7050 uint32_t pause_enabled = 0; 7051 struct host_port_stats *pstats; 7052 int cmng_fns; 7053 struct bxe_fastpath *fp; 7054 int i; 7055 7056 /* Make sure that we are synced with the current statistics */ 7057 bxe_stats_handle(sc, STATS_EVENT_STOP); 7058 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); 7059 elink_link_update(&sc->link_params, &sc->link_vars); 7060 7061 if (sc->link_vars.link_up) { 7062 7063 /* dropless flow control */ 7064 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7065 pause_enabled = 0; 7066 7067 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7068 pause_enabled = 1; 7069 } 7070 7071 REG_WR(sc, 7072 (BAR_USTRORM_INTMEM + 7073 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7074 pause_enabled); 7075 } 7076 7077 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7078 pstats = BXE_SP(sc, port_stats); 7079 /* reset old mac stats */ 7080 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7081 } 7082 7083 if (sc->state == BXE_STATE_OPEN) { 7084 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7085 /* Restart tx when the link comes back. */ 7086 FOR_EACH_ETH_QUEUE(sc, i) { 7087 fp = &sc->fp[i]; 7088 taskqueue_enqueue(fp->tq, &fp->tx_task); 7089 } 7090 } 7091 7092 } 7093 7094 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7095 cmng_fns = bxe_get_cmng_fns_mode(sc); 7096 7097 if (cmng_fns != CMNG_FNS_NONE) { 7098 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7099 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7100 } else { 7101 /* rate shaping and fairness are disabled */ 7102 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7103 } 7104 } 7105 7106 bxe_link_report_locked(sc); 7107 7108 if (IS_MF(sc)) { 7109 ; // XXX bxe_link_sync_notify(sc); 7110 } 7111} 7112 7113static void 7114bxe_attn_int_asserted(struct bxe_softc *sc, 7115 uint32_t asserted) 7116{ 7117 int port = SC_PORT(sc); 7118 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7119 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7120 uint32_t nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 : 7121 NIG_REG_MASK_INTERRUPT_PORT0; 7122 uint32_t aeu_mask; 7123 uint32_t nig_mask = 0; 7124 uint32_t reg_addr; 7125 uint32_t igu_acked; 7126 uint32_t cnt; 7127 7128 if (sc->attn_state & asserted) { 7129 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7130 } 7131 7132 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7133 7134 aeu_mask = REG_RD(sc, aeu_addr); 7135 7136 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7137 aeu_mask, asserted); 7138 7139 aeu_mask &= ~(asserted & 0x3ff); 7140 7141 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7142 7143 REG_WR(sc, aeu_addr, aeu_mask); 7144 7145 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7146 7147 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7148 sc->attn_state |= asserted; 7149 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7150 7151 if (asserted & ATTN_HARD_WIRED_MASK) { 7152 if (asserted & ATTN_NIG_FOR_FUNC) { 7153 7154 bxe_acquire_phy_lock(sc); 7155 /* save nig interrupt mask */ 7156 nig_mask = REG_RD(sc, nig_int_mask_addr); 7157 7158 /* If nig_mask is not set, no need to call the update function */ 7159 if (nig_mask) { 7160 REG_WR(sc, nig_int_mask_addr, 0); 7161 7162 bxe_link_attn(sc); 7163 } 7164 7165 /* handle unicore attn? */ 7166 } 7167 7168 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7169 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7170 } 7171 7172 if (asserted & GPIO_2_FUNC) { 7173 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7174 } 7175 7176 if (asserted & GPIO_3_FUNC) { 7177 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7178 } 7179 7180 if (asserted & GPIO_4_FUNC) { 7181 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7182 } 7183 7184 if (port == 0) { 7185 if (asserted & ATTN_GENERAL_ATTN_1) { 7186 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7187 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7188 } 7189 if (asserted & ATTN_GENERAL_ATTN_2) { 7190 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7191 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7192 } 7193 if (asserted & ATTN_GENERAL_ATTN_3) { 7194 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7195 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7196 } 7197 } else { 7198 if (asserted & ATTN_GENERAL_ATTN_4) { 7199 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7200 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7201 } 7202 if (asserted & ATTN_GENERAL_ATTN_5) { 7203 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7204 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7205 } 7206 if (asserted & ATTN_GENERAL_ATTN_6) { 7207 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7208 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7209 } 7210 } 7211 } /* hardwired */ 7212 7213 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7214 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7215 } else { 7216 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7217 } 7218 7219 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7220 asserted, 7221 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7222 REG_WR(sc, reg_addr, asserted); 7223 7224 /* now set back the mask */ 7225 if (asserted & ATTN_NIG_FOR_FUNC) { 7226 /* 7227 * Verify that IGU ack through BAR was written before restoring 7228 * NIG mask. This loop should exit after 2-3 iterations max. 
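 * If the ack never arrives the error is only logged; the NIG mask is restored below regardless, so link attentions are not left masked.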
7229 */ 7230 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7231 cnt = 0; 7232 7233 do { 7234 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7235 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7236 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7237 7238 if (!igu_acked) { 7239 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7240 } 7241 7242 mb(); 7243 } 7244 7245 REG_WR(sc, nig_int_mask_addr, nig_mask); 7246 7247 bxe_release_phy_lock(sc); 7248 } 7249} 7250 7251static void 7252bxe_print_next_block(struct bxe_softc *sc, 7253 int idx, 7254 const char *blk) 7255{ 7256 BLOGI(sc, "%s%s", idx ? ", " : "", blk); 7257} 7258 7259static int 7260bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7261 uint32_t sig, 7262 int par_num, 7263 uint8_t print) 7264{ 7265 uint32_t cur_bit = 0; 7266 int i = 0; 7267 7268 for (i = 0; sig; i++) { 7269 cur_bit = ((uint32_t)0x1 << i); 7270 if (sig & cur_bit) { 7271 switch (cur_bit) { 7272 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7273 if (print) 7274 bxe_print_next_block(sc, par_num++, "BRB"); 7275 break; 7276 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7277 if (print) 7278 bxe_print_next_block(sc, par_num++, "PARSER"); 7279 break; 7280 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7281 if (print) 7282 bxe_print_next_block(sc, par_num++, "TSDM"); 7283 break; 7284 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7285 if (print) 7286 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7287 break; 7288 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7289 if (print) 7290 bxe_print_next_block(sc, par_num++, "TCM"); 7291 break; 7292 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7293 if (print) 7294 bxe_print_next_block(sc, par_num++, "TSEMI"); 7295 break; 7296 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7297 if (print) 7298 bxe_print_next_block(sc, par_num++, "XPB"); 7299 break; 7300 } 7301 7302 /* Clear the bit */ 7303 sig &= ~cur_bit; 7304 } 7305 } 7306 7307 return (par_num); 7308} 7309 7310static int 7311bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7312 uint32_t sig, 7313 int par_num, 7314 uint8_t *global, 7315 uint8_t print) 7316{ 7317 int i = 0; 7318 uint32_t cur_bit = 0; 7319 for (i = 0; sig; i++) { 7320 cur_bit = ((uint32_t)0x1 << i); 7321 if (sig & cur_bit) { 7322 switch (cur_bit) { 7323 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7324 if (print) 7325 bxe_print_next_block(sc, par_num++, "PBF"); 7326 break; 7327 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7328 if (print) 7329 bxe_print_next_block(sc, par_num++, "QM"); 7330 break; 7331 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7332 if (print) 7333 bxe_print_next_block(sc, par_num++, "TM"); 7334 break; 7335 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7336 if (print) 7337 bxe_print_next_block(sc, par_num++, "XSDM"); 7338 break; 7339 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7340 if (print) 7341 bxe_print_next_block(sc, par_num++, "XCM"); 7342 break; 7343 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7344 if (print) 7345 bxe_print_next_block(sc, par_num++, "XSEMI"); 7346 break; 7347 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7348 if (print) 7349 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7350 break; 7351 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7352 if (print) 7353 bxe_print_next_block(sc, par_num++, "NIG"); 7354 break; 7355 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7356 if (print) 7357 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7358 *global = TRUE; 7359 break; 7360 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7361 if (print) 7362 bxe_print_next_block(sc, par_num++, 
"DEBUG"); 7363 break; 7364 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7365 if (print) 7366 bxe_print_next_block(sc, par_num++, "USDM"); 7367 break; 7368 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7369 if (print) 7370 bxe_print_next_block(sc, par_num++, "UCM"); 7371 break; 7372 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7373 if (print) 7374 bxe_print_next_block(sc, par_num++, "USEMI"); 7375 break; 7376 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7377 if (print) 7378 bxe_print_next_block(sc, par_num++, "UPB"); 7379 break; 7380 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7381 if (print) 7382 bxe_print_next_block(sc, par_num++, "CSDM"); 7383 break; 7384 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7385 if (print) 7386 bxe_print_next_block(sc, par_num++, "CCM"); 7387 break; 7388 } 7389 7390 /* Clear the bit */ 7391 sig &= ~cur_bit; 7392 } 7393 } 7394 7395 return (par_num); 7396} 7397 7398static int 7399bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7400 uint32_t sig, 7401 int par_num, 7402 uint8_t print) 7403{ 7404 uint32_t cur_bit = 0; 7405 int i = 0; 7406 7407 for (i = 0; sig; i++) { 7408 cur_bit = ((uint32_t)0x1 << i); 7409 if (sig & cur_bit) { 7410 switch (cur_bit) { 7411 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7412 if (print) 7413 bxe_print_next_block(sc, par_num++, "CSEMI"); 7414 break; 7415 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7416 if (print) 7417 bxe_print_next_block(sc, par_num++, "PXP"); 7418 break; 7419 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7420 if (print) 7421 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7422 break; 7423 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7424 if (print) 7425 bxe_print_next_block(sc, par_num++, "CFC"); 7426 break; 7427 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7428 if (print) 7429 bxe_print_next_block(sc, par_num++, "CDU"); 7430 break; 7431 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7432 if (print) 7433 bxe_print_next_block(sc, par_num++, "DMAE"); 7434 break; 7435 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7436 if (print) 7437 bxe_print_next_block(sc, par_num++, "IGU"); 7438 break; 7439 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7440 if (print) 7441 bxe_print_next_block(sc, par_num++, "MISC"); 7442 break; 7443 } 7444 7445 /* Clear the bit */ 7446 sig &= ~cur_bit; 7447 } 7448 } 7449 7450 return (par_num); 7451} 7452 7453static int 7454bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7455 uint32_t sig, 7456 int par_num, 7457 uint8_t *global, 7458 uint8_t print) 7459{ 7460 uint32_t cur_bit = 0; 7461 int i = 0; 7462 7463 for (i = 0; sig; i++) { 7464 cur_bit = ((uint32_t)0x1 << i); 7465 if (sig & cur_bit) { 7466 switch (cur_bit) { 7467 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7468 if (print) 7469 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7470 *global = TRUE; 7471 break; 7472 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7473 if (print) 7474 bxe_print_next_block(sc, par_num++, 7475 "MCP UMP RX"); 7476 *global = TRUE; 7477 break; 7478 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7479 if (print) 7480 bxe_print_next_block(sc, par_num++, 7481 "MCP UMP TX"); 7482 *global = TRUE; 7483 break; 7484 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7485 if (print) 7486 bxe_print_next_block(sc, par_num++, 7487 "MCP SCPAD"); 7488 *global = TRUE; 7489 break; 7490 } 7491 7492 /* Clear the bit */ 7493 sig &= ~cur_bit; 7494 } 7495 } 7496 7497 return (par_num); 7498} 7499 7500static int 7501bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7502 uint32_t sig, 7503 int par_num, 7504 
uint8_t print) 7505{ 7506 uint32_t cur_bit = 0; 7507 int i = 0; 7508 7509 for (i = 0; sig; i++) { 7510 cur_bit = ((uint32_t)0x1 << i); 7511 if (sig & cur_bit) { 7512 switch (cur_bit) { 7513 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7514 if (print) 7515 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7516 break; 7517 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7518 if (print) 7519 bxe_print_next_block(sc, par_num++, "ATC"); 7520 break; 7521 } 7522 7523 /* Clear the bit */ 7524 sig &= ~cur_bit; 7525 } 7526 } 7527 7528 return (par_num); 7529} 7530 7531static uint8_t 7532bxe_parity_attn(struct bxe_softc *sc, 7533 uint8_t *global, 7534 uint8_t print, 7535 uint32_t *sig) 7536{ 7537 int par_num = 0; 7538 7539 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7540 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7541 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7542 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7543 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7544 BLOGE(sc, "Parity error: HW block parity attention:\n" 7545 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7546 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7547 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7548 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7549 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7550 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7551 7552 if (print) 7553 BLOGI(sc, "Parity errors detected in blocks: "); 7554 7555 par_num = 7556 bxe_check_blocks_with_parity0(sc, sig[0] & 7557 HW_PRTY_ASSERT_SET_0, 7558 par_num, print); 7559 par_num = 7560 bxe_check_blocks_with_parity1(sc, sig[1] & 7561 HW_PRTY_ASSERT_SET_1, 7562 par_num, global, print); 7563 par_num = 7564 bxe_check_blocks_with_parity2(sc, sig[2] & 7565 HW_PRTY_ASSERT_SET_2, 7566 par_num, print); 7567 par_num = 7568 bxe_check_blocks_with_parity3(sc, sig[3] & 7569 HW_PRTY_ASSERT_SET_3, 7570 par_num, global, print); 7571 par_num = 7572 bxe_check_blocks_with_parity4(sc, sig[4] & 7573 HW_PRTY_ASSERT_SET_4, 7574 par_num, print); 7575 7576 if (print) 7577 BLOGI(sc, "\n"); 7578 7579 if( *global == TRUE ) { 7580 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); 7581 } 7582 7583 return (TRUE); 7584 } 7585 7586 return (FALSE); 7587} 7588 7589static uint8_t 7590bxe_chk_parity_attn(struct bxe_softc *sc, 7591 uint8_t *global, 7592 uint8_t print) 7593{ 7594 struct attn_route attn = { {0} }; 7595 int port = SC_PORT(sc); 7596 7597 if(sc->state != BXE_STATE_OPEN) 7598 return FALSE; 7599 7600 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7601 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7602 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7603 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7604 7605 /* 7606 * Since MCP attentions can't be disabled inside the block, we need to 7607 * read AEU registers to see whether they're currently disabled 7608 */ 7609 attn.sig[3] &= ((REG_RD(sc, (!port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 7610 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & 7611 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 7612 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 7613 7614 7615 if (!CHIP_IS_E1x(sc)) 7616 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7617 7618 return (bxe_parity_attn(sc, global, print, attn.sig)); 7619} 7620 7621static void 7622bxe_attn_int_deasserted4(struct bxe_softc *sc, 7623 uint32_t attn) 7624{ 7625 uint32_t val; 7626 boolean_t err_flg = FALSE; 7627 7628 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7629 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7630 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7631 err_flg = TRUE; 7632 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7633 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7634 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7635 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7636 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7637 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7638 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7639 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7640 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7641 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7642 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7643 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7644 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7645 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7646 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7647 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7648 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7649 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7650 } 7651 7652 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7653 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7654 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7655 err_flg = TRUE; 7656 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7657 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7658 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7659 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7660 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7661 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7662 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7663 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7664 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7665 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7666 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7667 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7668 } 7669 7670 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7671 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7672 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7673 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7674 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7675 err_flg = TRUE; 7676 } 7677 if (err_flg) { 7678 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); 7679 taskqueue_enqueue_timeout(taskqueue_thread, 7680 &sc->sp_err_timeout_task, hz/10); 7681 } 7682 7683} 7684 7685static void 7686bxe_e1h_disable(struct bxe_softc *sc) 7687{ 7688 int port = SC_PORT(sc); 7689 7690 bxe_tx_disable(sc); 7691 7692 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7693} 7694 7695static void 7696bxe_e1h_enable(struct bxe_softc *sc) 7697{ 7698 int port = SC_PORT(sc); 
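    /* reopen this function in the NIG LLH; bxe_e1h_disable() wrote 0 here to block traffic while the function was disabled via the MF config */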
7699 7700 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7701 7702 // XXX bxe_tx_enable(sc); 7703} 7704 7705/* 7706 * called due to MCP event (on pmf): 7707 * reread new bandwidth configuration 7708 * configure FW 7709 * notify other functions about the change 7710 */ 7711static void 7712bxe_config_mf_bw(struct bxe_softc *sc) 7713{ 7714 if (sc->link_vars.link_up) { 7715 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 7716 // XXX bxe_link_sync_notify(sc); 7717 } 7718 7719 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7720} 7721 7722static void 7723bxe_set_mf_bw(struct bxe_softc *sc) 7724{ 7725 bxe_config_mf_bw(sc); 7726 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 7727} 7728 7729static void 7730bxe_handle_eee_event(struct bxe_softc *sc) 7731{ 7732 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 7733 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 7734} 7735 7736#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 7737 7738static void 7739bxe_drv_info_ether_stat(struct bxe_softc *sc) 7740{ 7741 struct eth_stats_info *ether_stat = 7742 &sc->sp->drv_info_to_mcp.ether_stat; 7743 7744 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 7745 ETH_STAT_INFO_VERSION_LEN); 7746 7747 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 7748 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 7749 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 7750 ether_stat->mac_local + MAC_PAD, 7751 MAC_PAD, ETH_ALEN); 7752 7753 ether_stat->mtu_size = sc->mtu; 7754 7755 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 7756 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 7757 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 7758 } 7759 7760 // XXX ether_stat->feature_flags |= ???; 7761 7762 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; 7763 7764 ether_stat->txq_size = sc->tx_ring_size; 7765 ether_stat->rxq_size = sc->rx_ring_size; 7766} 7767 7768static void 7769bxe_handle_drv_info_req(struct bxe_softc *sc) 7770{ 7771 enum drv_info_opcode op_code; 7772 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 7773 7774 /* if drv_info version supported by MFW doesn't match - send NACK */ 7775 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 7776 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7777 return; 7778 } 7779 7780 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 7781 DRV_INFO_CONTROL_OP_CODE_SHIFT); 7782 7783 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 7784 7785 switch (op_code) { 7786 case ETH_STATS_OPCODE: 7787 bxe_drv_info_ether_stat(sc); 7788 break; 7789 case FCOE_STATS_OPCODE: 7790 case ISCSI_STATS_OPCODE: 7791 default: 7792 /* if op code isn't supported - send NACK */ 7793 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7794 return; 7795 } 7796 7797 /* 7798 * If we got drv_info attn from MFW then these fields are defined in 7799 * shmem2 for sure 7800 */ 7801 SHMEM2_WR(sc, drv_info_host_addr_lo, 7802 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7803 SHMEM2_WR(sc, drv_info_host_addr_hi, 7804 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7805 7806 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 7807} 7808 7809static void 7810bxe_dcc_event(struct bxe_softc *sc, 7811 uint32_t dcc_event) 7812{ 7813 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 7814 7815 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 7816 /* 7817 * This is the only place besides the function initialization 7818 * where the sc->flags can change so it is done without any 7819 * locks 7820 */ 7821 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 7822 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 7823 sc->flags |= BXE_MF_FUNC_DIS; 7824 bxe_e1h_disable(sc); 7825 } else { 7826 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 7827 sc->flags &= ~BXE_MF_FUNC_DIS; 7828 bxe_e1h_enable(sc); 7829 } 7830 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 7831 } 7832 7833 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 7834 bxe_config_mf_bw(sc); 7835 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 7836 } 7837 7838 /* Report results to MCP */ 7839 if (dcc_event) 7840 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 7841 else 7842 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 7843} 7844 7845static void 7846bxe_pmf_update(struct bxe_softc *sc) 7847{ 7848 int port = SC_PORT(sc); 7849 uint32_t val; 7850 7851 sc->port.pmf = 1; 7852 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 7853 7854 /* 7855 * We need the mb() to ensure the ordering between the writing to 7856 * sc->port.pmf here and reading it from the bxe_periodic_task(). 7857 */ 7858 mb(); 7859 7860 /* queue a periodic task */ 7861 // XXX schedule task... 
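    /* the edge-latch value written below (0xff0f plus a bit at (vn + 4)) is assumed, per the equivalent bnx2x flow, to unmask the per-VN function attention for the new PMF on both leading and trailing edges */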
7862 7863 // XXX bxe_dcbx_pmf_update(sc); 7864 7865 /* enable nig attention */ 7866 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 7867 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7868 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 7869 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 7870 } else if (!CHIP_IS_E1x(sc)) { 7871 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 7872 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 7873 } 7874 7875 bxe_stats_handle(sc, STATS_EVENT_PMF); 7876} 7877 7878static int 7879bxe_mc_assert(struct bxe_softc *sc) 7880{ 7881 char last_idx; 7882 int i, rc = 0; 7883 uint32_t row0, row1, row2, row3; 7884 7885 /* XSTORM */ 7886 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 7887 if (last_idx) 7888 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7889 7890 /* print the asserts */ 7891 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7892 7893 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 7894 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 7895 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 7896 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 7897 7898 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7899 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7900 i, row3, row2, row1, row0); 7901 rc++; 7902 } else { 7903 break; 7904 } 7905 } 7906 7907 /* TSTORM */ 7908 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 7909 if (last_idx) { 7910 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7911 } 7912 7913 /* print the asserts */ 7914 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7915 7916 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 7917 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 7918 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 7919 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 7920 7921 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7922 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7923 i, row3, row2, row1, row0); 7924 rc++; 7925 } else { 7926 break; 7927 } 7928 } 7929 7930 /* CSTORM */ 7931 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 7932 if (last_idx) { 7933 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7934 } 7935 7936 /* print the asserts */ 7937 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7938 7939 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 7940 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 7941 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 7942 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 7943 7944 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7945 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7946 i, row3, row2, row1, row0); 7947 rc++; 7948 } else { 7949 break; 7950 } 7951 } 7952 7953 /* USTORM */ 7954 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 7955 if (last_idx) { 7956 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7957 } 7958 7959 /* print the asserts */ 7960 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7961 7962 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 7963 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 7964 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + 
USTORM_ASSERT_LIST_OFFSET(i) + 8); 7965 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 7966 7967 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7968 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7969 i, row3, row2, row1, row0); 7970 rc++; 7971 } else { 7972 break; 7973 } 7974 } 7975 7976 return (rc); 7977} 7978 7979static void 7980bxe_attn_int_deasserted3(struct bxe_softc *sc, 7981 uint32_t attn) 7982{ 7983 int func = SC_FUNC(sc); 7984 uint32_t val; 7985 7986 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 7987 7988 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 7989 7990 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7991 bxe_read_mf_cfg(sc); 7992 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 7993 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 7994 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 7995 7996 if (val & DRV_STATUS_DCC_EVENT_MASK) 7997 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 7998 7999 if (val & DRV_STATUS_SET_MF_BW) 8000 bxe_set_mf_bw(sc); 8001 8002 if (val & DRV_STATUS_DRV_INFO_REQ) 8003 bxe_handle_drv_info_req(sc); 8004 8005 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8006 bxe_pmf_update(sc); 8007 8008 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8009 bxe_handle_eee_event(sc); 8010 8011 if (sc->link_vars.periodic_flags & 8012 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8013 /* sync with link */ 8014 bxe_acquire_phy_lock(sc); 8015 sc->link_vars.periodic_flags &= 8016 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8017 bxe_release_phy_lock(sc); 8018 if (IS_MF(sc)) 8019 ; // XXX bxe_link_sync_notify(sc); 8020 bxe_link_report(sc); 8021 } 8022 8023 /* 8024 * Always call it here: bxe_link_report() will 8025 * prevent duplicate link indications. 8026 */ 8027 bxe_link_status_update(sc); 8028 8029 } else if (attn & BXE_MC_ASSERT_BITS) { 8030 8031 BLOGE(sc, "MC assert!\n"); 8032 bxe_mc_assert(sc); 8033 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8034 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8035 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8036 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8037 bxe_int_disable(sc); 8038 BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT); 8039 taskqueue_enqueue_timeout(taskqueue_thread, 8040 &sc->sp_err_timeout_task, hz/10); 8041 8042 } else if (attn & BXE_MCP_ASSERT) { 8043 8044 BLOGE(sc, "MCP assert!\n"); 8045 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8046 BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT); 8047 taskqueue_enqueue_timeout(taskqueue_thread, 8048 &sc->sp_err_timeout_task, hz/10); 8049 bxe_int_disable(sc); /* avoid repetitive assert alerts */ 8050 8051 8052 } else { 8053 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8054 } 8055 } 8056 8057 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8058 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8059 if (attn & BXE_GRC_TIMEOUT) { 8060 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8061 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8062 } 8063 if (attn & BXE_GRC_RSV) { 8064 val = CHIP_IS_E1(sc) ?
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 8065 BLOGE(sc, "GRC reserved 0x%08x\n", val); 8066 } 8067 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 8068 } 8069} 8070 8071static void 8072bxe_attn_int_deasserted2(struct bxe_softc *sc, 8073 uint32_t attn) 8074{ 8075 int port = SC_PORT(sc); 8076 int reg_offset; 8077 uint32_t val0, mask0, val1, mask1; 8078 uint32_t val; 8079 boolean_t err_flg = FALSE; 8080 8081 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 8082 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 8083 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 8084 /* CFC error attention */ 8085 if (val & 0x2) { 8086 BLOGE(sc, "FATAL error from CFC\n"); 8087 err_flg = TRUE; 8088 } 8089 } 8090 8091 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8092 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8093 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8094 /* RQ_USDMDP_FIFO_OVERFLOW */ 8095 if (val & 0x18000) { 8096 BLOGE(sc, "FATAL error from PXP\n"); 8097 err_flg = TRUE; 8098 } 8099 8100 if (!CHIP_IS_E1x(sc)) { 8101 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8102 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8103 err_flg = TRUE; 8104 } 8105 } 8106 8107#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8108#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8109 8110 if (attn & AEU_PXP2_HW_INT_BIT) { 8111 /* CQ47854 workaround: do not panic on 8112 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8113 */ 8114 if (!CHIP_IS_E1x(sc)) { 8115 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8116 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8117 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8118 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8119 /* 8120 * If only the PXP2_EOP_ERROR_BIT is set in 8121 * STS0 and STS1, clear it. 8122 * 8123 * We probably lose additional attentions between 8124 * reading STS0 and STS_CLR0; in that case the user will 8125 * not be notified about them. 8126 */ 8127 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8128 !(val1 & mask1)) 8129 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8130 8131 /* print the register, since no one can restore it */ 8132 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8133 8134 /* 8135 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8136 * then notify 8137 */ 8138 if (val0 & PXP2_EOP_ERROR_BIT) { 8139 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8140 err_flg = TRUE; 8141 8142 /* 8143 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8144 * set then clear attention from PXP2 block without panic 8145 */ 8146 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8147 ((val1 & mask1) == 0)) 8148 attn &= ~AEU_PXP2_HW_INT_BIT; 8149 } 8150 } 8151 } 8152 8153 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8154 reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8155 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8156 8157 val = REG_RD(sc, reg_offset); 8158 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8159 REG_WR(sc, reg_offset, val); 8160 8161 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8162 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8163 err_flg = TRUE; 8164 bxe_panic(sc, ("HW block attention set2\n")); 8165 } 8166 if (err_flg) { 8167 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); 8168 taskqueue_enqueue_timeout(taskqueue_thread, 8169 &sc->sp_err_timeout_task, hz/10); 8170 } 8171 8172} 8173 8174static void 8175bxe_attn_int_deasserted1(struct bxe_softc *sc, 8176 uint32_t attn) 8177{ 8178 int port = SC_PORT(sc); 8179 int reg_offset; 8180 uint32_t val; 8181 boolean_t err_flg = FALSE; 8182 8183 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8184 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8185 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8186 /* DORQ discard attention */ 8187 if (val & 0x2) { 8188 BLOGE(sc, "FATAL error from DORQ\n"); 8189 err_flg = TRUE; 8190 } 8191 } 8192 8193 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8194 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8195 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8196 8197 val = REG_RD(sc, reg_offset); 8198 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8199 REG_WR(sc, reg_offset, val); 8200 8201 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8202 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8203 err_flg = TRUE; 8204 bxe_panic(sc, ("HW block attention set1\n")); 8205 } 8206 if (err_flg) { 8207 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); 8208 taskqueue_enqueue_timeout(taskqueue_thread, 8209 &sc->sp_err_timeout_task, hz/10); 8210 } 8211 8212} 8213 8214static void 8215bxe_attn_int_deasserted0(struct bxe_softc *sc, 8216 uint32_t attn) 8217{ 8218 int port = SC_PORT(sc); 8219 int reg_offset; 8220 uint32_t val; 8221 8222 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8223 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8224 8225 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8226 val = REG_RD(sc, reg_offset); 8227 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8228 REG_WR(sc, reg_offset, val); 8229 8230 BLOGW(sc, "SPIO5 hw attention\n"); 8231 8232 /* Fan failure attention */ 8233 elink_hw_reset_phy(&sc->link_params); 8234 bxe_fan_failure(sc); 8235 } 8236 8237 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8238 bxe_acquire_phy_lock(sc); 8239 elink_handle_module_detect_int(&sc->link_params); 8240 bxe_release_phy_lock(sc); 8241 } 8242 8243 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8244 val = REG_RD(sc, reg_offset); 8245 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8246 REG_WR(sc, reg_offset, val); 8247 8248 8249 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); 8250 taskqueue_enqueue_timeout(taskqueue_thread, 8251 &sc->sp_err_timeout_task, hz/10); 8252 8253 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8254 (attn & HW_INTERRUT_ASSERT_SET_0))); 8255 } 8256} 8257 8258static void 8259bxe_attn_int_deasserted(struct bxe_softc *sc, 8260 uint32_t deasserted) 8261{ 8262 struct attn_route attn; 8263 struct attn_route *group_mask; 8264 int port = SC_PORT(sc); 8265 int index; 8266 uint32_t reg_addr; 8267 uint32_t val; 8268 uint32_t aeu_mask; 8269 uint8_t global = FALSE; 8270 8271 /* 8272 * Need to take the HW lock because the MCP or the other port might also 8273 * try to handle this event. 8274 */ 8275 bxe_acquire_alr(sc); 8276 8277 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8278 /* XXX 8279 * In case of parity errors don't handle attentions so that 8280 * the other functions would "see" the parity errors.
8281 */ 8282 // XXX schedule a recovery task... 8283 /* disable HW interrupts */ 8284 bxe_int_disable(sc); 8285 BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY); 8286 taskqueue_enqueue_timeout(taskqueue_thread, 8287 &sc->sp_err_timeout_task, hz/10); 8288 bxe_release_alr(sc); 8289 return; 8290 } 8291 8292 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8293 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8294 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8295 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8296 if (!CHIP_IS_E1x(sc)) { 8297 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8298 } else { 8299 attn.sig[4] = 0; 8300 } 8301 8302 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8303 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8304 8305 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8306 if (deasserted & (1 << index)) { 8307 group_mask = &sc->attn_group[index]; 8308 8309 BLOGD(sc, DBG_INTR, 8310 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8311 group_mask->sig[0], group_mask->sig[1], 8312 group_mask->sig[2], group_mask->sig[3], 8313 group_mask->sig[4]); 8314 8315 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8316 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8317 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8318 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8319 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8320 } 8321 } 8322 8323 bxe_release_alr(sc); 8324 8325 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8326 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8327 COMMAND_REG_ATTN_BITS_CLR); 8328 } else { 8329 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8330 } 8331 8332 val = ~deasserted; 8333 BLOGD(sc, DBG_INTR, 8334 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8335 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8336 REG_WR(sc, reg_addr, val); 8337 8338 if (~sc->attn_state & deasserted) { 8339 BLOGE(sc, "IGU error\n"); 8340 } 8341 8342 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8343 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8344 8345 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8346 8347 aeu_mask = REG_RD(sc, reg_addr); 8348 8349 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8350 aeu_mask, deasserted); 8351 aeu_mask |= (deasserted & 0x3ff); 8352 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8353 8354 REG_WR(sc, reg_addr, aeu_mask); 8355 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8356 8357 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8358 sc->attn_state &= ~deasserted; 8359 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8360} 8361 8362static void 8363bxe_attn_int(struct bxe_softc *sc) 8364{ 8365 /* read local copy of bits */ 8366 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8367 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8368 uint32_t attn_state = sc->attn_state; 8369 8370 /* look for changed bits */ 8371 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8372 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8373 8374 BLOGD(sc, DBG_INTR, 8375 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8376 attn_bits, attn_ack, asserted, deasserted); 8377 8378 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8379 BLOGE(sc, "BAD attention state\n"); 8380 } 8381 8382 /* handle bits that were raised */ 8383 if (asserted) { 8384 bxe_attn_int_asserted(sc, asserted); 8385 } 8386 8387 if (deasserted) { 8388 bxe_attn_int_deasserted(sc, deasserted); 8389 } 8390} 8391 8392static uint16_t 8393bxe_update_dsb_idx(struct bxe_softc *sc) 8394{ 8395 struct host_sp_status_block *def_sb = sc->def_sb; 8396 uint16_t rc = 0; 8397 8398 mb(); /* status block is written to by the chip */ 8399 8400 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8401 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8402 rc |= BXE_DEF_SB_ATT_IDX; 8403 } 8404 8405 if (sc->def_idx != def_sb->sp_sb.running_index) { 8406 sc->def_idx = def_sb->sp_sb.running_index; 8407 rc |= BXE_DEF_SB_IDX; 8408 } 8409 8410 mb(); 8411 8412 return (rc); 8413} 8414 8415static inline struct ecore_queue_sp_obj * 8416bxe_cid_to_q_obj(struct bxe_softc *sc, 8417 uint32_t cid) 8418{ 8419 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8420 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8421} 8422 8423static void 8424bxe_handle_mcast_eqe(struct bxe_softc *sc) 8425{ 8426 struct ecore_mcast_ramrod_params rparam; 8427 int rc; 8428 8429 memset(&rparam, 0, sizeof(rparam)); 8430 8431 rparam.mcast_obj = &sc->mcast_obj; 8432 8433 BXE_MCAST_LOCK(sc); 8434 8435 /* clear pending state for the last command */ 8436 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8437 8438 /* if there are pending mcast commands - send them */ 8439 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8440 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8441 if (rc < 0) { 8442 BLOGD(sc, DBG_SP, 8443 "ERROR: Failed to send pending mcast commands (%d)\n", rc); 8444 } 8445 } 8446 8447 BXE_MCAST_UNLOCK(sc); 8448} 8449 8450static void 8451bxe_handle_classification_eqe(struct bxe_softc *sc, 8452 union event_ring_elem *elem) 8453{ 8454 unsigned long ramrod_flags = 0; 8455 int rc = 0; 8456 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8457 struct ecore_vlan_mac_obj *vlan_mac_obj; 8458 8459 /* always push next commands out, don't wait here */ 8460 bit_set(&ramrod_flags, RAMROD_CONT); 8461 8462 
switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8463 case ECORE_FILTER_MAC_PENDING: 8464 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8465 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8466 break; 8467 8468 case ECORE_FILTER_MCAST_PENDING: 8469 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8470 /* 8471 * This is only relevant for 57710 where multicast MACs are 8472 * configured as unicast MACs using the same ramrod. 8473 */ 8474 bxe_handle_mcast_eqe(sc); 8475 return; 8476 8477 default: 8478 BLOGE(sc, "Unsupported classification command: %d\n", 8479 elem->message.data.eth_event.echo); 8480 return; 8481 } 8482 8483 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8484 8485 if (rc < 0) { 8486 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8487 } else if (rc > 0) { 8488 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8489 } 8490} 8491 8492static void 8493bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8494 union event_ring_elem *elem) 8495{ 8496 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8497 8498 /* send the rx_mode command again if it was requested */ 8499 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8500 &sc->sp_state)) { 8501 bxe_set_storm_rx_mode(sc); 8502 } 8503} 8504 8505static void 8506bxe_update_eq_prod(struct bxe_softc *sc, 8507 uint16_t prod) 8508{ 8509 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8510 wmb(); /* keep prod updates ordered */ 8511} 8512 8513static void 8514bxe_eq_int(struct bxe_softc *sc) 8515{ 8516 uint16_t hw_cons, sw_cons, sw_prod; 8517 union event_ring_elem *elem; 8518 uint8_t echo; 8519 uint32_t cid; 8520 uint8_t opcode; 8521 int spqe_cnt = 0; 8522 struct ecore_queue_sp_obj *q_obj; 8523 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8524 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8525 8526 hw_cons = le16toh(*sc->eq_cons_sb); 8527 8528 /* 8529 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8530 * When we get to the next-page we need to adjust so the loop 8531 * condition below will be met. The next element is the size of a 8532 * regular element, hence we increment by 1. 8533 */ 8534 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8535 hw_cons++; 8536 } 8537 8538 /* 8539 * This function never runs in parallel with itself for a 8540 * specific sc, so there is no need for a read memory barrier here.
8541 */ 8542 sw_cons = sc->eq_cons; 8543 sw_prod = sc->eq_prod; 8544 8545 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8546 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8547 8548 for (; 8549 sw_cons != hw_cons; 8550 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8551 8552 elem = &sc->eq[EQ_DESC(sw_cons)]; 8553 8554 /* elem CID originates from FW, actually LE */ 8555 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8556 opcode = elem->message.opcode; 8557 8558 /* handle eq element */ 8559 switch (opcode) { 8560 8561 case EVENT_RING_OPCODE_STAT_QUERY: 8562 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8563 sc->stats_comp++); 8564 /* nothing to do with stats comp */ 8565 goto next_spqe; 8566 8567 case EVENT_RING_OPCODE_CFC_DEL: 8568 /* handle according to cid range */ 8569 /* we may want to verify here that the sc state is HALTING */ 8570 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8571 q_obj = bxe_cid_to_q_obj(sc, cid); 8572 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8573 break; 8574 } 8575 goto next_spqe; 8576 8577 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8578 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8579 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8580 break; 8581 } 8582 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8583 goto next_spqe; 8584 8585 case EVENT_RING_OPCODE_START_TRAFFIC: 8586 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8587 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8588 break; 8589 } 8590 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8591 goto next_spqe; 8592 8593 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8594 echo = elem->message.data.function_update_event.echo; 8595 if (echo == SWITCH_UPDATE) { 8596 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8597 if (f_obj->complete_cmd(sc, f_obj, 8598 ECORE_F_CMD_SWITCH_UPDATE)) { 8599 break; 8600 } 8601 } 8602 else { 8603 BLOGD(sc, DBG_SP, 8604 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8605 } 8606 goto next_spqe; 8607 8608 case EVENT_RING_OPCODE_FORWARD_SETUP: 8609 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8610 if (q_obj->complete_cmd(sc, q_obj, 8611 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8612 break; 8613 } 8614 goto next_spqe; 8615 8616 case EVENT_RING_OPCODE_FUNCTION_START: 8617 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8618 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8619 break; 8620 } 8621 goto next_spqe; 8622 8623 case EVENT_RING_OPCODE_FUNCTION_STOP: 8624 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8625 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8626 break; 8627 } 8628 goto next_spqe; 8629 } 8630 8631 switch (opcode | sc->state) { 8632 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8633 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8634 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8635 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", cid); 8636 rss_raw->clear_pending(rss_raw); 8637 break; 8638 8639 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8640 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8641 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8642 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8643 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8644 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8645 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8646 bxe_handle_classification_eqe(sc, elem); 8647 break; 8648 8649 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8650 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8651 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8652 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8653 bxe_handle_mcast_eqe(sc); 8654 break; 8655 8656 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8657 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8658 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8659 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8660 bxe_handle_rx_mode_eqe(sc, elem); 8661 break; 8662 8663 default: 8664 /* unknown event log error and continue */ 8665 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 8666 elem->message.opcode, sc->state); 8667 } 8668 8669next_spqe: 8670 spqe_cnt++; 8671 } /* for */ 8672 8673 mb(); 8674 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 8675 8676 sc->eq_cons = sw_cons; 8677 sc->eq_prod = sw_prod; 8678 8679 /* make sure that above mem writes were issued towards the memory */ 8680 wmb(); 8681 8682 /* update producer */ 8683 bxe_update_eq_prod(sc, sc->eq_prod); 8684} 8685 8686static void 8687bxe_handle_sp_tq(void *context, 8688 int pending) 8689{ 8690 struct bxe_softc *sc = (struct bxe_softc *)context; 8691 uint16_t status; 8692 8693 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 8694 8695 /* what work needs to be performed? */ 8696 status = bxe_update_dsb_idx(sc); 8697 8698 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 8699 8700 /* HW attentions */ 8701 if (status & BXE_DEF_SB_ATT_IDX) { 8702 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 8703 bxe_attn_int(sc); 8704 status &= ~BXE_DEF_SB_ATT_IDX; 8705 } 8706 8707 /* SP events: STAT_QUERY and others */ 8708 if (status & BXE_DEF_SB_IDX) { 8709 /* handle EQ completions */ 8710 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 8711 bxe_eq_int(sc); 8712 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 8713 le16toh(sc->def_idx), IGU_INT_NOP, 1); 8714 status &= ~BXE_DEF_SB_IDX; 8715 } 8716 8717 /* if status is non zero then something went wrong */ 8718 if (__predict_false(status)) { 8719 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 8720 } 8721 8722 /* ack status block only if something was actually handled */ 8723 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 8724 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 8725 8726 /* 8727 * Must be called after the EQ processing (since eq leads to sriov 8728 * ramrod completion flows). 8729 * This flow may have been scheduled by the arrival of a ramrod 8730 * completion, or by the sriov code rescheduling itself. 
8731 */ 8732 // XXX bxe_iov_sp_task(sc); 8733 8734} 8735 8736static void 8737bxe_handle_fp_tq(void *context, 8738 int pending) 8739{ 8740 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 8741 struct bxe_softc *sc = fp->sc; 8742 uint8_t more_tx = FALSE; 8743 uint8_t more_rx = FALSE; 8744 8745 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 8746 8747 /* XXX 8748 * IFF_DRV_RUNNING state can't be checked here since we process 8749 * slowpath events on a client queue during setup. Instead 8750 * we need to add a "process/continue" flag here that the driver 8751 * can use to tell the task here not to do anything. 8752 */ 8753#if 0 8754 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 8755 return; 8756 } 8757#endif 8758 8759 /* update the fastpath index */ 8760 bxe_update_fp_sb_idx(fp); 8761 8762 /* XXX add loop here if ever support multiple tx CoS */ 8763 /* fp->txdata[cos] */ 8764 if (bxe_has_tx_work(fp)) { 8765 BXE_FP_TX_LOCK(fp); 8766 more_tx = bxe_txeof(sc, fp); 8767 BXE_FP_TX_UNLOCK(fp); 8768 } 8769 8770 if (bxe_has_rx_work(fp)) { 8771 more_rx = bxe_rxeof(sc, fp); 8772 } 8773 8774 if (more_rx /*|| more_tx*/) { 8775 /* still more work to do */ 8776 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 8777 return; 8778 } 8779 8780 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 8781 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 8782} 8783 8784static void 8785bxe_task_fp(struct bxe_fastpath *fp) 8786{ 8787 struct bxe_softc *sc = fp->sc; 8788 uint8_t more_tx = FALSE; 8789 uint8_t more_rx = FALSE; 8790 8791 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 8792 8793 /* update the fastpath index */ 8794 bxe_update_fp_sb_idx(fp); 8795 8796 /* XXX add loop here if ever support multiple tx CoS */ 8797 /* fp->txdata[cos] */ 8798 if (bxe_has_tx_work(fp)) { 8799 BXE_FP_TX_LOCK(fp); 8800 more_tx = bxe_txeof(sc, fp); 8801 BXE_FP_TX_UNLOCK(fp); 8802 } 8803 8804 if (bxe_has_rx_work(fp)) { 8805 more_rx = bxe_rxeof(sc, fp); 8806 } 8807 8808 if (more_rx /*|| more_tx*/) { 8809 /* still more work to do; bail out of this ISR and process later */ 8810 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 8811 return; 8812 } 8813 8814 /* 8815 * Here we write the fastpath index taken before doing any tx or rx work. 8816 * It is quite possible that other HW events occurred up to this point 8817 * and were actually processed accordingly above. Since we're going to 8818 * write an older fastpath index, an interrupt will arrive in which we 8819 * might not do any work. 8820 */ 8821 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 8822 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 8823} 8824 8825/* 8826 * Legacy interrupt entry point. 8827 * 8828 * Verifies that the controller generated the interrupt and 8829 * then calls a separate routine to handle the various 8830 * interrupt causes: link, RX, and TX.
8831 */ 8832static void 8833bxe_intr_legacy(void *xsc) 8834{ 8835 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8836 struct bxe_fastpath *fp; 8837 uint16_t status, mask; 8838 int i; 8839 8840 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 8841 8842 /* 8843 * 0 for ustorm, 1 for cstorm 8844 * the bits returned from ack_int() are 0-15 8845 * bit 0 = attention status block 8846 * bit 1 = fast path status block 8847 * a mask of 0x2 or more = tx/rx event 8848 * a mask of 1 = slow path event 8849 */ 8850 8851 status = bxe_ack_int(sc); 8852 8853 /* the interrupt is not for us */ 8854 if (__predict_false(status == 0)) { 8855 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 8856 return; 8857 } 8858 8859 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 8860 8861 FOR_EACH_ETH_QUEUE(sc, i) { 8862 fp = &sc->fp[i]; 8863 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 8864 if (status & mask) { 8865 /* acknowledge and disable further fastpath interrupts */ 8866 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8867 bxe_task_fp(fp); 8868 status &= ~mask; 8869 } 8870 } 8871 8872 if (__predict_false(status & 0x1)) { 8873 /* acknowledge and disable further slowpath interrupts */ 8874 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8875 8876 /* schedule slowpath handler */ 8877 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 8878 8879 status &= ~0x1; 8880 } 8881 8882 if (__predict_false(status)) { 8883 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 8884 } 8885} 8886 8887/* slowpath interrupt entry point */ 8888static void 8889bxe_intr_sp(void *xsc) 8890{ 8891 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8892 8893 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 8894 8895 /* acknowledge and disable further slowpath interrupts */ 8896 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8897 8898 /* schedule slowpath handler */ 8899 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 8900} 8901 8902/* fastpath interrupt entry point */ 8903static void 8904bxe_intr_fp(void *xfp) 8905{ 8906 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 8907 struct bxe_softc *sc = fp->sc; 8908 8909 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 8910 8911 BLOGD(sc, DBG_INTR, 8912 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 8913 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 8914 8915 /* acknowledge and disable further fastpath interrupts */ 8916 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8917 8918 bxe_task_fp(fp); 8919} 8920 8921/* Release all interrupts allocated by the driver. 
*/ 8922static void 8923bxe_interrupt_free(struct bxe_softc *sc) 8924{ 8925 int i; 8926 8927 switch (sc->interrupt_mode) { 8928 case INTR_MODE_INTX: 8929 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 8930 if (sc->intr[0].resource != NULL) { 8931 bus_release_resource(sc->dev, 8932 SYS_RES_IRQ, 8933 sc->intr[0].rid, 8934 sc->intr[0].resource); 8935 } 8936 break; 8937 case INTR_MODE_MSI: 8938 for (i = 0; i < sc->intr_count; i++) { 8939 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 8940 if (sc->intr[i].resource && sc->intr[i].rid) { 8941 bus_release_resource(sc->dev, 8942 SYS_RES_IRQ, 8943 sc->intr[i].rid, 8944 sc->intr[i].resource); 8945 } 8946 } 8947 pci_release_msi(sc->dev); 8948 break; 8949 case INTR_MODE_MSIX: 8950 for (i = 0; i < sc->intr_count; i++) { 8951 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 8952 if (sc->intr[i].resource && sc->intr[i].rid) { 8953 bus_release_resource(sc->dev, 8954 SYS_RES_IRQ, 8955 sc->intr[i].rid, 8956 sc->intr[i].resource); 8957 } 8958 } 8959 pci_release_msi(sc->dev); 8960 break; 8961 default: 8962 /* nothing to do as initial allocation failed */ 8963 break; 8964 } 8965} 8966 8967/* 8968 * This function determines and allocates the appropriate 8969 * interrupt based on system capabilities and user request. 8970 * 8971 * The user may force a particular interrupt mode, specify 8972 * the number of receive queues, specify the method for 8973 * distributing received frames to receive queues, or use 8974 * the default settings which will automatically select the 8975 * best supported combination. In addition, the OS may or 8976 * may not support certain combinations of these settings. 8977 * This routine attempts to reconcile the settings requested 8978 * by the user with the capabilities available from the system 8979 * to select the optimal combination of features. 8980 * 8981 * Returns: 8982 * 0 = Success, !0 = Failure. 8983 */ 8984static int 8985bxe_interrupt_alloc(struct bxe_softc *sc) 8986{ 8987 int msix_count = 0; 8988 int msi_count = 0; 8989 int num_requested = 0; 8990 int num_allocated = 0; 8991 int rid, i, j; 8992 int rc; 8993 8994 /* get the number of available MSI/MSI-X interrupts from the OS */ 8995 if (sc->interrupt_mode > 0) { 8996 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 8997 msix_count = pci_msix_count(sc->dev); 8998 } 8999 9000 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 9001 msi_count = pci_msi_count(sc->dev); 9002 } 9003 9004 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 9005 msi_count, msix_count); 9006 } 9007 9008 do { /* try allocating MSI-X interrupt resources (at least 2) */ 9009 if (sc->interrupt_mode != INTR_MODE_MSIX) { 9010 break; 9011 } 9012 9013 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 9014 (msix_count < 2)) { 9015 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9016 break; 9017 } 9018 9019 /* ask for the necessary number of MSI-X vectors */ 9020 num_requested = min((sc->num_queues + 1), msix_count); 9021 9022 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 9023 9024 num_allocated = num_requested; 9025 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 9026 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 9027 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9028 break; 9029 } 9030 9031 if (num_allocated < 2) { /* possible?
*/ 9032 BLOGE(sc, "MSI-X allocation less than 2!\n"); 9033 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9034 pci_release_msi(sc->dev); 9035 break; 9036 } 9037 9038 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 9039 num_requested, num_allocated); 9040 9041 /* best effort so use the number of vectors allocated to us */ 9042 sc->intr_count = num_allocated; 9043 sc->num_queues = num_allocated - 1; 9044 9045 rid = 1; /* initial resource identifier */ 9046 9047 /* allocate the MSI-X vectors */ 9048 for (i = 0; i < num_allocated; i++) { 9049 sc->intr[i].rid = (rid + i); 9050 9051 if ((sc->intr[i].resource = 9052 bus_alloc_resource_any(sc->dev, 9053 SYS_RES_IRQ, 9054 &sc->intr[i].rid, 9055 RF_ACTIVE)) == NULL) { 9056 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 9057 i, (rid + i)); 9058 9059 for (j = (i - 1); j >= 0; j--) { 9060 bus_release_resource(sc->dev, 9061 SYS_RES_IRQ, 9062 sc->intr[j].rid, 9063 sc->intr[j].resource); 9064 } 9065 9066 sc->intr_count = 0; 9067 sc->num_queues = 0; 9068 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9069 pci_release_msi(sc->dev); 9070 break; 9071 } 9072 9073 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 9074 } 9075 } while (0); 9076 9077 do { /* try allocating MSI vector resources (at least 2) */ 9078 if (sc->interrupt_mode != INTR_MODE_MSI) { 9079 break; 9080 } 9081 9082 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 9083 (msi_count < 1)) { 9084 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9085 break; 9086 } 9087 9088 /* ask for a single MSI vector */ 9089 num_requested = 1; 9090 9091 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 9092 9093 num_allocated = num_requested; 9094 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 9095 BLOGE(sc, "MSI alloc failed (%d)!\n", rc); 9096 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9097 break; 9098 } 9099 9100 if (num_allocated != 1) { /* possible? 
*/ 9101 BLOGE(sc, "MSI allocation is not 1!\n"); 9102 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9103 pci_release_msi(sc->dev); 9104 break; 9105 } 9106 9107 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9108 num_requested, num_allocated); 9109 9110 /* best effort so use the number of vectors allocated to us */ 9111 sc->intr_count = num_allocated; 9112 sc->num_queues = num_allocated; 9113 9114 rid = 1; /* initial resource identifier */ 9115 9116 sc->intr[0].rid = rid; 9117 9118 if ((sc->intr[0].resource = 9119 bus_alloc_resource_any(sc->dev, 9120 SYS_RES_IRQ, 9121 &sc->intr[0].rid, 9122 RF_ACTIVE)) == NULL) { 9123 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9124 sc->intr_count = 0; 9125 sc->num_queues = 0; 9126 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9127 pci_release_msi(sc->dev); 9128 break; 9129 } 9130 9131 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9132 } while (0); 9133 9134 do { /* try allocating INTx vector resources */ 9135 if (sc->interrupt_mode != INTR_MODE_INTX) { 9136 break; 9137 } 9138 9139 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9140 9141 /* only one vector for INTx */ 9142 sc->intr_count = 1; 9143 sc->num_queues = 1; 9144 9145 rid = 0; /* initial resource identifier */ 9146 9147 sc->intr[0].rid = rid; 9148 9149 if ((sc->intr[0].resource = 9150 bus_alloc_resource_any(sc->dev, 9151 SYS_RES_IRQ, 9152 &sc->intr[0].rid, 9153 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9154 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9155 sc->intr_count = 0; 9156 sc->num_queues = 0; 9157 sc->interrupt_mode = -1; /* Failed! */ 9158 break; 9159 } 9160 9161 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9162 } while (0); 9163 9164 if (sc->interrupt_mode == -1) { 9165 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9166 rc = 1; 9167 } else { 9168 BLOGD(sc, DBG_LOAD, 9169 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9170 sc->interrupt_mode, sc->num_queues); 9171 rc = 0; 9172 } 9173 9174 return (rc); 9175} 9176 9177static void 9178bxe_interrupt_detach(struct bxe_softc *sc) 9179{ 9180 struct bxe_fastpath *fp; 9181 int i; 9182 9183 /* release interrupt resources */ 9184 for (i = 0; i < sc->intr_count; i++) { 9185 if (sc->intr[i].resource && sc->intr[i].tag) { 9186 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9187 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9188 } 9189 } 9190 /* drain all per-queue tasks before freeing any taskqueue (these two loops were previously nested, which reused the loop index and freed every queue during the first pass) */ 9191 for (i = 0; i < sc->num_queues; i++) { 9192 fp = &sc->fp[i]; 9193 if (fp->tq) { 9194 taskqueue_drain(fp->tq, &fp->tq_task); 9195 taskqueue_drain(fp->tq, &fp->tx_task); 9196 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task, 9197 NULL)) 9198 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task); 9199 } 9200 } 9201 9202 for (i = 0; i < sc->num_queues; i++) { 9203 fp = &sc->fp[i]; 9204 if (fp->tq != NULL) { 9205 taskqueue_free(fp->tq); 9206 fp->tq = NULL; 9207 } 9208 } 9209 9210 if (sc->sp_tq) { 9211 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9212 taskqueue_free(sc->sp_tq); 9213 sc->sp_tq = NULL; 9214 } 9215} 9216 9217/* 9218 * Enables interrupts and attaches to the ISR. 9219 * 9220 * When using multiple MSI/MSI-X vectors the first vector 9221 * is used for slowpath operations while all remaining 9222 * vectors are used for fastpath operations. If only a 9223 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9224 * ISR must look for both slowpath and fastpath completions.
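 *
 * For example, with num_queues == 4 the MSI-X path in
 * bxe_interrupt_alloc() requests 5 vectors: vector 0 is attached to
 * bxe_intr_sp() for the slowpath, and vectors 1 through 4 are attached
 * to bxe_intr_fp() for fastpath queues 0 through 3 (each bound to a CPU
 * when more than one queue is in use).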
9225 */ 9226static int 9227bxe_interrupt_attach(struct bxe_softc *sc) 9228{ 9229 struct bxe_fastpath *fp; 9230 int rc = 0; 9231 int i; 9232 9233 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9234 "bxe%d_sp_tq", sc->unit); 9235 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9236 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, 9237 taskqueue_thread_enqueue, 9238 &sc->sp_tq); 9239 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9240 "%s", sc->sp_tq_name); 9241 9242 9243 for (i = 0; i < sc->num_queues; i++) { 9244 fp = &sc->fp[i]; 9245 snprintf(fp->tq_name, sizeof(fp->tq_name), 9246 "bxe%d_fp%d_tq", sc->unit, i); 9247 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9248 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); 9249 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, 9250 taskqueue_thread_enqueue, 9251 &fp->tq); 9252 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, 9253 bxe_tx_mq_start_deferred, fp); 9254 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9255 "%s", fp->tq_name); 9256 } 9257 9258 /* setup interrupt handlers */ 9259 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9260 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9261 9262 /* 9263 * Setup the interrupt handler. Note that we pass the driver instance 9264 * to the interrupt handler for the slowpath. 9265 */ 9266 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9267 (INTR_TYPE_NET | INTR_MPSAFE), 9268 NULL, bxe_intr_sp, sc, 9269 &sc->intr[0].tag)) != 0) { 9270 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9271 goto bxe_interrupt_attach_exit; 9272 } 9273 9274 bus_describe_intr(sc->dev, sc->intr[0].resource, 9275 sc->intr[0].tag, "sp"); 9276 9277 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9278 9279 /* initialize the fastpath vectors (note the first was used for sp) */ 9280 for (i = 0; i < sc->num_queues; i++) { 9281 fp = &sc->fp[i]; 9282 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9283 9284 /* 9285 * Setup the interrupt handler. Note that we pass the 9286 * fastpath context to the interrupt handler in this 9287 * case. 9288 */ 9289 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9290 (INTR_TYPE_NET | INTR_MPSAFE), 9291 NULL, bxe_intr_fp, fp, 9292 &sc->intr[i + 1].tag)) != 0) { 9293 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9294 (i + 1), rc); 9295 goto bxe_interrupt_attach_exit; 9296 } 9297 9298 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9299 sc->intr[i + 1].tag, "fp%02d", i); 9300 9301 /* bind the fastpath instance to a cpu */ 9302 if (sc->num_queues > 1) { 9303 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9304 } 9305 9306 fp->state = BXE_FP_STATE_IRQ; 9307 } 9308 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9309 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9310 9311 /* 9312 * Setup the interrupt handler. Note that we pass the 9313 * driver instance to the interrupt handler which 9314 * will handle both the slowpath and fastpath. 9315 */ 9316 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9317 (INTR_TYPE_NET | INTR_MPSAFE), 9318 NULL, bxe_intr_legacy, sc, 9319 &sc->intr[0].tag)) != 0) { 9320 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9321 goto bxe_interrupt_attach_exit; 9322 } 9323 9324 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9325 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9326 9327 /* 9328 * Setup the interrupt handler. 
Note that we pass the 9329 * driver instance to the interrupt handler which 9330 * will handle both the slowpath and fastpath. 9331 */ 9332 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9333 (INTR_TYPE_NET | INTR_MPSAFE), 9334 NULL, bxe_intr_legacy, sc, 9335 &sc->intr[0].tag)) != 0) { 9336 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9337 goto bxe_interrupt_attach_exit; 9338 } 9339 } 9340 9341bxe_interrupt_attach_exit: 9342 9343 return (rc); 9344} 9345 9346static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9347static int bxe_init_hw_common(struct bxe_softc *sc); 9348static int bxe_init_hw_port(struct bxe_softc *sc); 9349static int bxe_init_hw_func(struct bxe_softc *sc); 9350static void bxe_reset_common(struct bxe_softc *sc); 9351static void bxe_reset_port(struct bxe_softc *sc); 9352static void bxe_reset_func(struct bxe_softc *sc); 9353static int bxe_gunzip_init(struct bxe_softc *sc); 9354static void bxe_gunzip_end(struct bxe_softc *sc); 9355static int bxe_init_firmware(struct bxe_softc *sc); 9356static void bxe_release_firmware(struct bxe_softc *sc); 9357 9358static struct 9359ecore_func_sp_drv_ops bxe_func_sp_drv = { 9360 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9361 .init_hw_cmn = bxe_init_hw_common, 9362 .init_hw_port = bxe_init_hw_port, 9363 .init_hw_func = bxe_init_hw_func, 9364 9365 .reset_hw_cmn = bxe_reset_common, 9366 .reset_hw_port = bxe_reset_port, 9367 .reset_hw_func = bxe_reset_func, 9368 9369 .gunzip_init = bxe_gunzip_init, 9370 .gunzip_end = bxe_gunzip_end, 9371 9372 .init_fw = bxe_init_firmware, 9373 .release_fw = bxe_release_firmware, 9374}; 9375 9376static void 9377bxe_init_func_obj(struct bxe_softc *sc) 9378{ 9379 sc->dmae_ready = 0; 9380 9381 ecore_init_func_obj(sc, 9382 &sc->func_obj, 9383 BXE_SP(sc, func_rdata), 9384 BXE_SP_MAPPING(sc, func_rdata), 9385 BXE_SP(sc, func_afex_rdata), 9386 BXE_SP_MAPPING(sc, func_afex_rdata), 9387 &bxe_func_sp_drv); 9388} 9389 9390static int 9391bxe_init_hw(struct bxe_softc *sc, 9392 uint32_t load_code) 9393{ 9394 struct ecore_func_state_params func_params = { NULL }; 9395 int rc; 9396 9397 /* prepare the parameters for function state transitions */ 9398 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9399 9400 func_params.f_obj = &sc->func_obj; 9401 func_params.cmd = ECORE_F_CMD_HW_INIT; 9402 9403 func_params.params.hw_init.load_phase = load_code; 9404 9405 /* 9406 * Via a plethora of function pointers, we will eventually reach 9407 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
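 * Which one is reached depends on the load_code handed back by the MCP
 * at load time (FW_MSG_CODE_DRV_LOAD_COMMON[_CHIP], _PORT, or
 * _FUNCTION); ecore_func_state_change() uses that phase to pick the
 * matching init_hw_* callback from bxe_func_sp_drv above.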
9408 */ 9409 rc = ecore_func_state_change(sc, &func_params); 9410 9411 return (rc); 9412} 9413 9414static void 9415bxe_fill(struct bxe_softc *sc, 9416 uint32_t addr, 9417 int fill, 9418 uint32_t len) 9419{ 9420 uint32_t i; 9421 9422 if (!(len % 4) && !(addr % 4)) { 9423 for (i = 0; i < len; i += 4) { 9424 REG_WR(sc, (addr + i), fill); 9425 } 9426 } else { 9427 for (i = 0; i < len; i++) { 9428 REG_WR8(sc, (addr + i), fill); 9429 } 9430 } 9431} 9432 9433/* writes FP SP data to FW - data_size in dwords */ 9434static void 9435bxe_wr_fp_sb_data(struct bxe_softc *sc, 9436 int fw_sb_id, 9437 uint32_t *sb_data_p, 9438 uint32_t data_size) 9439{ 9440 int index; 9441 9442 for (index = 0; index < data_size; index++) { 9443 REG_WR(sc, 9444 (BAR_CSTRORM_INTMEM + 9445 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9446 (sizeof(uint32_t) * index)), 9447 *(sb_data_p + index)); 9448 } 9449} 9450 9451static void 9452bxe_zero_fp_sb(struct bxe_softc *sc, 9453 int fw_sb_id) 9454{ 9455 struct hc_status_block_data_e2 sb_data_e2; 9456 struct hc_status_block_data_e1x sb_data_e1x; 9457 uint32_t *sb_data_p; 9458 uint32_t data_size = 0; 9459 9460 if (!CHIP_IS_E1x(sc)) { 9461 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9462 sb_data_e2.common.state = SB_DISABLED; 9463 sb_data_e2.common.p_func.vf_valid = FALSE; 9464 sb_data_p = (uint32_t *)&sb_data_e2; 9465 data_size = (sizeof(struct hc_status_block_data_e2) / 9466 sizeof(uint32_t)); 9467 } else { 9468 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9469 sb_data_e1x.common.state = SB_DISABLED; 9470 sb_data_e1x.common.p_func.vf_valid = FALSE; 9471 sb_data_p = (uint32_t *)&sb_data_e1x; 9472 data_size = (sizeof(struct hc_status_block_data_e1x) / 9473 sizeof(uint32_t)); 9474 } 9475 9476 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9477 9478 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9479 0, CSTORM_STATUS_BLOCK_SIZE); 9480 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9481 0, CSTORM_SYNC_BLOCK_SIZE); 9482} 9483 9484static void 9485bxe_wr_sp_sb_data(struct bxe_softc *sc, 9486 struct hc_sp_status_block_data *sp_sb_data) 9487{ 9488 int i; 9489 9490 for (i = 0; 9491 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9492 i++) { 9493 REG_WR(sc, 9494 (BAR_CSTRORM_INTMEM + 9495 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9496 (i * sizeof(uint32_t))), 9497 *((uint32_t *)sp_sb_data + i)); 9498 } 9499} 9500 9501static void 9502bxe_zero_sp_sb(struct bxe_softc *sc) 9503{ 9504 struct hc_sp_status_block_data sp_sb_data; 9505 9506 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9507 9508 sp_sb_data.state = SB_DISABLED; 9509 sp_sb_data.p_func.vf_valid = FALSE; 9510 9511 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9512 9513 bxe_fill(sc, 9514 (BAR_CSTRORM_INTMEM + 9515 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9516 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9517 bxe_fill(sc, 9518 (BAR_CSTRORM_INTMEM + 9519 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9520 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9521} 9522 9523static void 9524bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9525 int igu_sb_id, 9526 int igu_seg_id) 9527{ 9528 hc_sm->igu_sb_id = igu_sb_id; 9529 hc_sm->igu_seg_id = igu_seg_id; 9530 hc_sm->timer_value = 0xFF; 9531 hc_sm->time_to_expire = 0xFFFFFFFF; 9532} 9533 9534static void 9535bxe_map_sb_state_machines(struct hc_index_data *index_data) 9536{ 9537 /* zero out state machine indices */ 9538 9539 /* rx indices */ 9540 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
&= ~HC_INDEX_DATA_SM_ID; 9541 9542 /* tx indices */ 9543 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9544 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9545 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9546 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9547 9548 /* map indices */ 9549 9550 /* rx indices */ 9551 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9552 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9553 9554 /* tx indices */ 9555 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9556 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9557 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9558 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9559 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9560 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9561 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9562 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9563} 9564 9565static void 9566bxe_init_sb(struct bxe_softc *sc, 9567 bus_addr_t busaddr, 9568 int vfid, 9569 uint8_t vf_valid, 9570 int fw_sb_id, 9571 int igu_sb_id) 9572{ 9573 struct hc_status_block_data_e2 sb_data_e2; 9574 struct hc_status_block_data_e1x sb_data_e1x; 9575 struct hc_status_block_sm *hc_sm_p; 9576 uint32_t *sb_data_p; 9577 int igu_seg_id; 9578 int data_size; 9579 9580 if (CHIP_INT_MODE_IS_BC(sc)) { 9581 igu_seg_id = HC_SEG_ACCESS_NORM; 9582 } else { 9583 igu_seg_id = IGU_SEG_ACCESS_NORM; 9584 } 9585 9586 bxe_zero_fp_sb(sc, fw_sb_id); 9587 9588 if (!CHIP_IS_E1x(sc)) { 9589 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9590 sb_data_e2.common.state = SB_ENABLED; 9591 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9592 sb_data_e2.common.p_func.vf_id = vfid; 9593 sb_data_e2.common.p_func.vf_valid = vf_valid; 9594 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9595 sb_data_e2.common.same_igu_sb_1b = TRUE; 9596 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9597 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9598 hc_sm_p = sb_data_e2.common.state_machine; 9599 sb_data_p = (uint32_t *)&sb_data_e2; 9600 data_size = (sizeof(struct hc_status_block_data_e2) / 9601 sizeof(uint32_t)); 9602 bxe_map_sb_state_machines(sb_data_e2.index_data); 9603 } else { 9604 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9605 sb_data_e1x.common.state = SB_ENABLED; 9606 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9607 sb_data_e1x.common.p_func.vf_id = 0xff; 9608 sb_data_e1x.common.p_func.vf_valid = FALSE; 9609 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9610 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9611 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9612 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9613 hc_sm_p = sb_data_e1x.common.state_machine; 9614 sb_data_p = (uint32_t *)&sb_data_e1x; 9615 data_size = (sizeof(struct hc_status_block_data_e1x) / 9616 sizeof(uint32_t)); 9617 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9618 } 9619 9620 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9621 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9622 9623 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9624 9625 /* write indices to HW - PCI guarantees endianity of regpairs */ 9626 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9627} 9628 9629static inline uint8_t 9630bxe_fp_qzone_id(struct bxe_fastpath *fp) 9631{ 9632 if (CHIP_IS_E1x(fp->sc)) { 9633 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9634 } else { 9635 return (fp->cl_id); 9636 } 9637} 9638 
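/*
 * Note on the helper below: it computes the USTORM BAR offset at which
 * the rx bd/cqe/sge producers for a queue are written. On E2 and newer
 * chips the offset is keyed by the client queue-zone id (see
 * bxe_fp_qzone_id() above); on E1x it is keyed by port and client id.
 * bxe_update_rx_prod() later writes the producers through this offset.
 */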
9639static inline uint32_t 9640bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9641 struct bxe_fastpath *fp) 9642{ 9643 uint32_t offset = BAR_USTRORM_INTMEM; 9644 9645 if (!CHIP_IS_E1x(sc)) { 9646 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 9647 } else { 9648 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 9649 } 9650 9651 return (offset); 9652} 9653 9654static void 9655bxe_init_eth_fp(struct bxe_softc *sc, 9656 int idx) 9657{ 9658 struct bxe_fastpath *fp = &sc->fp[idx]; 9659 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 9660 unsigned long q_type = 0; 9661 int cos; 9662 9663 fp->sc = sc; 9664 fp->index = idx; 9665 9666 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 9667 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 9668 9669 fp->cl_id = (CHIP_IS_E1x(sc)) ? 9670 (SC_L_ID(sc) + idx) : 9671 /* want client ID same as IGU SB ID for non-E1 */ 9672 fp->igu_sb_id; 9673 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 9674 9675 /* setup sb indices */ 9676 if (!CHIP_IS_E1x(sc)) { 9677 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 9678 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 9679 } else { 9680 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 9681 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 9682 } 9683 9684 /* init shortcut */ 9685 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 9686 9687 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 9688 9689 /* 9690 * XXX If multiple CoS is ever supported then each fastpath structure 9691 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 9692 */ 9693 for (cos = 0; cos < sc->max_cos; cos++) { 9694 cids[cos] = idx; 9695 } 9696 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 9697 9698 /* nothing more for a VF to do */ 9699 if (IS_VF(sc)) { 9700 return; 9701 } 9702 9703 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, 9704 fp->fw_sb_id, fp->igu_sb_id); 9705 9706 bxe_update_fp_sb_idx(fp); 9707 9708 /* Configure Queue State object */ 9709 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); 9710 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); 9711 9712 ecore_init_queue_obj(sc, 9713 &sc->sp_objs[idx].q_obj, 9714 fp->cl_id, 9715 cids, 9716 sc->max_cos, 9717 SC_FUNC(sc), 9718 BXE_SP(sc, q_rdata), 9719 BXE_SP_MAPPING(sc, q_rdata), 9720 q_type); 9721 9722 /* configure classification DBs */ 9723 ecore_init_mac_obj(sc, 9724 &sc->sp_objs[idx].mac_obj, 9725 fp->cl_id, 9726 idx, 9727 SC_FUNC(sc), 9728 BXE_SP(sc, mac_rdata), 9729 BXE_SP_MAPPING(sc, mac_rdata), 9730 ECORE_FILTER_MAC_PENDING, 9731 &sc->sp_state, 9732 ECORE_OBJ_TYPE_RX_TX, 9733 &sc->macs_pool); 9734 9735 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", 9736 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); 9737} 9738 9739static inline void 9740bxe_update_rx_prod(struct bxe_softc *sc, 9741 struct bxe_fastpath *fp, 9742 uint16_t rx_bd_prod, 9743 uint16_t rx_cq_prod, 9744 uint16_t rx_sge_prod) 9745{ 9746 struct ustorm_eth_rx_producers rx_prods = { 0 }; 9747 uint32_t i; 9748 9749 /* update producers */ 9750 rx_prods.bd_prod = rx_bd_prod; 9751 rx_prods.cqe_prod = rx_cq_prod; 9752 rx_prods.sge_prod = rx_sge_prod; 9753 9754 /* 9755 * Make sure that the BD and SGE data is updated before updating the 9756 * producers since FW might read the BD/SGE right after the producer 9757 * is updated. 9758 * This is only applicable for weak-ordered memory model archs such 9759 * as IA-64. 
The following barrier is also mandatory since the FW will
     * assume that BDs have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning, this will generate an interrupt (to the TSTORM)
         * so this can only be done after the chip is initialized
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        if (i != 0) {
            continue;
        }

        if (CHIP_IS_E1(sc)) {
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}

static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
    fp->tx_db.data.zero_fill1 = 0;
    fp->tx_db.data.prod = 0;

    fp->tx_pkt_prod = 0;
    fp->tx_pkt_cons = 0;
    fp->tx_bd_prod = 0;
    fp->tx_bd_cons = 0;
    fp->eth_q_stats.tx_pkts = 0;
}

static inline void
bxe_init_tx_rings(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_tx_ring_one(&sc->fp[i]);
    }
}

static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
        MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
9873 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 9874 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 9875 9876 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 9877 /* take care of sig[0]..sig[4] */ 9878 for (sindex = 0; sindex < 4; sindex++) { 9879 sc->attn_group[index].sig[sindex] = 9880 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 9881 } 9882 9883 if (!CHIP_IS_E1x(sc)) { 9884 /* 9885 * enable5 is separate from the rest of the registers, 9886 * and the address skip is 4 and not 16 between the 9887 * different groups 9888 */ 9889 sc->attn_group[index].sig[4] = 9890 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 9891 } else { 9892 sc->attn_group[index].sig[4] = 0; 9893 } 9894 } 9895 9896 if (sc->devinfo.int_block == INT_BLOCK_HC) { 9897 reg_offset = (port) ? 9898 HC_REG_ATTN_MSG1_ADDR_L : 9899 HC_REG_ATTN_MSG0_ADDR_L; 9900 REG_WR(sc, reg_offset, U64_LO(section)); 9901 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 9902 } else if (!CHIP_IS_E1x(sc)) { 9903 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 9904 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 9905 } 9906 9907 section = ((uint64_t)mapping + 9908 offsetof(struct host_sp_status_block, sp_sb)); 9909 9910 bxe_zero_sp_sb(sc); 9911 9912 /* PCI guarantees endianity of regpair */ 9913 sp_sb_data.state = SB_ENABLED; 9914 sp_sb_data.host_sb_addr.lo = U64_LO(section); 9915 sp_sb_data.host_sb_addr.hi = U64_HI(section); 9916 sp_sb_data.igu_sb_id = igu_sp_sb_index; 9917 sp_sb_data.igu_seg_id = igu_seg_id; 9918 sp_sb_data.p_func.pf_id = func; 9919 sp_sb_data.p_func.vnic_id = SC_VN(sc); 9920 sp_sb_data.p_func.vf_id = 0xff; 9921 9922 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9923 9924 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 9925} 9926 9927static void 9928bxe_init_sp_ring(struct bxe_softc *sc) 9929{ 9930 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 9931 sc->spq_prod_idx = 0; 9932 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 9933 sc->spq_prod_bd = sc->spq; 9934 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 9935} 9936 9937static void 9938bxe_init_eq_ring(struct bxe_softc *sc) 9939{ 9940 union event_ring_elem *elem; 9941 int i; 9942 9943 for (i = 1; i <= NUM_EQ_PAGES; i++) { 9944 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 9945 9946 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 9947 BCM_PAGE_SIZE * 9948 (i % NUM_EQ_PAGES))); 9949 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 9950 BCM_PAGE_SIZE * 9951 (i % NUM_EQ_PAGES))); 9952 } 9953 9954 sc->eq_cons = 0; 9955 sc->eq_prod = NUM_EQ_DESC; 9956 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 9957 9958 atomic_store_rel_long(&sc->eq_spq_left, 9959 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 9960 NUM_EQ_DESC) - 1)); 9961} 9962 9963static void 9964bxe_init_internal_common(struct bxe_softc *sc) 9965{ 9966 int i; 9967 9968 /* 9969 * Zero this manually as its initialization is currently missing 9970 * in the initTool. 9971 */ 9972 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 9973 REG_WR(sc, 9974 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 9975 0); 9976 } 9977 9978 if (!CHIP_IS_E1x(sc)) { 9979 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 9980 CHIP_INT_MODE_IS_BC(sc) ? 
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static void
storm_memset_func_cfg(struct bxe_softc *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t abs_fid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_TSTRORM_INTMEM +
            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
    size = sizeof(struct tstorm_eth_function_common_config);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

static void
bxe_func_init(struct bxe_softc *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}

/*
 * Calculates the per-VN min rates used by the fairness algorithm.
 * They are needed for further normalizing of the min rates.
 * Any non-hidden VN whose configured min rate is zero is assigned
 * DEF_MIN_RATE so that it is not starved. If all of the configured
 * min rates are zero the fairness algorithm is deactivated.
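 *
 * Illustrative example (hypothetical values): a MIN_BW field of 25
 * yields vnic_min_rate[vn] = 25 * 100 = 2500, a MIN_BW field of zero
 * yields DEF_MIN_RATE, and a hidden VN always yields 0.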
 */
static void
bxe_calc_vn_min(struct bxe_softc *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to DEF_MIN_RATE (100) */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

static void
bxe_calc_vn_max(struct bxe_softc *sc,
                int vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t read_cfg,
                  uint8_t cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}

static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
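        /* no congestion management on emulation/FPGA platforms */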
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM +
            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}

static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration at a function level.
     */
    flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id = SC_FUNC(sc);
    func_init.func_id = SC_FUNC(sc);
    func_init.spq_map = sc->spq_dma.paddr;
    func_init.spq_prod = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianity */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer = sc->eq_prod;
    eq_data.index_id = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}

static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10294 uint32_t val = REG_RD(sc, addr); 10295 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10296 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10297 (sc->intr_count == 1)) ? TRUE : FALSE; 10298 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10299 10300 if (msix) { 10301 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10302 HC_CONFIG_0_REG_INT_LINE_EN_0); 10303 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10304 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10305 if (single_msix) { 10306 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10307 } 10308 } else if (msi) { 10309 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10310 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10311 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10312 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10313 } else { 10314 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10315 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10316 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10317 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10318 10319 if (!CHIP_IS_E1(sc)) { 10320 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10321 val, port, addr); 10322 10323 REG_WR(sc, addr, val); 10324 10325 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10326 } 10327 } 10328 10329 if (CHIP_IS_E1(sc)) { 10330 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10331 } 10332 10333 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10334 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10335 10336 REG_WR(sc, addr, val); 10337 10338 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10339 mb(); 10340 10341 if (!CHIP_IS_E1(sc)) { 10342 /* init leading/trailing edge */ 10343 if (IS_MF(sc)) { 10344 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10345 if (sc->port.pmf) { 10346 /* enable nig and gpio3 attention */ 10347 val |= 0x1100; 10348 } 10349 } else { 10350 val = 0xffff; 10351 } 10352 10353 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10354 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10355 } 10356 10357 /* make sure that interrupts are indeed enabled from here on */ 10358 mb(); 10359} 10360 10361static void 10362bxe_igu_int_enable(struct bxe_softc *sc) 10363{ 10364 uint32_t val; 10365 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10366 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10367 (sc->intr_count == 1)) ? TRUE : FALSE; 10368 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10369 10370 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10371 10372 if (msix) { 10373 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10374 IGU_PF_CONF_SINGLE_ISR_EN); 10375 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10376 IGU_PF_CONF_ATTN_BIT_EN); 10377 if (single_msix) { 10378 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10379 } 10380 } else if (msi) { 10381 val &= ~IGU_PF_CONF_INT_LINE_EN; 10382 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10383 IGU_PF_CONF_ATTN_BIT_EN | 10384 IGU_PF_CONF_SINGLE_ISR_EN); 10385 } else { 10386 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10387 val |= (IGU_PF_CONF_INT_LINE_EN | 10388 IGU_PF_CONF_ATTN_BIT_EN | 10389 IGU_PF_CONF_SINGLE_ISR_EN); 10390 } 10391 10392 /* clean previous status - need to configure igu prior to ack*/ 10393 if ((!msix) || single_msix) { 10394 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10395 bxe_ack_int(sc); 10396 } 10397 10398 val |= IGU_PF_CONF_FUNC_EN; 10399 10400 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10401 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10402 10403 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10404 10405 mb(); 10406 10407 /* init leading/trailing edge */ 10408 if (IS_MF(sc)) { 10409 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10410 if (sc->port.pmf) { 10411 /* enable nig and gpio3 attention */ 10412 val |= 0x1100; 10413 } 10414 } else { 10415 val = 0xffff; 10416 } 10417 10418 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10419 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10420 10421 /* make sure that interrupts are indeed enabled from here on */ 10422 mb(); 10423} 10424 10425static void 10426bxe_int_enable(struct bxe_softc *sc) 10427{ 10428 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10429 bxe_hc_int_enable(sc); 10430 } else { 10431 bxe_igu_int_enable(sc); 10432 } 10433} 10434 10435static void 10436bxe_hc_int_disable(struct bxe_softc *sc) 10437{ 10438 int port = SC_PORT(sc); 10439 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10440 uint32_t val = REG_RD(sc, addr); 10441 10442 /* 10443 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10444 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10445 * block 10446 */ 10447 if (CHIP_IS_E1(sc)) { 10448 /* 10449 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10450 * to prevent from HC sending interrupts after we exit the function 10451 */ 10452 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10453 10454 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10455 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10456 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10457 } else { 10458 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10459 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10460 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10461 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10462 } 10463 10464 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10465 10466 /* flush all outstanding writes */ 10467 mb(); 10468 10469 REG_WR(sc, addr, val); 10470 if (REG_RD(sc, addr) != val) { 10471 BLOGE(sc, "proper val not read from HC IGU!\n"); 10472 } 10473} 10474 10475static void 10476bxe_igu_int_disable(struct bxe_softc *sc) 10477{ 10478 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10479 10480 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10481 IGU_PF_CONF_INT_LINE_EN | 10482 IGU_PF_CONF_ATTN_BIT_EN); 10483 10484 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10485 10486 /* flush all outstanding writes */ 10487 mb(); 10488 10489 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10490 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10491 BLOGE(sc, "proper val not read from IGU!\n"); 10492 } 10493} 10494 10495static void 10496bxe_int_disable(struct bxe_softc *sc) 10497{ 10498 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10499 bxe_hc_int_disable(sc); 10500 } else { 10501 bxe_igu_int_disable(sc); 10502 } 10503} 10504 10505static void 10506bxe_nic_init(struct bxe_softc *sc, 10507 int load_code) 10508{ 10509 int i; 10510 10511 for (i = 0; i < sc->num_queues; i++) { 10512 bxe_init_eth_fp(sc, i); 10513 } 10514 10515 rmb(); /* ensure status block indices were read */ 10516 10517 bxe_init_rx_rings(sc); 10518 bxe_init_tx_rings(sc); 10519 10520 if (IS_VF(sc)) { 10521 return; 10522 } 10523 10524 /* initialize MOD_ABS interrupts */ 10525 elink_init_mod_abs_int(sc, &sc->link_vars, 10526 sc->devinfo.chip_id, 10527 sc->devinfo.shmem_base, 10528 sc->devinfo.shmem2_base, 10529 SC_PORT(sc)); 10530 10531 bxe_init_def_sb(sc); 10532 bxe_update_dsb_idx(sc); 10533 bxe_init_sp_ring(sc); 10534 bxe_init_eq_ring(sc); 10535 bxe_init_internal(sc, load_code); 10536 bxe_pf_init(sc); 10537 bxe_stats_init(sc); 
10538 10539 /* flush all before enabling interrupts */ 10540 mb(); 10541 10542 bxe_int_enable(sc); 10543 10544 /* check for SPIO5 */ 10545 bxe_attn_int_deasserted0(sc, 10546 REG_RD(sc, 10547 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10548 SC_PORT(sc)*4)) & 10549 AEU_INPUTS_ATTN_BITS_SPIO5); 10550} 10551 10552static inline void 10553bxe_init_objs(struct bxe_softc *sc) 10554{ 10555 /* mcast rules must be added to tx if tx switching is enabled */ 10556 ecore_obj_type o_type = 10557 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10558 ECORE_OBJ_TYPE_RX; 10559 10560 /* RX_MODE controlling object */ 10561 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10562 10563 /* multicast configuration controlling object */ 10564 ecore_init_mcast_obj(sc, 10565 &sc->mcast_obj, 10566 sc->fp[0].cl_id, 10567 sc->fp[0].index, 10568 SC_FUNC(sc), 10569 SC_FUNC(sc), 10570 BXE_SP(sc, mcast_rdata), 10571 BXE_SP_MAPPING(sc, mcast_rdata), 10572 ECORE_FILTER_MCAST_PENDING, 10573 &sc->sp_state, 10574 o_type); 10575 10576 /* Setup CAM credit pools */ 10577 ecore_init_mac_credit_pool(sc, 10578 &sc->macs_pool, 10579 SC_FUNC(sc), 10580 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10581 VNICS_PER_PATH(sc)); 10582 10583 ecore_init_vlan_credit_pool(sc, 10584 &sc->vlans_pool, 10585 SC_ABS_FUNC(sc) >> 1, 10586 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10587 VNICS_PER_PATH(sc)); 10588 10589 /* RSS configuration object */ 10590 ecore_init_rss_config_obj(sc, 10591 &sc->rss_conf_obj, 10592 sc->fp[0].cl_id, 10593 sc->fp[0].index, 10594 SC_FUNC(sc), 10595 SC_FUNC(sc), 10596 BXE_SP(sc, rss_rdata), 10597 BXE_SP_MAPPING(sc, rss_rdata), 10598 ECORE_FILTER_RSS_CONF_PENDING, 10599 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10600} 10601 10602/* 10603 * Initialize the function. This must be called before sending CLIENT_SETUP 10604 * for the first client. 
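 *
 * Internally this issues the ECORE_F_CMD_START state transition and,
 * because RAMROD_COMP_WAIT is set on the ramrod flags, waits for the
 * firmware to complete the START ramrod before returning.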
10605 */ 10606static inline int 10607bxe_func_start(struct bxe_softc *sc) 10608{ 10609 struct ecore_func_state_params func_params = { NULL }; 10610 struct ecore_func_start_params *start_params = &func_params.params.start; 10611 10612 /* Prepare parameters for function state transitions */ 10613 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 10614 10615 func_params.f_obj = &sc->func_obj; 10616 func_params.cmd = ECORE_F_CMD_START; 10617 10618 /* Function parameters */ 10619 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 10620 start_params->sd_vlan_tag = OVLAN(sc); 10621 10622 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 10623 start_params->network_cos_mode = STATIC_COS; 10624 } else { /* CHIP_IS_E1X */ 10625 start_params->network_cos_mode = FW_WRR; 10626 } 10627 10628 //start_params->gre_tunnel_mode = 0; 10629 //start_params->gre_tunnel_rss = 0; 10630 10631 return (ecore_func_state_change(sc, &func_params)); 10632} 10633 10634static int 10635bxe_set_power_state(struct bxe_softc *sc, 10636 uint8_t state) 10637{ 10638 uint16_t pmcsr; 10639 10640 /* If there is no power capability, silently succeed */ 10641 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 10642 BLOGW(sc, "No power capability\n"); 10643 return (0); 10644 } 10645 10646 pmcsr = pci_read_config(sc->dev, 10647 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10648 2); 10649 10650 switch (state) { 10651 case PCI_PM_D0: 10652 pci_write_config(sc->dev, 10653 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10654 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 10655 10656 if (pmcsr & PCIM_PSTAT_DMASK) { 10657 /* delay required during transition out of D3hot */ 10658 DELAY(20000); 10659 } 10660 10661 break; 10662 10663 case PCI_PM_D3hot: 10664 /* XXX if there are other clients above don't shut down the power */ 10665 10666 /* don't shut down the power for emulation and FPGA */ 10667 if (CHIP_REV_IS_SLOW(sc)) { 10668 return (0); 10669 } 10670 10671 pmcsr &= ~PCIM_PSTAT_DMASK; 10672 pmcsr |= PCIM_PSTAT_D3; 10673 10674 if (sc->wol) { 10675 pmcsr |= PCIM_PSTAT_PMEENABLE; 10676 } 10677 10678 pci_write_config(sc->dev, 10679 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10680 pmcsr, 4); 10681 10682 /* 10683 * No more memory access after this point until device is brought back 10684 * to D0 state. 
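         * (The PMCSR write above moves the function into D3hot and,
         * when WOL is enabled, also sets the PME enable bit.)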
         */
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
              state, pmcsr);
        return (-1);
    }

    return (0);
}


/* return true if succeeded to acquire the lock */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
          "lock_status 0x%x resource_bit 0x%x\n", resource, func,
          lock_status, resource_bit);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    if (SC_PATH(sc)) {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
    } else {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
    }
}

/* try to acquire a leader lock for current engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* close gates #2, #3 and #4 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
10798 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 10799 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 10800 } 10801 10802 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", 10803 close ? "closing" : "opening"); 10804 10805 wmb(); 10806} 10807 10808/* poll for pending writes bit, it should get cleared in no more than 1s */ 10809static int 10810bxe_er_poll_igu_vq(struct bxe_softc *sc) 10811{ 10812 uint32_t cnt = 1000; 10813 uint32_t pend_bits = 0; 10814 10815 do { 10816 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 10817 10818 if (pend_bits == 0) { 10819 break; 10820 } 10821 10822 DELAY(1000); 10823 } while (--cnt > 0); 10824 10825 if (cnt == 0) { 10826 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 10827 return (-1); 10828 } 10829 10830 return (0); 10831} 10832 10833#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 10834 10835static void 10836bxe_clp_reset_prep(struct bxe_softc *sc, 10837 uint32_t *magic_val) 10838{ 10839 /* Do some magic... */ 10840 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10841 *magic_val = val & SHARED_MF_CLP_MAGIC; 10842 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 10843} 10844 10845/* restore the value of the 'magic' bit */ 10846static void 10847bxe_clp_reset_done(struct bxe_softc *sc, 10848 uint32_t magic_val) 10849{ 10850 /* Restore the 'magic' bit value... */ 10851 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10852 MFCFG_WR(sc, shared_mf_config.clp_mb, 10853 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 10854} 10855 10856/* prepare for MCP reset, takes care of CLP configurations */ 10857static void 10858bxe_reset_mcp_prep(struct bxe_softc *sc, 10859 uint32_t *magic_val) 10860{ 10861 uint32_t shmem; 10862 uint32_t validity_offset; 10863 10864 /* set `magic' bit in order to save MF config */ 10865 if (!CHIP_IS_E1(sc)) { 10866 bxe_clp_reset_prep(sc, magic_val); 10867 } 10868 10869 /* get shmem offset */ 10870 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10871 validity_offset = 10872 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 10873 10874 /* Clear validity map flags */ 10875 if (shmem > 0) { 10876 REG_WR(sc, shmem + validity_offset, 0); 10877 } 10878} 10879 10880#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 10881#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 10882 10883static void 10884bxe_mcp_wait_one(struct bxe_softc *sc) 10885{ 10886 /* special handling for emulation and FPGA (10 times longer) */ 10887 if (CHIP_REV_IS_SLOW(sc)) { 10888 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 10889 } else { 10890 DELAY((MCP_ONE_TIMEOUT) * 1000); 10891 } 10892} 10893 10894/* initialize shmem_base and waits for validity signature to appear */ 10895static int 10896bxe_init_shmem(struct bxe_softc *sc) 10897{ 10898 int cnt = 0; 10899 uint32_t val = 0; 10900 10901 do { 10902 sc->devinfo.shmem_base = 10903 sc->link_params.shmem_base = 10904 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10905 10906 if (sc->devinfo.shmem_base) { 10907 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 10908 if (val & SHR_MEM_VALIDITY_MB) 10909 return (0); 10910 } 10911 10912 bxe_mcp_wait_one(sc); 10913 10914 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 10915 10916 BLOGE(sc, "BAD MCP validity signature\n"); 10917 10918 return (-1); 10919} 10920 10921static int 10922bxe_reset_mcp_comp(struct bxe_softc *sc, 10923 uint32_t magic_val) 10924{ 10925 int rc = bxe_init_shmem(sc); 10926 10927 /* Restore the `magic' bit value */ 10928 if (!CHIP_IS_E1(sc)) { 10929 bxe_clp_reset_done(sc, magic_val); 10930 } 10931 10932 return 
        (rc);
}

static void
bxe_pxp_prep(struct bxe_softc *sc)
{
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
        wmb();
    }
}

/*
 * Reset the whole chip except for:
 * - PCIE core
 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 * - IGU
 * - MISC (including AEU)
 * - GRC
 * - RBCN, RBCP
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in 4 port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     * - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck and DMAE
     * commands would not return.
Therefore it's important to reset the second 11031 * reset register (containing the 11032 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 11033 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 11034 * bit). 11035 */ 11036 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11037 reset_mask2 & (~not_reset_mask2)); 11038 11039 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11040 reset_mask1 & (~not_reset_mask1)); 11041 11042 mb(); 11043 wmb(); 11044 11045 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11046 reset_mask2 & (~stay_reset2)); 11047 11048 mb(); 11049 wmb(); 11050 11051 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11052 wmb(); 11053} 11054 11055static int 11056bxe_process_kill(struct bxe_softc *sc, 11057 uint8_t global) 11058{ 11059 int cnt = 1000; 11060 uint32_t val = 0; 11061 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11062 uint32_t tags_63_32 = 0; 11063 11064 /* Empty the Tetris buffer, wait for 1s */ 11065 do { 11066 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11067 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11068 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11069 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11070 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11071 if (CHIP_IS_E3(sc)) { 11072 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11073 } 11074 11075 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11076 ((port_is_idle_0 & 0x1) == 0x1) && 11077 ((port_is_idle_1 & 0x1) == 0x1) && 11078 (pgl_exp_rom2 == 0xffffffff) && 11079 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11080 break; 11081 DELAY(1000); 11082 } while (cnt-- > 0); 11083 11084 if (cnt <= 0) { 11085 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11086 "are still outstanding read requests after 1s! " 11087 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11088 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11089 sr_cnt, blk_cnt, port_is_idle_0, 11090 port_is_idle_1, pgl_exp_rom2); 11091 return (-1); 11092 } 11093 11094 mb(); 11095 11096 /* Close gates #2, #3 and #4 */ 11097 bxe_set_234_gates(sc, TRUE); 11098 11099 /* Poll for IGU VQs for 57712 and newer chips */ 11100 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11101 return (-1); 11102 } 11103 11104 /* XXX indicate that "process kill" is in progress to MCP */ 11105 11106 /* clear "unprepared" bit */ 11107 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11108 mb(); 11109 11110 /* Make sure all is written to the chip before the reset */ 11111 wmb(); 11112 11113 /* 11114 * Wait for 1ms to empty GLUE and PCI-E core queues, 11115 * PSWHST, GRC and PSWRD Tetris buffer. 
11116 */ 11117 DELAY(1000); 11118 11119 /* Prepare to chip reset: */ 11120 /* MCP */ 11121 if (global) { 11122 bxe_reset_mcp_prep(sc, &val); 11123 } 11124 11125 /* PXP */ 11126 bxe_pxp_prep(sc); 11127 mb(); 11128 11129 /* reset the chip */ 11130 bxe_process_kill_chip_reset(sc, global); 11131 mb(); 11132 11133 /* clear errors in PGB */ 11134 if (!CHIP_IS_E1(sc)) 11135 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11136 11137 /* Recover after reset: */ 11138 /* MCP */ 11139 if (global && bxe_reset_mcp_comp(sc, val)) { 11140 return (-1); 11141 } 11142 11143 /* XXX add resetting the NO_MCP mode DB here */ 11144 11145 /* Open the gates #2, #3 and #4 */ 11146 bxe_set_234_gates(sc, FALSE); 11147 11148 /* XXX 11149 * IGU/AEU preparation bring back the AEU/IGU to a reset state 11150 * re-enable attentions 11151 */ 11152 11153 return (0); 11154} 11155 11156static int 11157bxe_leader_reset(struct bxe_softc *sc) 11158{ 11159 int rc = 0; 11160 uint8_t global = bxe_reset_is_global(sc); 11161 uint32_t load_code; 11162 11163 /* 11164 * If not going to reset MCP, load "fake" driver to reset HW while 11165 * driver is owner of the HW. 11166 */ 11167 if (!global && !BXE_NOMCP(sc)) { 11168 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11169 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11170 if (!load_code) { 11171 BLOGE(sc, "MCP response failure, aborting\n"); 11172 rc = -1; 11173 goto exit_leader_reset; 11174 } 11175 11176 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11177 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11178 BLOGE(sc, "MCP unexpected response, aborting\n"); 11179 rc = -1; 11180 goto exit_leader_reset2; 11181 } 11182 11183 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11184 if (!load_code) { 11185 BLOGE(sc, "MCP response failure, aborting\n"); 11186 rc = -1; 11187 goto exit_leader_reset2; 11188 } 11189 } 11190 11191 /* try to recover after the failure */ 11192 if (bxe_process_kill(sc, global)) { 11193 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11194 rc = -1; 11195 goto exit_leader_reset2; 11196 } 11197 11198 /* 11199 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11200 * state. 11201 */ 11202 bxe_set_reset_done(sc); 11203 if (global) { 11204 bxe_clear_reset_global(sc); 11205 } 11206 11207exit_leader_reset2: 11208 11209 /* unload "fake driver" if it was loaded */ 11210 if (!global && !BXE_NOMCP(sc)) { 11211 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11212 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11213 } 11214 11215exit_leader_reset: 11216 11217 sc->is_leader = 0; 11218 bxe_release_leader_lock(sc); 11219 11220 mb(); 11221 return (rc); 11222} 11223 11224/* 11225 * prepare INIT transition, parameters configured: 11226 * - HC configuration 11227 * - Queue's CDU context 11228 */ 11229static void 11230bxe_pf_q_prep_init(struct bxe_softc *sc, 11231 struct bxe_fastpath *fp, 11232 struct ecore_queue_init_params *init_params) 11233{ 11234 uint8_t cos; 11235 int cxt_index, cxt_offset; 11236 11237 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11238 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11239 11240 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11241 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11242 11243 /* HC rate */ 11244 init_params->rx.hc_rate = 11245 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11246 init_params->tx.hc_rate = 11247 sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; 11248 11249 /* FW SB ID */ 11250 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11251 11252 /* CQ index among the SB indices */ 11253 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11254 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11255 11256 /* set maximum number of COSs supported by this queue */ 11257 init_params->max_cos = sc->max_cos; 11258 11259 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11260 fp->index, init_params->max_cos); 11261 11262 /* set the context pointers queue object */ 11263 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11264 /* XXX change index/cid here if ever support multiple tx CoS */ 11265 /* fp->txdata[cos]->cid */ 11266 cxt_index = fp->index / ILT_PAGE_CIDS; 11267 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11268 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11269 } 11270} 11271 11272/* set flags that are common for the Tx-only and not normal connections */ 11273static unsigned long 11274bxe_get_common_flags(struct bxe_softc *sc, 11275 struct bxe_fastpath *fp, 11276 uint8_t zero_stats) 11277{ 11278 unsigned long flags = 0; 11279 11280 /* PF driver will always initialize the Queue to an ACTIVE state */ 11281 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11282 11283 /* 11284 * tx only connections collect statistics (on the same index as the 11285 * parent connection). The statistics are zeroed when the parent 11286 * connection is initialized. 11287 */ 11288 11289 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11290 if (zero_stats) { 11291 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11292 } 11293 11294 /* 11295 * tx only connections can support tx-switching, though their 11296 * CoS-ness doesn't survive the loopback 11297 */ 11298 if (sc->flags & BXE_TX_SWITCHING) { 11299 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11300 } 11301 11302 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11303 11304 return (flags); 11305} 11306 11307static unsigned long 11308bxe_get_q_flags(struct bxe_softc *sc, 11309 struct bxe_fastpath *fp, 11310 uint8_t leading) 11311{ 11312 unsigned long flags = 0; 11313 11314 if (IS_MF_SD(sc)) { 11315 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11316 } 11317 11318 if (sc->ifnet->if_capenable & IFCAP_LRO) { 11319 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11320#if __FreeBSD_version >= 800000 11321 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11322#endif 11323 } 11324 11325 if (leading) { 11326 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11327 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11328 } 11329 11330 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11331 11332 /* merge with common flags */ 11333 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11334} 11335 11336static void 11337bxe_pf_q_prep_general(struct bxe_softc *sc, 11338 struct bxe_fastpath *fp, 11339 struct ecore_general_setup_params *gen_init, 11340 uint8_t cos) 11341{ 11342 gen_init->stat_id = bxe_stats_id(fp); 11343 gen_init->spcl_id = fp->cl_id; 11344 gen_init->mtu = sc->mtu; 11345 gen_init->cos = cos; 11346} 11347 11348static void 11349bxe_pf_rx_q_prep(struct bxe_softc *sc, 11350 struct bxe_fastpath *fp, 11351 struct rxq_pause_params *pause, 11352 struct ecore_rxq_setup_params *rxq_init) 11353{ 11354 uint8_t max_sge = 0; 11355 uint16_t sge_sz = 0; 11356 uint16_t tpa_agg_size = 0; 11357 11358 pause->sge_th_lo = SGE_TH_LO(sc); 11359 pause->sge_th_hi = SGE_TH_HI(sc); 11360 11361 /* validate SGE ring has enough to cross high threshold */ 11362 if (sc->dropless_fc && 
        (pause->sge_th_hi + FW_PREFETCH_CNT) >
        (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
        BLOGW(sc, "sge ring threshold limit\n");
    }

    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
    tpa_agg_size = (2 * sc->mtu);
    if (tpa_agg_size < sc->max_aggregation_size) {
        tpa_agg_size = sc->max_aggregation_size;
    }

    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
               (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);

        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map = fp->rx_dma.paddr;
    rxq_init->sge_map = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be the maximum number of data bytes that may be
     * placed on the BD (not including padding).
     */
    rxq_init->buf_sz = (fp->rx_buf_size -
                        IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz = tpa_agg_size;
    rxq_init->sge_buf_sz = sge_sz;
    rxq_init->max_sges_pkt = max_sge;
    rxq_init->rss_engine_id = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number of simultaneous TPA aggregations for this queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}

static void
bxe_pf_tx_q_prep(struct bxe_softc *sc,
                 struct bxe_fastpath *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     * fp->txdata[cos]->tx_dma.paddr;
     */
    txq_init->dscr_map = fp->tx_dma.paddr;
    txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
    txq_init->fw_sb_id = fp->fw_sb_id;

    /*
     * set the TSS leading client id for TX classification to the
     * leading RSS client id
     */
    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
}

/*
 * This function performs 2 steps in a queue state machine:
 * 1) RESET->INIT
 * 2) INIT->SETUP
 */
static int
bxe_setup_queue(struct bxe_softc *sc,
                struct bxe_fastpath *fp,
                uint8_t leading)
{
    struct ecore_queue_state_params q_params = { NULL };
    struct ecore_queue_setup_params *setup_params =
        &q_params.params.setup;
    int rc;

    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);

    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    /* we want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* prepare the INIT parameters */
    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_INIT;

    /* Change the state to INIT */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
        return (rc);
    }

    BLOGD(sc, DBG_LOAD, "init complete\n");

    /* now move the Queue to the SETUP state */
    memset(setup_params, 0, sizeof(*setup_params));

    /* set Queue flags */
    setup_params->flags = bxe_get_q_flags(sc, fp, leading);

    /* set general SETUP parameters */
    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
                          FIRST_TX_COS_INDEX);

    bxe_pf_rx_q_prep(sc, fp,
                     &setup_params->pause_params,
                     &setup_params->rxq_params);

    bxe_pf_tx_q_prep(sc, fp,
                     &setup_params->txq_params,
                     FIRST_TX_COS_INDEX);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_SETUP;

    /* change the state to SETUP */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
        return (rc);
    }

    return (rc);
}

static int
bxe_setup_leading(struct bxe_softc *sc)
{
    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
}

static int
bxe_config_rss_pf(struct bxe_softc *sc,
                  struct ecore_rss_config_obj *rss_obj,
                  uint8_t config_hash)
{
    struct ecore_config_rss_params params = { NULL };
    int i;

    /*
     * Although RSS is meaningless when there is a single HW queue we
     * still need it enabled in order to have HW Rx hash generated.
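     *
     * Illustrative example: with 4 ethernet queues, bxe_init_rss_pf()
     * below fills the indirection table with cl_id, cl_id+1, cl_id+2,
     * cl_id+3, cl_id, ... so RX flows are spread round-robin across
     * the fastpath clients.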
     */

    params.rss_obj = rss_obj;

    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);

    /* RSS configuration */
    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
    if (rss_obj->udp_rss_v4) {
        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
    }
    if (rss_obj->udp_rss_v6) {
        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
    }

    /* Hash bits */
    params.rss_result_mask = MULTI_MASK;

    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

    if (config_hash) {
        /* RSS keys */
        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
            params.rss_key[i] = arc4random();
        }

        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
    }

    return (ecore_config_rss(sc, &params));
}

static int
bxe_config_rss_eth(struct bxe_softc *sc,
                   uint8_t config_hash)
{
    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
}

static int
bxe_init_rss_pf(struct bxe_softc *sc)
{
    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
    int i;

    /*
     * Prepare the initial contents of the indirection table if
     * RSS is enabled
     */
    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
        sc->rss_conf_obj.ind_table[i] =
            (sc->fp->cl_id + (i % num_eth_queues));
    }

    if (sc->udp_rss) {
        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
    }

    /*
     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
     * per-port, so if explicit configuration is needed, do it only
     * for a PMF.
     *
     * For 57712 and newer it's a per-function configuration.
     */
    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
}

static int
bxe_set_mac_one(struct bxe_softc *sc,
                uint8_t *mac,
                struct ecore_vlan_mac_obj *obj,
                uint8_t set,
                int mac_type,
                unsigned long *ramrod_flags)
{
    struct ecore_vlan_mac_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* fill in general parameters */
    ramrod_param.vlan_mac_obj = obj;
    ramrod_param.ramrod_flags = *ramrod_flags;

    /* fill a user request section if needed */
    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

        /* Set the command: ADD or DEL */
        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
                                            ECORE_VLAN_MAC_DEL;
    }

    rc = ecore_config_vlan_mac(sc, &ramrod_param);

    if (rc == ECORE_EXISTS) {
        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
        /* do not treat adding same MAC as error */
        rc = 0;
    } else if (rc < 0) {
        BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 11665 } 11666 11667 return (rc); 11668} 11669 11670static int 11671bxe_set_eth_mac(struct bxe_softc *sc, 11672 uint8_t set) 11673{ 11674 unsigned long ramrod_flags = 0; 11675 11676 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 11677 11678 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 11679 11680 /* Eth MAC is set on RSS leading client (fp[0]) */ 11681 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 11682 &sc->sp_objs->mac_obj, 11683 set, ECORE_ETH_MAC, &ramrod_flags)); 11684} 11685 11686static int 11687bxe_get_cur_phy_idx(struct bxe_softc *sc) 11688{ 11689 uint32_t sel_phy_idx = 0; 11690 11691 if (sc->link_params.num_phys <= 1) { 11692 return (ELINK_INT_PHY); 11693 } 11694 11695 if (sc->link_vars.link_up) { 11696 sel_phy_idx = ELINK_EXT_PHY1; 11697 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 11698 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 11699 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 11700 ELINK_SUPPORTED_FIBRE)) 11701 sel_phy_idx = ELINK_EXT_PHY2; 11702 } else { 11703 switch (elink_phy_selection(&sc->link_params)) { 11704 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 11705 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 11706 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 11707 sel_phy_idx = ELINK_EXT_PHY1; 11708 break; 11709 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 11710 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 11711 sel_phy_idx = ELINK_EXT_PHY2; 11712 break; 11713 } 11714 } 11715 11716 return (sel_phy_idx); 11717} 11718 11719static int 11720bxe_get_link_cfg_idx(struct bxe_softc *sc) 11721{ 11722 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 11723 11724 /* 11725 * The selected activated PHY is always after swapping (in case PHY 11726 * swapping is enabled). 
static int
bxe_get_link_cfg_idx(struct bxe_softc *sc)
{
    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);

    /*
     * The selected/activated PHY index is always the one after swapping
     * (in case PHY swapping is enabled), so when swapping is enabled we
     * need to reverse the index to address the right configuration.
     */
    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
        if (sel_phy_idx == ELINK_EXT_PHY1)
            sel_phy_idx = ELINK_EXT_PHY2;
        else if (sel_phy_idx == ELINK_EXT_PHY2)
            sel_phy_idx = ELINK_EXT_PHY1;
    }

    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
}

static void
bxe_set_requested_fc(struct bxe_softc *sc)
{
    /*
     * Initialize the link parameters structure variables.
     * It is recommended to turn off RX flow control for jumbo frames
     * for better performance.
     */
    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
    } else {
        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
    }
}

static void
bxe_calc_fc_adv(struct bxe_softc *sc)
{
    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);

    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
                                       ADVERTISED_Pause);

    switch (sc->link_vars.ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {

    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
        break;

    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
        break;

    default:
        break;
    }
}

static uint16_t
bxe_get_mf_speed(struct bxe_softc *sc)
{
    uint16_t line_speed = sc->link_vars.line_speed;

    if (IS_MF(sc)) {
        uint16_t maxCfg =
            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);

        /* calculate the current MAX line speed limit for the MF devices */
        if (IS_MF_SI(sc)) {
            line_speed = (line_speed * maxCfg) / 100;
        } else { /* SD mode */
            uint16_t vn_max_rate = maxCfg * 100;

            if (vn_max_rate < line_speed) {
                line_speed = vn_max_rate;
            }
        }
    }

    return (line_speed);
}
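/*
 * Worked example for bxe_get_mf_speed() above (illustrative values):
 * with a physical line_speed of 10000 Mbps and maxCfg = 25, SI mode
 * reports (10000 * 25) / 100 = 2500 Mbps, while SD mode computes
 * vn_max_rate = 25 * 100 = 2500 Mbps and clamps to it only when it is
 * below the physical line speed.
 */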
static void
bxe_fill_report_data(struct bxe_softc *sc,
                     struct bxe_link_report_data *data)
{
    uint16_t line_speed = bxe_get_mf_speed(sc);

    memset(data, 0, sizeof(*data));

    /* fill the report data with the effective line speed */
    data->line_speed = line_speed;

    /* Link is down */
    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
    }

    /* Full DUPLEX */
    if (sc->link_vars.duplex == DUPLEX_FULL) {
        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
    }

    /* Rx Flow Control is ON */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
    }

    /* Tx Flow Control is ON */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
    }
}

/* report link status to OS, should be called under phy_lock */
static void
bxe_link_report_locked(struct bxe_softc *sc)
{
    struct bxe_link_report_data cur_data;

    /* reread mf_cfg */
    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
        bxe_read_mf_cfg(sc);
    }

    /* Read the current link report info */
    bxe_fill_report_data(sc, &cur_data);

    /* Don't report link down or exactly the same link status twice */
    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &sc->last_reported_link.link_report_flags) &&
         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags))) {
        return;
    }

    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
                   cur_data.link_report_flags,
                   sc->last_reported_link.link_report_flags);

    sc->link_cnt++;

    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);

    /* report new link params and remember the state for the next time */
    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));

    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
        if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
    } else {
        const char *duplex;
        const char *flow;

        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
                                   &cur_data.link_report_flags)) {
            duplex = "full";
            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
        } else {
            duplex = "half";
            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
        }

        /*
         * Handle the FC flags at the end so that only these flags may
         * still be set; this makes it easy to check whether flow
         * control is enabled at all.
         */
        if (cur_data.link_report_flags) {
            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                             &cur_data.link_report_flags) &&
                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                             &cur_data.link_report_flags)) {
                flow = "ON - receive & transmit";
            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                    &cur_data.link_report_flags) &&
                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags)) {
                flow = "ON - receive";
            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags) &&
                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                    &cur_data.link_report_flags)) {
                flow = "ON - transmit";
            } else {
                flow = "none"; /* possible?
*/ 11907 } 11908 } else { 11909 flow = "none"; 11910 } 11911 11912 if_link_state_change(sc->ifnet, LINK_STATE_UP); 11913 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 11914 cur_data.line_speed, duplex, flow); 11915 } 11916} 11917 11918static void 11919bxe_link_report(struct bxe_softc *sc) 11920{ 11921 bxe_acquire_phy_lock(sc); 11922 bxe_link_report_locked(sc); 11923 bxe_release_phy_lock(sc); 11924} 11925 11926static void 11927bxe_link_status_update(struct bxe_softc *sc) 11928{ 11929 if (sc->state != BXE_STATE_OPEN) { 11930 return; 11931 } 11932 11933 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 11934 elink_link_status_update(&sc->link_params, &sc->link_vars); 11935 } else { 11936 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 11937 ELINK_SUPPORTED_10baseT_Full | 11938 ELINK_SUPPORTED_100baseT_Half | 11939 ELINK_SUPPORTED_100baseT_Full | 11940 ELINK_SUPPORTED_1000baseT_Full | 11941 ELINK_SUPPORTED_2500baseX_Full | 11942 ELINK_SUPPORTED_10000baseT_Full | 11943 ELINK_SUPPORTED_TP | 11944 ELINK_SUPPORTED_FIBRE | 11945 ELINK_SUPPORTED_Autoneg | 11946 ELINK_SUPPORTED_Pause | 11947 ELINK_SUPPORTED_Asym_Pause); 11948 sc->port.advertising[0] = sc->port.supported[0]; 11949 11950 sc->link_params.sc = sc; 11951 sc->link_params.port = SC_PORT(sc); 11952 sc->link_params.req_duplex[0] = DUPLEX_FULL; 11953 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 11954 sc->link_params.req_line_speed[0] = SPEED_10000; 11955 sc->link_params.speed_cap_mask[0] = 0x7f0000; 11956 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 11957 11958 if (CHIP_REV_IS_FPGA(sc)) { 11959 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 11960 sc->link_vars.line_speed = ELINK_SPEED_1000; 11961 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11962 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 11963 } else { 11964 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 11965 sc->link_vars.line_speed = ELINK_SPEED_10000; 11966 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11967 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 11968 } 11969 11970 sc->link_vars.link_up = 1; 11971 11972 sc->link_vars.duplex = DUPLEX_FULL; 11973 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 11974 11975 if (IS_PF(sc)) { 11976 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 11977 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11978 bxe_link_report(sc); 11979 } 11980 } 11981 11982 if (IS_PF(sc)) { 11983 if (sc->link_vars.link_up) { 11984 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11985 } else { 11986 bxe_stats_handle(sc, STATS_EVENT_STOP); 11987 } 11988 bxe_link_report(sc); 11989 } else { 11990 bxe_link_report(sc); 11991 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11992 } 11993} 11994 11995static int 11996bxe_initial_phy_init(struct bxe_softc *sc, 11997 int load_mode) 11998{ 11999 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12000 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12001 struct elink_params *lp = &sc->link_params; 12002 12003 bxe_set_requested_fc(sc); 12004 12005 if (CHIP_REV_IS_SLOW(sc)) { 12006 uint32_t bond = CHIP_BOND_ID(sc); 12007 uint32_t feat = 0; 12008 12009 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12010 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12011 } else if (bond & 0x4) { 12012 if (CHIP_IS_E3(sc)) { 12013 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12014 } else { 12015 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12016 } 12017 } else if (bond & 0x8) { 12018 if (CHIP_IS_E3(sc)) { 12019 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12020 } else { 12021 feat |= 
ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
            }
        }

        /* disable EMAC for E3 and above */
        if (bond & 0x2) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
        }

        sc->link_params.feature_config_flags |= feat;
    }

    bxe_acquire_phy_lock(sc);

    if (load_mode == LOAD_DIAG) {
        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
        /* Prefer doing PHY loopback at 10G speed, if possible */
        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
            if (lp->speed_cap_mask[cfg_idx] &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
            } else {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
            }
        }
    }

    if (load_mode == LOAD_LOOPBACK_EXT) {
        lp->loopback_mode = ELINK_LOOPBACK_EXT;
    }

    rc = elink_phy_init(&sc->link_params, &sc->link_vars);

    bxe_release_phy_lock(sc);

    bxe_calc_fc_adv(sc);

    if (sc->link_vars.link_up) {
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        bxe_link_report(sc);
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        bxe_periodic_start(sc);
    }

    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;

    return (rc);
}

/* must be called under IF_ADDR_LOCK */
static int
bxe_init_mcast_macs_list(struct bxe_softc *sc,
                         struct ecore_mcast_ramrod_params *p)
{
    struct ifnet *ifp = sc->ifnet;
    int mc_count = 0;
    struct ifmultiaddr *ifma;
    struct ecore_mcast_list_elem *mc_mac;

    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK) {
            continue;
        }

        mc_count++;
    }

    ECORE_LIST_INIT(&p->mcast_list);
    p->mcast_list_len = 0;

    if (!mc_count) {
        return (0);
    }

    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
                    (M_NOWAIT | M_ZERO));
    if (!mc_mac) {
        BLOGE(sc, "Failed to allocate temp mcast list\n");
        return (-1);
    }
    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));

    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK) {
            continue;
        }

        mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);

        BLOGD(sc, DBG_LOAD,
              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5], mc_count);

        mc_mac++;
    }

    p->mcast_list_len = mc_count;

    return (0);
}

static void
bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
{
    struct ecore_mcast_list_elem *mc_mac =
        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
                               struct ecore_mcast_list_elem,
                               link);

    if (mc_mac) {
        /* only a single free as all mc_macs are in the same heap array */
        free(mc_mac, M_DEVBUF);
    }
}
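/*
 * Note on the single free() above: bxe_init_mcast_macs_list() carves all
 * of the list elements out of one malloc()ed array and pushes them onto
 * the list tail in array order, so the first list entry is always the
 * base of that array and freeing it releases every element at once.
 */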
%d\n", rc); 12152 /* Manual backport parts of FreeBSD upstream r284470. */ 12153 BXE_MCAST_UNLOCK(sc); 12154 return (rc); 12155 } 12156 12157 /* configure a new MACs list */ 12158 rc = bxe_init_mcast_macs_list(sc, &rparam); 12159 if (rc) { 12160 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); 12161 BXE_MCAST_UNLOCK(sc); 12162 return (rc); 12163 } 12164 12165 /* Now add the new MACs */ 12166 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12167 if (rc < 0) { 12168 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12169 } 12170 12171 bxe_free_mcast_macs_list(&rparam); 12172 12173 BXE_MCAST_UNLOCK(sc); 12174 12175 return (rc); 12176} 12177 12178static int 12179bxe_set_uc_list(struct bxe_softc *sc) 12180{ 12181 struct ifnet *ifp = sc->ifnet; 12182 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12183 struct ifaddr *ifa; 12184 unsigned long ramrod_flags = 0; 12185 int rc; 12186 12187#if __FreeBSD_version < 800000 12188 IF_ADDR_LOCK(ifp); 12189#else 12190 if_addr_rlock(ifp); 12191#endif 12192 12193 /* first schedule a cleanup up of old configuration */ 12194 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12195 if (rc < 0) { 12196 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12197#if __FreeBSD_version < 800000 12198 IF_ADDR_UNLOCK(ifp); 12199#else 12200 if_addr_runlock(ifp); 12201#endif 12202 return (rc); 12203 } 12204 12205 ifa = ifp->if_addr; 12206 while (ifa) { 12207 if (ifa->ifa_addr->sa_family != AF_LINK) { 12208 ifa = TAILQ_NEXT(ifa, ifa_link); 12209 continue; 12210 } 12211 12212 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12213 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12214 if (rc == -EEXIST) { 12215 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12216 /* do not treat adding same MAC as an error */ 12217 rc = 0; 12218 } else if (rc < 0) { 12219 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12220#if __FreeBSD_version < 800000 12221 IF_ADDR_UNLOCK(ifp); 12222#else 12223 if_addr_runlock(ifp); 12224#endif 12225 return (rc); 12226 } 12227 12228 ifa = TAILQ_NEXT(ifa, ifa_link); 12229 } 12230 12231#if __FreeBSD_version < 800000 12232 IF_ADDR_UNLOCK(ifp); 12233#else 12234 if_addr_runlock(ifp); 12235#endif 12236 12237 /* Execute the pending commands */ 12238 bit_set(&ramrod_flags, RAMROD_CONT); 12239 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12240 ECORE_UC_LIST_MAC, &ramrod_flags)); 12241} 12242 12243static void 12244bxe_set_rx_mode(struct bxe_softc *sc) 12245{ 12246 struct ifnet *ifp = sc->ifnet; 12247 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12248 12249 if (sc->state != BXE_STATE_OPEN) { 12250 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12251 return; 12252 } 12253 12254 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags); 12255 12256 if (ifp->if_flags & IFF_PROMISC) { 12257 rx_mode = BXE_RX_MODE_PROMISC; 12258 } else if ((ifp->if_flags & IFF_ALLMULTI) || 12259 ((ifp->if_amcount > BXE_MAX_MULTICAST) && 12260 CHIP_IS_E1(sc))) { 12261 rx_mode = BXE_RX_MODE_ALLMULTI; 12262 } else { 12263 if (IS_PF(sc)) { 12264 /* some multicasts */ 12265 if (bxe_set_mc_list(sc) < 0) { 12266 rx_mode = BXE_RX_MODE_ALLMULTI; 12267 } 12268 if (bxe_set_uc_list(sc) < 0) { 12269 rx_mode = BXE_RX_MODE_PROMISC; 12270 } 12271 } 12272 } 12273 12274 sc->rx_mode = rx_mode; 12275 12276 /* schedule the rx_mode command */ 12277 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12278 BLOGD(sc, DBG_LOAD, "Scheduled setting 
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
        return;
    }

    if (IS_PF(sc)) {
        bxe_set_storm_rx_mode(sc);
    }
}

/* update flags in shmem */
static void
bxe_update_drv_flags(struct bxe_softc *sc,
                     uint32_t         flags,
                     uint32_t         set)
{
    uint32_t drv_flags;

    if (SHMEM2_HAS(sc, drv_flags)) {
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
        drv_flags = SHMEM2_RD(sc, drv_flags);

        if (set) {
            SET_FLAGS(drv_flags, flags);
        } else {
            RESET_FLAGS(drv_flags, flags);
        }

        SHMEM2_WR(sc, drv_flags, drv_flags);
        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
    }
}
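/*
 * Illustrative sketch (not called anywhere, and safe to remove): a
 * typical use of bxe_update_drv_flags() above is flipping a single
 * capability bit in shmem2; the helper hides the HW-lock protected
 * read-modify-write. DRV_FLAGS_PORT_MASK is used purely as an example
 * bit position here.
 */
static inline void
bxe_example_set_drv_flag(struct bxe_softc *sc)
{
    /* third argument non-zero sets the flag, zero clears it */
    bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 1);
}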
/* periodic timer callout routine, only runs when the interface is up */
static void
bxe_periodic_callout_func(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    int i;

    if (!BXE_CORE_TRYLOCK(sc)) {
        /* just bail and try again next time */

        if ((sc->state == BXE_STATE_OPEN) &&
            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
            /* schedule the next periodic callout */
            callout_reset(&sc->periodic_callout, hz,
                          bxe_periodic_callout_func, sc);
        }

        return;
    }

    if ((sc->state != BXE_STATE_OPEN) ||
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    /* Check for TX timeouts on any fastpath. */
    FOR_EACH_QUEUE(sc, i) {
        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
            /* Ruh-Roh, chip was reset! */
            break;
        }
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        /*
         * This barrier is needed to ensure the ordering between the writing
         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
         * the reading here.
         */
        mb();
        if (sc->port.pmf) {
            bxe_acquire_phy_lock(sc);
            elink_period_func(&sc->link_params, &sc->link_vars);
            bxe_release_phy_lock(sc);
        }
    }

    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
        int mb_idx = SC_FW_MB_IDX(sc);
        uint32_t drv_pulse;
        uint32_t mcp_pulse;

        ++sc->fw_drv_pulse_wr_seq;
        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;

        drv_pulse = sc->fw_drv_pulse_wr_seq;
        bxe_drv_pulse(sc);

        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);

        /*
         * The delta between driver pulse and mcp response should
         * be 1 (before mcp response) or 0 (after mcp response).
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat... */
            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                  drv_pulse, mcp_pulse);
        }
    }

    /* state is BXE_STATE_OPEN */
    bxe_stats_handle(sc, STATS_EVENT_UPDATE);

    BXE_CORE_UNLOCK(sc);

    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* schedule the next periodic callout */
        callout_reset(&sc->periodic_callout, hz,
                      bxe_periodic_callout_func, sc);
    }
}

static void
bxe_periodic_start(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
}

static void
bxe_periodic_stop(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
    callout_drain(&sc->periodic_callout);
}
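/*
 * Illustrative sketch (not called anywhere, and safe to remove): the
 * periodic_flags word gates the self-rearming callout above, so starting
 * and stopping must always go through the two helpers to keep the atomic
 * flag and callout_drain() paired; open-coding callout_stop() instead
 * could leave a concurrently running callout free to rearm itself.
 */
static inline void
bxe_example_restart_periodic(struct bxe_softc *sc)
{
    /* fully drain the old callout, then arm a fresh one */
    bxe_periodic_stop(sc);
    bxe_periodic_start(sc);
}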
void
bxe_parity_recover(struct bxe_softc *sc)
{
    uint8_t global = FALSE;
    uint32_t error_recovered, error_unrecovered;
    bool is_parity;

    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        BLOGE(sc, "RECOVERY failed, "
              "stack notified driver is NOT running! "
              "Please reboot/power cycle the system.\n");
        return;
    }

    while (1) {
        BLOGD(sc, DBG_SP,
              "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
              __func__, sc, sc->state, sc->recovery_state, sc->error_status);

        switch(sc->recovery_state) {

        case BXE_RECOVERY_INIT:
            is_parity = bxe_chk_parity_attn(sc, &global, FALSE);

            if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
                (sc->error_status & BXE_ERR_MCP_ASSERT) ||
                (sc->error_status & BXE_ERR_GLOBAL)) {

                BXE_CORE_LOCK(sc);
                if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
                    bxe_periodic_stop(sc);
                }
                bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
                sc->state = BXE_STATE_ERROR;
                sc->recovery_state = BXE_RECOVERY_FAILED;
                BLOGE(sc, "No recovery tried for error 0x%x; "
                      "stack notified driver is NOT running! "
                      "Please reboot/power cycle the system.\n",
                      sc->error_status);
                BXE_CORE_UNLOCK(sc);
                return;
            }

            /* Try to get a LEADER_LOCK HW lock */
            if (bxe_trylock_leader_lock(sc)) {
                bxe_set_reset_in_progress(sc);
                /*
                 * Check if there is a global attention and if
                 * there was a global attention, set the global
                 * reset bit.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }
                sc->is_leader = 1;
            }

            /* If the interface has been removed - break */
            if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
                bxe_periodic_stop(sc);
            }

            BXE_CORE_LOCK(sc);
            bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
            sc->recovery_state = BXE_RECOVERY_WAIT;
            BXE_CORE_UNLOCK(sc);

            /*
             * Ensure "is_leader", MCP command sequence and
             * "recovery_state" update values are seen on other
             * CPUs.
             */
            mb();
            break;

        case BXE_RECOVERY_WAIT:

            if (sc->is_leader) {
                int other_engine = SC_PATH(sc) ? 0 : 1;
                bool other_load_status =
                    bxe_get_load_status(sc, other_engine);
                bool load_status =
                    bxe_get_load_status(sc, SC_PATH(sc));
                global = bxe_reset_is_global(sc);

                /*
                 * In case of a parity in a global block, let
                 * the first leader that performs a
                 * leader_reset() reset the global blocks in
                 * order to clear global attentions. Otherwise
                 * the gates will remain closed for that
                 * engine.
                 */
                if (load_status ||
                    (global && other_load_status)) {
                    /*
                     * Wait until all other functions get
                     * down.
                     */
                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;
                } else {
                    /*
                     * If all other functions got down,
                     * try to bring the chip back to
                     * normal. In any case it's an exit
                     * point for a leader.
                     */
                    if (bxe_leader_reset(sc)) {
                        BLOGE(sc, "RECOVERY failed, "
                              "stack notified driver is NOT running!\n");
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        mb();
                        return;
                    }

                    /*
                     * If we are here, it means that the
                     * leader has succeeded and no longer
                     * wants to be the leader. Try to
                     * continue as a non-leader.
                     */
                    break;
                }

            } else { /* non-leader */
                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
                    /*
                     * Try to get a LEADER_LOCK HW lock, since
                     * a former leader may have been unloaded
                     * by the user or may have released
                     * leadership for some other reason.
                     */
                    if (bxe_trylock_leader_lock(sc)) {
                        /*
                         * I'm a leader now! Restart the
                         * switch case.
                         */
                        sc->is_leader = 1;
                        break;
                    }

                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;

                } else {
                    /*
                     * If there was a global attention, wait
                     * for it to be cleared.
                     */
                    if (bxe_reset_is_global(sc)) {
                        taskqueue_enqueue_timeout(taskqueue_thread,
                            &sc->sp_err_timeout_task, hz/10);
                        return;
                    }

                    error_recovered =
                        sc->eth_stats.recoverable_error;
                    error_unrecovered =
                        sc->eth_stats.unrecoverable_error;
                    BXE_CORE_LOCK(sc);
                    sc->recovery_state =
                        BXE_RECOVERY_NIC_LOADING;
                    if (bxe_nic_load(sc, LOAD_NORMAL)) {
                        error_unrecovered++;
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        BLOGE(sc, "Recovery is NOT successful, "
                              "state=0x%x recovery_state=0x%x error=%x\n",
                              sc->state, sc->recovery_state,
                              sc->error_status);
                        sc->error_status = 0;
                    } else {
                        sc->recovery_state =
                            BXE_RECOVERY_DONE;
                        error_recovered++;
                        BLOGI(sc, "Recovery is successful from errors %x, "
                              "state=0x%x recovery_state=0x%x\n",
                              sc->error_status, sc->state,
                              sc->recovery_state);
                        mb();
                    }
                    sc->error_status = 0;
                    BXE_CORE_UNLOCK(sc);
                    sc->eth_stats.recoverable_error =
                        error_recovered;
                    sc->eth_stats.unrecoverable_error =
                        error_unrecovered;

                    return;
                }
            }
        default:
            return;
        }
    }
}

void
bxe_handle_error(struct bxe_softc *sc)
{
    if (sc->recovery_state == BXE_RECOVERY_WAIT) {
        return;
    }

    if (sc->error_status) {
        if (sc->state == BXE_STATE_OPEN) {
            bxe_int_disable(sc);
        }
        if (sc->link_vars.link_up) {
            if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
        }
        sc->recovery_state = BXE_RECOVERY_INIT;
        BLOGI(sc, "bxe%d: Recovery started, errors 0x%x recovery state 0x%x\n",
              sc->unit, sc->error_status, sc->recovery_state);
        bxe_parity_recover(sc);
    }
}

static void
bxe_sp_err_timeout_task(void *arg, int pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg;

    BLOGD(sc, DBG_SP,
          "%s state = 0x%x rec state=0x%x error_status=%x\n",
          __func__, sc->state, sc->recovery_state, sc->error_status);

    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        return;
    }

    /* grab a GRC dump if one can be taken */
    if ((sc->error_status) && (sc->trigger_grcdump)) {
        bxe_grc_dump(sc);
    }

    if (sc->recovery_state != BXE_RECOVERY_DONE) {
        bxe_handle_error(sc);
        bxe_parity_recover(sc);
    } else if (sc->error_status) {
        bxe_handle_error(sc);
    }

    return;
}

/* start the controller */
static __noinline int
bxe_nic_load(struct bxe_softc *sc,
             int              load_mode)
{
    uint32_t val;
    int load_code = 0;
    int i, rc = 0;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");

    sc->state = BXE_STATE_OPENING_WAITING_LOAD;

    if (IS_PF(sc)) {
        /* must be called before memory allocation and HW init */
        bxe_ilt_set_info(sc);
    }

    sc->last_reported_link_state = LINK_STATE_UNKNOWN;

    bxe_set_fp_rx_buf_size(sc);

    if (bxe_alloc_fp_buffers(sc) != 0) {
        BLOGE(sc, "Failed to allocate fastpath memory\n");
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_fw_stats_mem(sc) !=
0) { 12709 sc->state = BXE_STATE_CLOSED; 12710 rc = ENOMEM; 12711 goto bxe_nic_load_error0; 12712 } 12713 12714 if (IS_PF(sc)) { 12715 /* set pf load just before approaching the MCP */ 12716 bxe_set_pf_load(sc); 12717 12718 /* if MCP exists send load request and analyze response */ 12719 if (!BXE_NOMCP(sc)) { 12720 /* attempt to load pf */ 12721 if (bxe_nic_load_request(sc, &load_code) != 0) { 12722 sc->state = BXE_STATE_CLOSED; 12723 rc = ENXIO; 12724 goto bxe_nic_load_error1; 12725 } 12726 12727 /* what did the MCP say? */ 12728 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12729 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12730 sc->state = BXE_STATE_CLOSED; 12731 rc = ENXIO; 12732 goto bxe_nic_load_error2; 12733 } 12734 } else { 12735 BLOGI(sc, "Device has no MCP!\n"); 12736 load_code = bxe_nic_load_no_mcp(sc); 12737 } 12738 12739 /* mark PMF if applicable */ 12740 bxe_nic_load_pmf(sc, load_code); 12741 12742 /* Init Function state controlling object */ 12743 bxe_init_func_obj(sc); 12744 12745 /* Initialize HW */ 12746 if (bxe_init_hw(sc, load_code) != 0) { 12747 BLOGE(sc, "HW init failed\n"); 12748 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12749 sc->state = BXE_STATE_CLOSED; 12750 rc = ENXIO; 12751 goto bxe_nic_load_error2; 12752 } 12753 } 12754 12755 /* set ALWAYS_ALIVE bit in shmem */ 12756 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12757 bxe_drv_pulse(sc); 12758 sc->flags |= BXE_NO_PULSE; 12759 12760 /* attach interrupts */ 12761 if (bxe_interrupt_attach(sc) != 0) { 12762 sc->state = BXE_STATE_CLOSED; 12763 rc = ENXIO; 12764 goto bxe_nic_load_error2; 12765 } 12766 12767 bxe_nic_init(sc, load_code); 12768 12769 /* Init per-function objects */ 12770 if (IS_PF(sc)) { 12771 bxe_init_objs(sc); 12772 // XXX bxe_iov_nic_init(sc); 12773 12774 /* set AFEX default VLAN tag to an invalid value */ 12775 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12776 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12777 12778 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12779 rc = bxe_func_start(sc); 12780 if (rc) { 12781 BLOGE(sc, "Function start failed! rc = %d\n", rc); 12782 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12783 sc->state = BXE_STATE_ERROR; 12784 goto bxe_nic_load_error3; 12785 } 12786 12787 /* send LOAD_DONE command to MCP */ 12788 if (!BXE_NOMCP(sc)) { 12789 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12790 if (!load_code) { 12791 BLOGE(sc, "MCP response failure, aborting\n"); 12792 sc->state = BXE_STATE_ERROR; 12793 rc = ENXIO; 12794 goto bxe_nic_load_error3; 12795 } 12796 } 12797 12798 rc = bxe_setup_leading(sc); 12799 if (rc) { 12800 BLOGE(sc, "Setup leading failed! 
rc = %d\n", rc);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
            if (rc) {
                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
                sc->state = BXE_STATE_ERROR;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_init_rss_pf(sc);
        if (rc) {
            BLOGE(sc, "PF RSS init failed\n");
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }
    /* XXX VF */

    /* now when Clients are configured we are ready to work */
    sc->state = BXE_STATE_OPEN;

    /* Configure a ucast MAC */
    if (IS_PF(sc)) {
        rc = bxe_set_eth_mac(sc, TRUE);
    }
    if (rc) {
        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
        sc->state = BXE_STATE_ERROR;
        goto bxe_nic_load_error3;
    }

    if (sc->port.pmf) {
        rc = bxe_initial_phy_init(sc, /* XXX load_mode */ LOAD_OPEN);
        if (rc) {
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }

    sc->link_params.feature_config_flags &=
        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;

    /* start fast path */

    /* Initialize Rx filter */
    bxe_set_rx_mode(sc);

    /* start the Tx */
    switch (/* XXX load_mode */ LOAD_OPEN) {
    case LOAD_NORMAL:
    case LOAD_OPEN:
        break;

    case LOAD_DIAG:
    case LOAD_LOOPBACK_EXT:
        sc->state = BXE_STATE_DIAG;
        break;

    default:
        break;
    }

    if (sc->port.pmf) {
        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
    } else {
        bxe_link_status_update(sc);
    }

    /* start the periodic timer callout */
    bxe_periodic_start(sc);

    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        /* mark driver is loaded in shmem2 */
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  (val |
                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
    }

    /* wait for all pending SP commands to complete */
    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
        bxe_periodic_stop(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
        return (ENXIO);
    }

    /* Tell the stack the driver is running! */
    sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;

    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");

    return (0);

bxe_nic_load_error3:

    if (IS_PF(sc)) {
        bxe_int_disable_sync(sc, 1);

        /* clean out queued objects */
        bxe_squeeze_objects(sc);
    }

    bxe_interrupt_detach(sc);

bxe_nic_load_error2:

    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

    sc->port.pmf = 0;

bxe_nic_load_error1:

    /* clear pf_load status, as it was already set */
    if (IS_PF(sc)) {
        bxe_clear_pf_load(sc);
    }

bxe_nic_load_error0:

    bxe_free_fw_stats_mem(sc);
    bxe_free_fp_buffers(sc);
    bxe_free_mem(sc);

    return (rc);
}
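/*
 * Note on the error unwind in bxe_nic_load() above: the labels unwind in
 * strict reverse order of setup. error3 masks interrupts, squeezes queued
 * ecore objects and detaches the interrupt handlers; error2 sends the
 * UNLOAD_REQ_WOL_MCP/UNLOAD_DONE pair to the MCP and clears the PMF role;
 * error1 drops the pf_load mark; and error0 frees the firmware statistics,
 * fastpath and driver memory. A new setup step must jump to the label
 * that undoes everything performed after it.
 */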
static int
bxe_init_locked(struct bxe_softc *sc)
{
    int other_engine = SC_PATH(sc) ? 0 : 1;
    uint8_t other_load_status, load_status;
    uint8_t global = FALSE;
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);

    /* check if the driver is already running */
    if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
        return (0);
    }

    if ((sc->state == BXE_STATE_ERROR) &&
        (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
              "as previous recovery failed. "
              "Reboot/Power-cycle the system.\n");
        return (ENXIO);
    }

    bxe_set_power_state(sc, PCI_PM_D0);

    /*
     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRESS may still be set. If so we want the first
     * function loaded on the current engine to complete the recovery.
     * Parity recovery is only relevant for the PF driver.
     */
    if (IS_PF(sc)) {
        other_load_status = bxe_get_load_status(sc, other_engine);
        load_status = bxe_get_load_status(sc, SC_PATH(sc));

        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
            bxe_chk_parity_attn(sc, &global, TRUE)) {
            do {
                /*
                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless of whether it will be this
                 * function that will complete the recovery or not.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }

                /*
                 * Only the first function on the current engine should try
                 * to recover in open. In case of attentions in global blocks
                 * only the first in the chip should try to recover.
                 */
                if ((!load_status && (!global || !other_load_status)) &&
                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
                    BLOGI(sc, "Recovered during init\n");
                    break;
                }

                /* recovery has failed... */
                bxe_set_power_state(sc, PCI_PM_D3hot);
                sc->recovery_state = BXE_RECOVERY_FAILED;

                BLOGE(sc, "Recovery flow hasn't properly "
                      "completed yet, try again later. "
                      "If you still see this message after a "
                      "few retries then power cycle is required.\n");

                rc = ENXIO;
                goto bxe_init_locked_done;
            } while (0);
        }
    }

    sc->recovery_state = BXE_RECOVERY_DONE;

    rc = bxe_nic_load(sc, LOAD_OPEN);

bxe_init_locked_done:

    if (rc) {
        /* Tell the stack the driver is NOT running! */
        BLOGE(sc, "Initialization failed, "
              "stack notified driver is NOT running!\n");
        sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
    }

    return (rc);
}

static int
bxe_stop_locked(struct bxe_softc *sc)
{
    BXE_CORE_LOCK_ASSERT(sc);
    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
}

/*
 * Handles controller initialization when called from an unlocked routine.
 * ifconfig calls this function.
13037 * 13038 * Returns: 13039 * void 13040 */ 13041static void 13042bxe_init(void *xsc) 13043{ 13044 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13045 13046 BXE_CORE_LOCK(sc); 13047 bxe_init_locked(sc); 13048 BXE_CORE_UNLOCK(sc); 13049} 13050 13051static int 13052bxe_init_ifnet(struct bxe_softc *sc) 13053{ 13054 struct ifnet *ifp; 13055 13056 /* ifconfig entrypoint for media type/status reporting */ 13057 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13058 bxe_ifmedia_update, 13059 bxe_ifmedia_status); 13060 13061 /* set the default interface values */ 13062 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13063 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13064 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13065 13066 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ 13067 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media); 13068 13069 /* allocate the ifnet structure */ 13070 if ((ifp = if_alloc(IFT_ETHER)) == NULL) { 13071 BLOGE(sc, "Interface allocation failed!\n"); 13072 return (ENXIO); 13073 } 13074 13075 ifp->if_softc = sc; 13076 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13077 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 13078 ifp->if_ioctl = bxe_ioctl; 13079 ifp->if_start = bxe_tx_start; 13080#if __FreeBSD_version >= 901504 13081 ifp->if_transmit = bxe_tx_mq_start; 13082 ifp->if_qflush = bxe_mq_flush; 13083#endif 13084#ifdef FreeBSD8_0 13085 ifp->if_timer = 0; 13086#endif 13087 ifp->if_init = bxe_init; 13088 ifp->if_mtu = sc->mtu; 13089 ifp->if_hwassist = (CSUM_IP | 13090 CSUM_TCP | 13091 CSUM_UDP | 13092 CSUM_TSO | 13093 CSUM_TCP_IPV6 | 13094 CSUM_UDP_IPV6); 13095 ifp->if_capabilities = 13096#if __FreeBSD_version < 700000 13097 (IFCAP_VLAN_MTU | 13098 IFCAP_VLAN_HWTAGGING | 13099 IFCAP_HWCSUM | 13100 IFCAP_JUMBO_MTU | 13101 IFCAP_LRO); 13102#else 13103 (IFCAP_VLAN_MTU | 13104 IFCAP_VLAN_HWTAGGING | 13105 IFCAP_VLAN_HWTSO | 13106 IFCAP_VLAN_HWFILTER | 13107 IFCAP_VLAN_HWCSUM | 13108 IFCAP_HWCSUM | 13109 IFCAP_JUMBO_MTU | 13110 IFCAP_LRO | 13111 IFCAP_TSO4 | 13112 IFCAP_TSO6 | 13113 IFCAP_WOL_MAGIC); 13114#endif 13115 ifp->if_capenable = ifp->if_capabilities; 13116 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... 
 */
#if __FreeBSD_version < 1000025
    ifp->if_baudrate = 1000000000;
#else
    if_initbaudrate(ifp, IF_Gbps(10));
#endif
    ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;

    IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
    IFQ_SET_READY(&ifp->if_snd);

    sc->ifnet = ifp;

    /* attach to the Ethernet interface list */
    ether_ifattach(ifp, sc->link_params.mac_addr);

    return (0);
}

static void
bxe_deallocate_bars(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < MAX_BARS; i++) {
        if (sc->bar[i].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_MEMORY,
                                 sc->bar[i].rid,
                                 sc->bar[i].resource);
            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
                  i, PCIR_BAR(i));
        }
    }
}

static int
bxe_allocate_bars(struct bxe_softc *sc)
{
    u_int flags;
    int i;

    memset(sc->bar, 0, sizeof(sc->bar));

    for (i = 0; i < MAX_BARS; i++) {

        /* memory resources reside at BARs 0, 2, 4 */
        /* Run `pciconf -lb` to see mappings */
        if ((i != 0) && (i != 2) && (i != 4)) {
            continue;
        }

        sc->bar[i].rid = PCIR_BAR(i);

        flags = RF_ACTIVE;
        if (i == 0) {
            flags |= RF_SHAREABLE;
        }

        if ((sc->bar[i].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_MEMORY,
                                    &sc->bar[i].rid,
                                    flags)) == NULL) {
            /* returning 0 here would falsely signal success to the caller */
            return (-1);
        }

        sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
        sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);

        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#lx-%#lx (%ld) -> %#jx\n",
              i, PCIR_BAR(i),
              rman_get_start(sc->bar[i].resource),
              rman_get_end(sc->bar[i].resource),
              rman_get_size(sc->bar[i].resource),
              (uintmax_t)sc->bar[i].kva);
    }

    return (0);
}
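/*
 * Illustrative sketch (not called anywhere, and safe to remove):
 * bxe_allocate_bars() above only probes BAR indices 0, 2 and 4 because
 * the device's memory BARs are 64-bit, so each one consumes two
 * consecutive 32-bit BAR slots in config space. PCIR_BAR() maps a BAR
 * index to its config-space offset.
 */
static inline int
bxe_example_bar_offset(int bar_index)
{
    /* 0 -> 0x10, 2 -> 0x18, 4 -> 0x20 */
    return (PCIR_BAR(bar_index));
}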
static void
bxe_get_function_num(struct bxe_softc *sc)
{
    uint32_t val = 0;

    /*
     * Read the ME register to get the function number. The ME register
     * holds the relative-function number and absolute-function number. The
     * absolute-function number appears only in E2 and above. Before that
     * these bits always contained zero, therefore we cannot blindly use them.
     */
    val = REG_RD(sc, BAR_ME_REGISTER);

    sc->pfunc_rel =
        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
    sc->path_id =
        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;

    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    } else {
        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    }

    BLOGD(sc, DBG_LOAD,
          "Relative function %d, Absolute function %d, Path %d\n",
          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}

static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
{
    uint32_t shmem2_size;
    uint32_t offset;
    uint32_t mf_cfg_offset_value;

    /* Non 57712 */
    offset = (SHMEM_RD(sc, func_mb) +
              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

    /* 57712 plus */
    if (sc->devinfo.shmem2_base != 0) {
        shmem2_size = SHMEM2_RD(sc, size);
        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
                offset = mf_cfg_offset_value;
            }
        }
    }

    return (offset);
}

static uint32_t
bxe_pcie_capability_read(struct bxe_softc *sc,
                         int              reg,
                         int              width)
{
    int pcie_reg;

    /* ensure the PCIe capability is present */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
        if (pcie_reg != 0) {
            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
        }
    }

    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");

    return (0);
}

static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
            PCIM_EXP_STA_TRANSACTION_PND);
}
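/*
 * Illustrative sketch (not called anywhere, and safe to remove):
 * pci_find_cap() returns 0 and stores the capability's config-space
 * offset when the capability is present; bxe_probe_pci_caps() below
 * applies this same pattern to the PM, MSI and MSI-X capabilities.
 */
static inline int
bxe_example_has_cap(device_t dev, int capability)
{
    int reg = 0;

    /* non-zero only when the capability list contains the capability */
    return ((pci_find_cap(dev, capability, &reg) == 0) && (reg != 0));
}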
/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
 */
static void
bxe_probe_pci_caps(struct bxe_softc *sc)
{
    uint16_t link_status;
    int reg;

    /* check if PCI Power Management is enabled */
    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
        }
    }

    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);

    /* handle PCIe 2.0 workarounds for 57710 */
    if (CHIP_IS_E1(sc)) {
        /* workaround for 57710 errata E4_57710_27462 */
        sc->devinfo.pcie_link_speed =
            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;

        /* workaround for 57710 errata E4_57710_27488 */
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
        if (sc->devinfo.pcie_link_speed > 1) {
            sc->devinfo.pcie_link_width =
                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
        }
    } else {
        sc->devinfo.pcie_link_speed =
            (link_status & PCIM_LINK_STA_SPEED);
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
    }

    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);

    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;

    /* check if MSI capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
        }
    }

    /* check if MSI-X capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
        }
    }
}

static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}

static uint32_t
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
{
    uint32_t retval = 0;
    uint32_t val;

    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
            retval |= MF_PROTO_SUPPORT_ETHERNET;
        }
        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_ISCSI;
        }
        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_FCOE;
        }
    }

    return (retval);
}

static int
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /*
     * There is no outer vlan if we're in switch-independent mode.
     * If the mac is valid then assume multi-function.
13417 */ 13418 13419 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13420 13421 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13422 13423 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13424 13425 mf_info->vnics_per_port = 13426 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13427 13428 return (0); 13429} 13430 13431static int 13432bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13433{ 13434 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13435 uint32_t e1hov_tag; 13436 uint32_t func_config; 13437 uint32_t niv_config; 13438 13439 mf_info->multi_vnics_mode = 1; 13440 13441 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13442 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13443 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13444 13445 mf_info->ext_id = 13446 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13447 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13448 13449 mf_info->default_vlan = 13450 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13451 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13452 13453 mf_info->niv_allowed_priorities = 13454 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13455 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13456 13457 mf_info->niv_default_cos = 13458 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13459 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13460 13461 mf_info->afex_vlan_mode = 13462 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13463 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13464 13465 mf_info->niv_mba_enabled = 13466 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13467 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13468 13469 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13470 13471 mf_info->vnics_per_port = 13472 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13473 13474 return (0); 13475} 13476 13477static int 13478bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13479{ 13480 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13481 uint32_t mf_cfg1; 13482 uint32_t mf_cfg2; 13483 uint32_t ovlan1; 13484 uint32_t ovlan2; 13485 uint8_t i, j; 13486 13487 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13488 SC_PORT(sc)); 13489 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13490 mf_info->mf_config[SC_VN(sc)]); 13491 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13492 mf_info->multi_vnics_mode); 13493 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13494 mf_info->vnics_per_port); 13495 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13496 mf_info->ext_id); 13497 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13498 mf_info->min_bw[0], mf_info->min_bw[1], 13499 mf_info->min_bw[2], mf_info->min_bw[3]); 13500 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13501 mf_info->max_bw[0], mf_info->max_bw[1], 13502 mf_info->max_bw[2], mf_info->max_bw[3]); 13503 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13504 sc->mac_addr_str); 13505 13506 /* various MF mode sanity checks... 
 */

    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
              SC_PORT(sc));
        return (1);
    }

    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
        return (1);
    }

    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
                  SC_VN(sc), OVLAN(sc));
            return (1);
        }

        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
                  mf_info->multi_vnics_mode, OVLAN(sc));
            return (1);
        }

        /*
         * Verify that all functions are either in MF or in SF mode. If MF,
         * make sure that all non-hidden functions have a valid ovlan. If SF,
         * make sure that all non-hidden functions have an invalid ovlan.
         */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
                BLOGE(sc, "mf_mode=SD function %d MF config "
                      "mismatch, multi_vnics_mode=%d ovlan=%d\n",
                      i, mf_info->multi_vnics_mode, ovlan1);
                return (1);
            }
        }

        /* Verify all funcs on the same port each have a different ovlan. */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            /* iterate from the next function on the port to the max func */
            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
                ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan1) &&
                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan2) &&
                    (ovlan1 == ovlan2)) {
                    BLOGE(sc, "mf_mode=SD functions %d and %d "
                          "have the same ovlan (%d)\n",
                          i, j, ovlan1);
                    return (1);
                }
            }
        }
    } /* MULTI_FUNCTION_SD */

    return (0);
}
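/*
 * Note on the ovlan uniqueness walk above: absolute function numbers
 * interleave the two ports (port 0 owns the even functions 0,2,4,6 and
 * port 1 the odd ones), which is why the inner loop advances by two;
 * it only ever compares functions that share a port. For i = 2 it
 * checks the pairs (2,4) and (2,6), never the other port's 3, 5 or 7.
 */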
static int
bxe_get_mf_cfg_info(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val, mac_upper;
    uint8_t i, vnic;

    /* initialize mf_info defaults */
    mf_info->vnics_per_port = 1;
    mf_info->multi_vnics_mode = FALSE;
    mf_info->path_has_ovlan = FALSE;
    mf_info->mf_mode = SINGLE_FUNCTION;

    if (!CHIP_IS_MF_CAP(sc)) {
        return (0);
    }

    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
        BLOGE(sc, "Invalid mf_cfg_base!\n");
        return (1);
    }

    /* get the MF mode (switch dependent / independent / single-function) */

    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);

    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
    {
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:

        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

        /* check for legal upper mac bytes */
        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SI;
        } else {
            BLOGE(sc, "Invalid config for Switch Independent mode\n");
        }

        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:

        /* get outer vlan configuration */
        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);

        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SD;
        } else {
            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
        }

        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:

        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
        return (0);

    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:

        /*
         * Mark MF mode as NIV if MCP version includes NPAR-SD support
         * and the MAC address is valid.
         */
        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
        } else {
            BLOGE(sc, "Invalid config for AFEX mode\n");
        }

        break;

    default:

        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));

        return (1);
    }

    /* set path mf_mode (which could be different from the function mf_mode) */
    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        mf_info->path_has_ovlan = TRUE;
    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * Decide on the path multi-vnics mode. If we're not in MF mode
         * and we are in 4-port mode, it is enough to check vnic-0 of
         * the other port on the same path.
         */
        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
            uint8_t other_port = !(PORT_ID(sc) & 1);
            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));

            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);

            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ?
1 : 0; 13679 } 13680 } 13681 13682 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13683 /* invalid MF config */ 13684 if (SC_VN(sc) >= 1) { 13685 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13686 return (1); 13687 } 13688 13689 return (0); 13690 } 13691 13692 /* get the MF configuration */ 13693 mf_info->mf_config[SC_VN(sc)] = 13694 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13695 13696 switch(mf_info->mf_mode) 13697 { 13698 case MULTI_FUNCTION_SD: 13699 13700 bxe_get_shmem_mf_cfg_info_sd(sc); 13701 break; 13702 13703 case MULTI_FUNCTION_SI: 13704 13705 bxe_get_shmem_mf_cfg_info_si(sc); 13706 break; 13707 13708 case MULTI_FUNCTION_AFEX: 13709 13710 bxe_get_shmem_mf_cfg_info_niv(sc); 13711 break; 13712 13713 default: 13714 13715 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13716 mf_info->mf_mode); 13717 return (1); 13718 } 13719 13720 /* get the congestion management parameters */ 13721 13722 vnic = 0; 13723 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13724 /* get min/max bw */ 13725 val = MFCFG_RD(sc, func_mf_config[i].config); 13726 mf_info->min_bw[vnic] = 13727 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13728 mf_info->max_bw[vnic] = 13729 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13730 vnic++; 13731 } 13732 13733 return (bxe_check_valid_mf_cfg(sc)); 13734} 13735 13736static int 13737bxe_get_shmem_info(struct bxe_softc *sc) 13738{ 13739 int port; 13740 uint32_t mac_hi, mac_lo, val; 13741 13742 port = SC_PORT(sc); 13743 mac_hi = mac_lo = 0; 13744 13745 sc->link_params.sc = sc; 13746 sc->link_params.port = port; 13747 13748 /* get the hardware config info */ 13749 sc->devinfo.hw_config = 13750 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13751 sc->devinfo.hw_config2 = 13752 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13753 13754 sc->link_params.hw_led_mode = 13755 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13756 SHARED_HW_CFG_LED_MODE_SHIFT); 13757 13758 /* get the port feature config */ 13759 sc->port.config = 13760 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13761 13762 /* get the link params */ 13763 sc->link_params.speed_cap_mask[0] = 13764 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13765 sc->link_params.speed_cap_mask[1] = 13766 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13767 13768 /* get the lane config */ 13769 sc->link_params.lane_config = 13770 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13771 13772 /* get the link config */ 13773 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13774 sc->port.link_config[ELINK_INT_PHY] = val; 13775 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13776 sc->port.link_config[ELINK_EXT_PHY1] = 13777 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13778 13779 /* get the override preemphasis flag and enable it or turn it off */ 13780 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13781 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13782 sc->link_params.feature_config_flags |= 13783 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13784 } else { 13785 sc->link_params.feature_config_flags &= 13786 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13787 } 13788 13789 /* get the initial value of the link params */ 13790 sc->link_params.multi_phy_config = 13791 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 13792 13793 /* get external phy info */ 13794 sc->port.ext_phy_config = 13795 SHMEM_RD(sc, 
dev_info.port_hw_config[port].external_phy_config); 13796 13797 /* get the multifunction configuration */ 13798 bxe_get_mf_cfg_info(sc); 13799 13800 /* get the mac address */ 13801 if (IS_MF(sc)) { 13802 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13803 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 13804 } else { 13805 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 13806 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 13807 } 13808 13809 if ((mac_lo == 0) && (mac_hi == 0)) { 13810 *sc->mac_addr_str = 0; 13811 BLOGE(sc, "No Ethernet address programmed!\n"); 13812 } else { 13813 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 13814 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 13815 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 13816 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 13817 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 13818 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 13819 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 13820 "%02x:%02x:%02x:%02x:%02x:%02x", 13821 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 13822 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 13823 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 13824 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 13825 } 13826 13827 return (0); 13828} 13829 13830static void 13831bxe_get_tunable_params(struct bxe_softc *sc) 13832{ 13833 /* sanity checks */ 13834 13835 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 13836 (bxe_interrupt_mode != INTR_MODE_MSI) && 13837 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 13838 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 13839 bxe_interrupt_mode = INTR_MODE_MSIX; 13840 } 13841 13842 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 13843 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 13844 bxe_queue_count = 0; 13845 } 13846 13847 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 13848 if (bxe_max_rx_bufs == 0) { 13849 bxe_max_rx_bufs = RX_BD_USABLE; 13850 } else { 13851 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 13852 bxe_max_rx_bufs = 2048; 13853 } 13854 } 13855 13856 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 13857 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 13858 bxe_hc_rx_ticks = 25; 13859 } 13860 13861 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 13862 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 13863 bxe_hc_tx_ticks = 50; 13864 } 13865 13866 if (bxe_max_aggregation_size == 0) { 13867 bxe_max_aggregation_size = TPA_AGG_SIZE; 13868 } 13869 13870 if (bxe_max_aggregation_size > 0xffff) { 13871 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 13872 bxe_max_aggregation_size); 13873 bxe_max_aggregation_size = TPA_AGG_SIZE; 13874 } 13875 13876 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 13877 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 13878 bxe_mrrs = -1; 13879 } 13880 13881 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 13882 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 13883 bxe_autogreeen = 0; 13884 } 13885 13886 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 13887 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 13888 bxe_udp_rss = 0; 13889 } 13890 13891 /* pull in user settings */ 13892 13893 sc->interrupt_mode = bxe_interrupt_mode; 13894 sc->max_rx_bufs = bxe_max_rx_bufs; 13895 sc->hc_rx_ticks = bxe_hc_rx_ticks; 13896 sc->hc_tx_ticks = bxe_hc_tx_ticks; 13897 
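    /*
     * Queue count is resolved below: INTx forces a single queue, while
     * MSI/MSI-X use the queue_count tunable (or mp_ncpus when it is 0),
     * clamped to both MAX_RSS_CHAINS and the number of CPUs.
     */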
sc->max_aggregation_size = bxe_max_aggregation_size; 13898 sc->mrrs = bxe_mrrs; 13899 sc->autogreeen = bxe_autogreeen; 13900 sc->udp_rss = bxe_udp_rss; 13901 13902 if (bxe_interrupt_mode == INTR_MODE_INTX) { 13903 sc->num_queues = 1; 13904 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 13905 sc->num_queues = 13906 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 13907 MAX_RSS_CHAINS); 13908 if (sc->num_queues > mp_ncpus) { 13909 sc->num_queues = mp_ncpus; 13910 } 13911 } 13912 13913 BLOGD(sc, DBG_LOAD, 13914 "User Config: " 13915 "debug=0x%lx " 13916 "interrupt_mode=%d " 13917 "queue_count=%d " 13918 "hc_rx_ticks=%d " 13919 "hc_tx_ticks=%d " 13920 "rx_budget=%d " 13921 "max_aggregation_size=%d " 13922 "mrrs=%d " 13923 "autogreeen=%d " 13924 "udp_rss=%d\n", 13925 bxe_debug, 13926 sc->interrupt_mode, 13927 sc->num_queues, 13928 sc->hc_rx_ticks, 13929 sc->hc_tx_ticks, 13930 bxe_rx_budget, 13931 sc->max_aggregation_size, 13932 sc->mrrs, 13933 sc->autogreeen, 13934 sc->udp_rss); 13935} 13936 13937static int 13938bxe_media_detect(struct bxe_softc *sc) 13939{ 13940 int port_type; 13941 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 13942 13943 switch (sc->link_params.phy[phy_idx].media_type) { 13944 case ELINK_ETH_PHY_SFPP_10G_FIBER: 13945 case ELINK_ETH_PHY_XFP_FIBER: 13946 BLOGI(sc, "Found 10Gb Fiber media.\n"); 13947 sc->media = IFM_10G_SR; 13948 port_type = PORT_FIBRE; 13949 break; 13950 case ELINK_ETH_PHY_SFP_1G_FIBER: 13951 BLOGI(sc, "Found 1Gb Fiber media.\n"); 13952 sc->media = IFM_1000_SX; 13953 port_type = PORT_FIBRE; 13954 break; 13955 case ELINK_ETH_PHY_KR: 13956 case ELINK_ETH_PHY_CX4: 13957 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 13958 sc->media = IFM_10G_CX4; 13959 port_type = PORT_FIBRE; 13960 break; 13961 case ELINK_ETH_PHY_DA_TWINAX: 13962 BLOGI(sc, "Found 10Gb Twinax media.\n"); 13963 sc->media = IFM_10G_TWINAX; 13964 port_type = PORT_DA; 13965 break; 13966 case ELINK_ETH_PHY_BASE_T: 13967 if (sc->link_params.speed_cap_mask[0] & 13968 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 13969 BLOGI(sc, "Found 10GBase-T media.\n"); 13970 sc->media = IFM_10G_T; 13971 port_type = PORT_TP; 13972 } else { 13973 BLOGI(sc, "Found 1000Base-T media.\n"); 13974 sc->media = IFM_1000_T; 13975 port_type = PORT_TP; 13976 } 13977 break; 13978 case ELINK_ETH_PHY_NOT_PRESENT: 13979 BLOGI(sc, "Media not present.\n"); 13980 sc->media = 0; 13981 port_type = PORT_OTHER; 13982 break; 13983 case ELINK_ETH_PHY_UNSPECIFIED: 13984 default: 13985 BLOGI(sc, "Unknown media!\n"); 13986 sc->media = 0; 13987 port_type = PORT_OTHER; 13988 break; 13989 } 13990 return port_type; 13991} 13992 13993#define GET_FIELD(value, fname) \ 13994 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 13995#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 13996#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 13997 13998static int 13999bxe_get_igu_cam_info(struct bxe_softc *sc) 14000{ 14001 int pfid = SC_FUNC(sc); 14002 int igu_sb_id; 14003 uint32_t val; 14004 uint8_t fid, igu_sb_cnt = 0; 14005 14006 sc->igu_base_sb = 0xff; 14007 14008 if (CHIP_INT_MODE_IS_BC(sc)) { 14009 int vn = SC_VN(sc); 14010 igu_sb_cnt = sc->igu_sb_cnt; 14011 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14012 FP_SB_MAX_E1x); 14013 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14014 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 14015 return (0); 14016 } 14017 14018 /* IGU in normal mode - read CAM */ 14019 for (igu_sb_id = 0; 14020 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 14021 igu_sb_id++) { 14022 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 14023 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 14024 continue; 14025 } 14026 fid = IGU_FID(val); 14027 if ((fid & IGU_FID_ENCODE_IS_PF)) { 14028 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 14029 continue; 14030 } 14031 if (IGU_VEC(val) == 0) { 14032 /* default status block */ 14033 sc->igu_dsb_id = igu_sb_id; 14034 } else { 14035 if (sc->igu_base_sb == 0xff) { 14036 sc->igu_base_sb = igu_sb_id; 14037 } 14038 igu_sb_cnt++; 14039 } 14040 } 14041 } 14042 14043 /* 14044 * Due to new PF resource allocation by MFW T7.4 and above, it's possible 14045 * that the number of CAM entries will not be equal to the value advertised in 14046 * PCI. The driver should use the smaller of the two as the actual status 14047 * block count. 14048 */ 14049 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 14050 14051 if (igu_sb_cnt == 0) { 14052 BLOGE(sc, "CAM configuration error\n"); 14053 return (-1); 14054 } 14055 14056 return (0); 14057} 14058 14059/* 14060 * Gather various information from the device config space, the device itself, 14061 * shmem, and the user input. 14062 */ 14063static int 14064bxe_get_device_info(struct bxe_softc *sc) 14065{ 14066 uint32_t val; 14067 int rc; 14068 14069 /* Get the data for the device */ 14070 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 14071 sc->devinfo.device_id = pci_get_device(sc->dev); 14072 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 14073 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 14074 14075 /* get the chip revision (chip metal comes from pci config space) */ 14076 sc->devinfo.chip_id = 14077 sc->link_params.chip_id = 14078 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 14079 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 14080 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 14081 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 14082 14083 /* force 57811 according to MISC register */ 14084 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 14085 if (CHIP_IS_57810(sc)) { 14086 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 14087 (sc->devinfo.chip_id & 0x0000ffff)); 14088 } else if (CHIP_IS_57810_MF(sc)) { 14089 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 14090 (sc->devinfo.chip_id & 0x0000ffff)); 14091 } 14092 sc->devinfo.chip_id |= 0x1; 14093 } 14094 14095 BLOGD(sc, DBG_LOAD, 14096 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 14097 sc->devinfo.chip_id, 14098 ((sc->devinfo.chip_id >> 16) & 0xffff), 14099 ((sc->devinfo.chip_id >> 12) & 0xf), 14100 ((sc->devinfo.chip_id >> 4) & 0xff), 14101 ((sc->devinfo.chip_id >> 0) & 0xf)); 14102 14103 val = (REG_RD(sc, 0x2874) & 0x55); 14104 if ((sc->devinfo.chip_id & 0x1) || 14105 (CHIP_IS_E1(sc) && val) || 14106 (CHIP_IS_E1H(sc) && (val == 0x55))) { 14107 sc->flags |= BXE_ONE_PORT_FLAG; 14108 BLOGD(sc, DBG_LOAD, "single port device\n"); 14109 } 14110 14111 /* set the doorbell size */ 14112 sc->doorbell_size = (1 << BXE_DB_SHIFT); 14113 14114 /* determine whether the device is in 2 port or 4 port mode */ 14115 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1H */ 14116 if (CHIP_IS_E2E3(sc)) { 14117 /* 14118 * Read port4mode_en_ovwr[0]: 14119 * If 1, four port mode is in port4mode_en_ovwr[1]. 14120 * If 0, four port mode is in port4mode_en[0]. 
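         * In other words, bit 0 of the override register acts as a valid
         * flag for its bit 1; only when the override is clear does the
         * port4mode_en strap register decide.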
14121 */ 14122 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14123 if (val & 1) { 14124 val = ((val >> 1) & 1); 14125 } else { 14126 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14127 } 14128 14129 sc->devinfo.chip_port_mode = 14130 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14131 14132 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14133 } 14134 14135 /* get the function and path info for the device */ 14136 bxe_get_function_num(sc); 14137 14138 /* get the shared memory base address */ 14139 sc->devinfo.shmem_base = 14140 sc->link_params.shmem_base = 14141 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14142 sc->devinfo.shmem2_base = 14143 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 14144 MISC_REG_GENERIC_CR_0)); 14145 14146 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", 14147 sc->devinfo.shmem_base, sc->devinfo.shmem2_base); 14148 14149 if (!sc->devinfo.shmem_base) { 14150 /* this should ONLY prevent upcoming shmem reads */ 14151 BLOGI(sc, "MCP not active\n"); 14152 sc->flags |= BXE_NO_MCP_FLAG; 14153 return (0); 14154 } 14155 14156 /* make sure the shared memory contents are valid */ 14157 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 14158 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 14159 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 14160 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); 14161 return (0); 14162 } 14163 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); 14164 14165 /* get the bootcode version */ 14166 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 14167 snprintf(sc->devinfo.bc_ver_str, 14168 sizeof(sc->devinfo.bc_ver_str), 14169 "%d.%d.%d", 14170 ((sc->devinfo.bc_ver >> 24) & 0xff), 14171 ((sc->devinfo.bc_ver >> 16) & 0xff), 14172 ((sc->devinfo.bc_ver >> 8) & 0xff)); 14173 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); 14174 14175 /* get the multi-function config shmem address */ 14176 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); 14177 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); 14178 14179 /* clean indirect addresses as they're not used */ 14180 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 14181 if (IS_PF(sc)) { 14182 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 14183 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 14184 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 14185 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 14186 if (CHIP_IS_E1x(sc)) { 14187 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 14188 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 14189 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 14190 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 14191 } 14192 14193 /* 14194 * Enable internal target-read (in case we are probed after PF 14195 * FLR). Must be done prior to any BAR read access. 
Only for 14196 * 57712 and up 14197 */ 14198 if (!CHIP_IS_E1x(sc)) { 14199 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 14200 } 14201 } 14202 14203 /* get the nvram size */ 14204 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 14205 sc->devinfo.flash_size = 14206 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 14207 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); 14208 14209 /* get PCI capabilities */ 14210 bxe_probe_pci_caps(sc); 14211 14212 bxe_set_power_state(sc, PCI_PM_D0); 14213 14214 /* get various configuration parameters from shmem */ 14215 bxe_get_shmem_info(sc); 14216 14217 if (sc->devinfo.pcie_msix_cap_reg != 0) { 14218 val = pci_read_config(sc->dev, 14219 (sc->devinfo.pcie_msix_cap_reg + 14220 PCIR_MSIX_CTRL), 14221 2); 14222 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); 14223 } else { 14224 sc->igu_sb_cnt = 1; 14225 } 14226 14227 sc->igu_base_addr = BAR_IGU_INTMEM; 14228 14229 /* initialize IGU parameters */ 14230 if (CHIP_IS_E1x(sc)) { 14231 sc->devinfo.int_block = INT_BLOCK_HC; 14232 sc->igu_dsb_id = DEF_SB_IGU_ID; 14233 sc->igu_base_sb = 0; 14234 } else { 14235 sc->devinfo.int_block = INT_BLOCK_IGU; 14236 14237 /* do not allow device reset during IGU info processing */ 14238 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14239 14240 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 14241 14242 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14243 int tout = 5000; 14244 14245 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); 14246 14247 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 14248 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 14249 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 14250 14251 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14252 tout--; 14253 DELAY(1000); 14254 } 14255 14256 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14257 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); 14258 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14259 return (-1); 14260 } 14261 } 14262 14263 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14264 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); 14265 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 14266 } else { 14267 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); 14268 } 14269 14270 rc = bxe_get_igu_cam_info(sc); 14271 14272 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14273 14274 if (rc) { 14275 return (rc); 14276 } 14277 } 14278 14279 /* 14280 * Get base FW non-default (fast path) status block ID. This value is 14281 * used to initialize the fw_sb_id saved on the fp/queue structure to 14282 * determine the id used by the FW. 14283 */ 14284 if (CHIP_IS_E1x(sc)) { 14285 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 14286 } else { 14287 /* 14288 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 14289 * the same queue are indicated on the same IGU SB). So we prefer 14290 * FW and IGU SBs to be the same value. 
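         * Hence base_fw_ndsb below simply mirrors igu_base_sb.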
14291 */ 14292 sc->base_fw_ndsb = sc->igu_base_sb; 14293 } 14294 14295 BLOGD(sc, DBG_LOAD, 14296 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14297 sc->igu_dsb_id, sc->igu_base_sb, 14298 sc->igu_sb_cnt, sc->base_fw_ndsb); 14299 14300 elink_phy_probe(&sc->link_params); 14301 14302 return (0); 14303} 14304 14305static void 14306bxe_link_settings_supported(struct bxe_softc *sc, 14307 uint32_t switch_cfg) 14308{ 14309 uint32_t cfg_size = 0; 14310 uint32_t idx; 14311 uint8_t port = SC_PORT(sc); 14312 14313 /* aggregation of supported attributes of all external phys */ 14314 sc->port.supported[0] = 0; 14315 sc->port.supported[1] = 0; 14316 14317 switch (sc->link_params.num_phys) { 14318 case 1: 14319 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14320 cfg_size = 1; 14321 break; 14322 case 2: 14323 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14324 cfg_size = 1; 14325 break; 14326 case 3: 14327 if (sc->link_params.multi_phy_config & 14328 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14329 sc->port.supported[1] = 14330 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14331 sc->port.supported[0] = 14332 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14333 } else { 14334 sc->port.supported[0] = 14335 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14336 sc->port.supported[1] = 14337 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14338 } 14339 cfg_size = 2; 14340 break; 14341 } 14342 14343 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14344 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14345 SHMEM_RD(sc, 14346 dev_info.port_hw_config[port].external_phy_config), 14347 SHMEM_RD(sc, 14348 dev_info.port_hw_config[port].external_phy_config2)); 14349 return; 14350 } 14351 14352 if (CHIP_IS_E3(sc)) 14353 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14354 else { 14355 switch (switch_cfg) { 14356 case ELINK_SWITCH_CFG_1G: 14357 sc->port.phy_addr = 14358 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14359 break; 14360 case ELINK_SWITCH_CFG_10G: 14361 sc->port.phy_addr = 14362 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14363 break; 14364 default: 14365 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14366 sc->port.link_config[0]); 14367 return; 14368 } 14369 } 14370 14371 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14372 14373 /* mask what we support according to speed_cap_mask per configuration */ 14374 for (idx = 0; idx < cfg_size; idx++) { 14375 if (!(sc->link_params.speed_cap_mask[idx] & 14376 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14377 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14378 } 14379 14380 if (!(sc->link_params.speed_cap_mask[idx] & 14381 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14382 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14383 } 14384 14385 if (!(sc->link_params.speed_cap_mask[idx] & 14386 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14387 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14388 } 14389 14390 if (!(sc->link_params.speed_cap_mask[idx] & 14391 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14392 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14393 } 14394 14395 if (!(sc->link_params.speed_cap_mask[idx] & 14396 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14397 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14398 } 14399 14400 if (!(sc->link_params.speed_cap_mask[idx] & 14401 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14402 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14403 } 14404 14405 if (!(sc->link_params.speed_cap_mask[idx] & 14406 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14407 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14408 } 14409 14410 if (!(sc->link_params.speed_cap_mask[idx] & 14411 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14412 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14413 } 14414 } 14415 14416 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14417 sc->port.supported[0], sc->port.supported[1]); 14418 ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n", 14419 sc->port.supported[0], sc->port.supported[1]); 14420} 14421 14422static void 14423bxe_link_settings_requested(struct bxe_softc *sc) 14424{ 14425 uint32_t link_config; 14426 uint32_t idx; 14427 uint32_t cfg_size = 0; 14428 14429 sc->port.advertising[0] = 0; 14430 sc->port.advertising[1] = 0; 14431 14432 switch (sc->link_params.num_phys) { 14433 case 1: 14434 case 2: 14435 cfg_size = 1; 14436 break; 14437 case 3: 14438 cfg_size = 2; 14439 break; 14440 } 14441 14442 for (idx = 0; idx < cfg_size; idx++) { 14443 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14444 link_config = sc->port.link_config[idx]; 14445 14446 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14447 case PORT_FEATURE_LINK_SPEED_AUTO: 14448 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14449 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14450 sc->port.advertising[idx] |= sc->port.supported[idx]; 14451 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14452 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14453 sc->port.advertising[idx] |= 14454 (ELINK_SUPPORTED_100baseT_Half | 14455 ELINK_SUPPORTED_100baseT_Full); 14456 } else { 14457 /* force 10G, no AN */ 14458 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14459 sc->port.advertising[idx] |= 14460 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14461 continue; 14462 } 14463 break; 14464 14465 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14466 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14467 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14468 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14469 ADVERTISED_TP); 14470 } else { 14471 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14472 "speed_cap_mask=0x%08x\n", 14473 link_config, sc->link_params.speed_cap_mask[idx]); 14474 return; 14475 } 14476 break; 14477 14478 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14479 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14480 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14481 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14482 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14483 ADVERTISED_TP); 14484 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n", 14485 sc->link_params.req_duplex[idx]); 14486 } else { 14487 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14488 "speed_cap_mask=0x%08x\n", 14489 link_config, sc->link_params.speed_cap_mask[idx]); 14490 return; 14491 } 14492 break; 14493 14494 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14495 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14496 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14497 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14498 ADVERTISED_TP); 14499 } else { 14500 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14501 "speed_cap_mask=0x%08x\n", 14502 link_config, sc->link_params.speed_cap_mask[idx]); 14503 return; 14504 } 14505 break; 14506 14507 case 
PORT_FEATURE_LINK_SPEED_100M_HALF: 14508 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14509 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14510 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14511 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | 14512 ADVERTISED_TP); 14513 } else { 14514 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14515 "speed_cap_mask=0x%08x\n", 14516 link_config, sc->link_params.speed_cap_mask[idx]); 14517 return; 14518 } 14519 break; 14520 14521 case PORT_FEATURE_LINK_SPEED_1G: 14522 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14523 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14524 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14525 ADVERTISED_TP); 14526 } else { 14527 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14528 "speed_cap_mask=0x%08x\n", 14529 link_config, sc->link_params.speed_cap_mask[idx]); 14530 return; 14531 } 14532 break; 14533 14534 case PORT_FEATURE_LINK_SPEED_2_5G: 14535 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14536 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14537 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14538 ADVERTISED_TP); 14539 } else { 14540 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14541 "speed_cap_mask=0x%08x\n", 14542 link_config, sc->link_params.speed_cap_mask[idx]); 14543 return; 14544 } 14545 break; 14546 14547 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14548 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14549 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14550 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14551 ADVERTISED_FIBRE); 14552 } else { 14553 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14554 "speed_cap_mask=0x%08x\n", 14555 link_config, sc->link_params.speed_cap_mask[idx]); 14556 return; 14557 } 14558 break; 14559 14560 case PORT_FEATURE_LINK_SPEED_20G: 14561 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14562 break; 14563 14564 default: 14565 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14566 "speed_cap_mask=0x%08x\n", 14567 link_config, sc->link_params.speed_cap_mask[idx]); 14568 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14569 sc->port.advertising[idx] = sc->port.supported[idx]; 14570 break; 14571 } 14572 14573 sc->link_params.req_flow_ctrl[idx] = 14574 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14575 14576 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14577 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14578 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14579 } else { 14580 bxe_set_requested_fc(sc); 14581 } 14582 } 14583 14584 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14585 "req_flow_ctrl=0x%x advertising=0x%x\n", 14586 sc->link_params.req_line_speed[idx], 14587 sc->link_params.req_duplex[idx], 14588 sc->link_params.req_flow_ctrl[idx], 14589 sc->port.advertising[idx]); 14590 ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d " 14591 "advertising=0x%x\n", 14592 sc->link_params.req_line_speed[idx], 14593 sc->link_params.req_duplex[idx], 14594 sc->port.advertising[idx]); 14595 } 14596} 14597 14598static void 14599bxe_get_phy_info(struct bxe_softc *sc) 14600{ 14601 uint8_t port = SC_PORT(sc); 14602 uint32_t config = sc->port.config; 14603 uint32_t eee_mode; 14604 14605 /* shmem data already read in bxe_get_shmem_info() */ 14606 14607 ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14608 "link_config0=0x%08x\n", 
14609 sc->link_params.lane_config, 14610 sc->link_params.speed_cap_mask[0], 14611 sc->port.link_config[0]); 14612 14613 14614 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14615 bxe_link_settings_requested(sc); 14616 14617 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14618 sc->link_params.feature_config_flags |= 14619 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14620 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14621 sc->link_params.feature_config_flags &= 14622 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14623 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14624 sc->link_params.feature_config_flags |= 14625 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14626 } 14627 14628 /* configure link feature according to nvram value */ 14629 eee_mode = 14630 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14631 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14632 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14633 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14634 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14635 ELINK_EEE_MODE_ENABLE_LPI | 14636 ELINK_EEE_MODE_OUTPUT_TIME); 14637 } else { 14638 sc->link_params.eee_mode = 0; 14639 } 14640 14641 /* get the media type */ 14642 bxe_media_detect(sc); 14643 ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media); 14644} 14645 14646static void 14647bxe_get_params(struct bxe_softc *sc) 14648{ 14649 /* get user tunable params */ 14650 bxe_get_tunable_params(sc); 14651 14652 /* select the RX and TX ring sizes */ 14653 sc->tx_ring_size = TX_BD_USABLE; 14654 sc->rx_ring_size = RX_BD_USABLE; 14655 14656 /* XXX disable WoL */ 14657 sc->wol = 0; 14658} 14659 14660static void 14661bxe_set_modes_bitmap(struct bxe_softc *sc) 14662{ 14663 uint32_t flags = 0; 14664 14665 if (CHIP_REV_IS_FPGA(sc)) { 14666 SET_FLAGS(flags, MODE_FPGA); 14667 } else if (CHIP_REV_IS_EMUL(sc)) { 14668 SET_FLAGS(flags, MODE_EMUL); 14669 } else { 14670 SET_FLAGS(flags, MODE_ASIC); 14671 } 14672 14673 if (CHIP_IS_MODE_4_PORT(sc)) { 14674 SET_FLAGS(flags, MODE_PORT4); 14675 } else { 14676 SET_FLAGS(flags, MODE_PORT2); 14677 } 14678 14679 if (CHIP_IS_E2(sc)) { 14680 SET_FLAGS(flags, MODE_E2); 14681 } else if (CHIP_IS_E3(sc)) { 14682 SET_FLAGS(flags, MODE_E3); 14683 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14684 SET_FLAGS(flags, MODE_E3_A0); 14685 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14686 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14687 } 14688 } 14689 14690 if (IS_MF(sc)) { 14691 SET_FLAGS(flags, MODE_MF); 14692 switch (sc->devinfo.mf_info.mf_mode) { 14693 case MULTI_FUNCTION_SD: 14694 SET_FLAGS(flags, MODE_MF_SD); 14695 break; 14696 case MULTI_FUNCTION_SI: 14697 SET_FLAGS(flags, MODE_MF_SI); 14698 break; 14699 case MULTI_FUNCTION_AFEX: 14700 SET_FLAGS(flags, MODE_MF_AFEX); 14701 break; 14702 } 14703 } else { 14704 SET_FLAGS(flags, MODE_SF); 14705 } 14706 14707#if defined(__LITTLE_ENDIAN) 14708 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14709#else /* __BIG_ENDIAN */ 14710 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14711#endif 14712 14713 INIT_MODE_FLAGS(sc) = flags; 14714} 14715 14716static int 14717bxe_alloc_hsi_mem(struct bxe_softc *sc) 14718{ 14719 struct bxe_fastpath *fp; 14720 bus_addr_t busaddr; 14721 int max_agg_queues; 14722 int max_segments; 14723 bus_size_t max_size; 14724 bus_size_t max_seg_size; 14725 char buf[32]; 14726 int rc; 14727 int i, j; 14728 14729 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */ 14730 14731 /* allocate the parent bus DMA tag */ 14732 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 
14733 1, /* alignment */ 14734 0, /* boundary limit */ 14735 BUS_SPACE_MAXADDR, /* restricted low */ 14736 BUS_SPACE_MAXADDR, /* restricted hi */ 14737 NULL, /* addr filter() */ 14738 NULL, /* addr filter() arg */ 14739 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14740 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14741 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14742 0, /* flags */ 14743 NULL, /* lock() */ 14744 NULL, /* lock() arg */ 14745 &sc->parent_dma_tag); /* returned dma tag */ 14746 if (rc != 0) { 14747 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14748 return (1); 14749 } 14750 14751 /************************/ 14752 /* DEFAULT STATUS BLOCK */ 14753 /************************/ 14754 14755 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14756 &sc->def_sb_dma, "default status block") != 0) { 14757 /* XXX */ 14758 bus_dma_tag_destroy(sc->parent_dma_tag); 14759 return (1); 14760 } 14761 14762 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14763 14764 /***************/ 14765 /* EVENT QUEUE */ 14766 /***************/ 14767 14768 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14769 &sc->eq_dma, "event queue") != 0) { 14770 /* XXX */ 14771 bxe_dma_free(sc, &sc->def_sb_dma); 14772 sc->def_sb = NULL; 14773 bus_dma_tag_destroy(sc->parent_dma_tag); 14774 return (1); 14775 } 14776 14777 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14778 14779 /*************/ 14780 /* SLOW PATH */ 14781 /*************/ 14782 14783 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14784 &sc->sp_dma, "slow path") != 0) { 14785 /* XXX */ 14786 bxe_dma_free(sc, &sc->eq_dma); 14787 sc->eq = NULL; 14788 bxe_dma_free(sc, &sc->def_sb_dma); 14789 sc->def_sb = NULL; 14790 bus_dma_tag_destroy(sc->parent_dma_tag); 14791 return (1); 14792 } 14793 14794 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 14795 14796 /*******************/ 14797 /* SLOW PATH QUEUE */ 14798 /*******************/ 14799 14800 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14801 &sc->spq_dma, "slow path queue") != 0) { 14802 /* XXX */ 14803 bxe_dma_free(sc, &sc->sp_dma); 14804 sc->sp = NULL; 14805 bxe_dma_free(sc, &sc->eq_dma); 14806 sc->eq = NULL; 14807 bxe_dma_free(sc, &sc->def_sb_dma); 14808 sc->def_sb = NULL; 14809 bus_dma_tag_destroy(sc->parent_dma_tag); 14810 return (1); 14811 } 14812 14813 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 14814 14815 /***************************/ 14816 /* FW DECOMPRESSION BUFFER */ 14817 /***************************/ 14818 14819 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 14820 "fw decompression buffer") != 0) { 14821 /* XXX */ 14822 bxe_dma_free(sc, &sc->spq_dma); 14823 sc->spq = NULL; 14824 bxe_dma_free(sc, &sc->sp_dma); 14825 sc->sp = NULL; 14826 bxe_dma_free(sc, &sc->eq_dma); 14827 sc->eq = NULL; 14828 bxe_dma_free(sc, &sc->def_sb_dma); 14829 sc->def_sb = NULL; 14830 bus_dma_tag_destroy(sc->parent_dma_tag); 14831 return (1); 14832 } 14833 14834 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 14835 14836 if ((sc->gz_strm = 14837 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 14838 /* XXX */ 14839 bxe_dma_free(sc, &sc->gz_buf_dma); 14840 sc->gz_buf = NULL; 14841 bxe_dma_free(sc, &sc->spq_dma); 14842 sc->spq = NULL; 14843 bxe_dma_free(sc, &sc->sp_dma); 14844 sc->sp = NULL; 14845 bxe_dma_free(sc, &sc->eq_dma); 14846 sc->eq = NULL; 14847 bxe_dma_free(sc, &sc->def_sb_dma); 14848 sc->def_sb = NULL; 14849 bus_dma_tag_destroy(sc->parent_dma_tag); 14850 return (1); 14851 } 14852 14853 /*************/ 14854 /* FASTPATHS */ 14855 /*************/ 14856 14857 /* allocate 
DMA memory for each fastpath structure */ 14858 for (i = 0; i < sc->num_queues; i++) { 14859 fp = &sc->fp[i]; 14860 fp->sc = sc; 14861 fp->index = i; 14862 14863 /*******************/ 14864 /* FP STATUS BLOCK */ 14865 /*******************/ 14866 14867 snprintf(buf, sizeof(buf), "fp %d status block", i); 14868 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 14869 &fp->sb_dma, buf) != 0) { 14870 /* XXX unwind and free previous fastpath allocations */ 14871 BLOGE(sc, "Failed to alloc %s\n", buf); 14872 return (1); 14873 } else { 14874 if (CHIP_IS_E2E3(sc)) { 14875 fp->status_block.e2_sb = 14876 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 14877 } else { 14878 fp->status_block.e1x_sb = 14879 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 14880 } 14881 } 14882 14883 /******************/ 14884 /* FP TX BD CHAIN */ 14885 /******************/ 14886 14887 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 14888 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 14889 &fp->tx_dma, buf) != 0) { 14890 /* XXX unwind and free previous fastpath allocations */ 14891 BLOGE(sc, "Failed to alloc %s\n", buf); 14892 return (1); 14893 } else { 14894 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 14895 } 14896 14897 /* link together the tx bd chain pages */ 14898 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 14899 /* index into the tx bd chain array to last entry per page */ 14900 struct eth_tx_next_bd *tx_next_bd = 14901 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 14902 /* point to the next page and wrap from last page */ 14903 busaddr = (fp->tx_dma.paddr + 14904 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 14905 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 14906 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 14907 } 14908 14909 /******************/ 14910 /* FP RX BD CHAIN */ 14911 /******************/ 14912 14913 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 14914 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 14915 &fp->rx_dma, buf) != 0) { 14916 /* XXX unwind and free previous fastpath allocations */ 14917 BLOGE(sc, "Failed to alloc %s\n", buf); 14918 return (1); 14919 } else { 14920 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 14921 } 14922 14923 /* link together the rx bd chain pages */ 14924 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 14925 /* index into the rx bd chain array to last entry per page */ 14926 struct eth_rx_bd *rx_bd = 14927 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 14928 /* point to the next page and wrap from last page */ 14929 busaddr = (fp->rx_dma.paddr + 14930 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 14931 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 14932 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 14933 } 14934 14935 /*******************/ 14936 /* FP RX RCQ CHAIN */ 14937 /*******************/ 14938 14939 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 14940 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 14941 &fp->rcq_dma, buf) != 0) { 14942 /* XXX unwind and free previous fastpath allocations */ 14943 BLOGE(sc, "Failed to alloc %s\n", buf); 14944 return (1); 14945 } else { 14946 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 14947 } 14948 14949 /* link together the rcq chain pages */ 14950 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 14951 /* index into the rcq chain array to last entry per page */ 14952 struct eth_rx_cqe_next_page *rx_cqe_next = 14953 (struct eth_rx_cqe_next_page *) 14954 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 14955 /* point to the next page and wrap from last page */ 14956 
busaddr = (fp->rcq_dma.paddr + 14957 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 14958 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 14959 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 14960 } 14961 14962 /*******************/ 14963 /* FP RX SGE CHAIN */ 14964 /*******************/ 14965 14966 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 14967 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 14968 &fp->rx_sge_dma, buf) != 0) { 14969 /* XXX unwind and free previous fastpath allocations */ 14970 BLOGE(sc, "Failed to alloc %s\n", buf); 14971 return (1); 14972 } else { 14973 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 14974 } 14975 14976 /* link together the sge chain pages */ 14977 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 14978 /* index into the sge chain array to last entry per page */ 14979 struct eth_rx_sge *rx_sge = 14980 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 14981 /* point to the next page and wrap from last page */ 14982 busaddr = (fp->rx_sge_dma.paddr + 14983 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 14984 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 14985 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 14986 } 14987 14988 /***********************/ 14989 /* FP TX MBUF DMA MAPS */ 14990 /***********************/ 14991 14992 /* set required sizes before mapping to conserve resources */ 14993 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 14994 max_size = BXE_TSO_MAX_SIZE; 14995 max_segments = BXE_TSO_MAX_SEGMENTS; 14996 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 14997 } else { 14998 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 14999 max_segments = BXE_MAX_SEGMENTS; 15000 max_seg_size = MCLBYTES; 15001 } 15002 15003 /* create a dma tag for the tx mbufs */ 15004 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15005 1, /* alignment */ 15006 0, /* boundary limit */ 15007 BUS_SPACE_MAXADDR, /* restricted low */ 15008 BUS_SPACE_MAXADDR, /* restricted hi */ 15009 NULL, /* addr filter() */ 15010 NULL, /* addr filter() arg */ 15011 max_size, /* max map size */ 15012 max_segments, /* num discontinuous */ 15013 max_seg_size, /* max seg size */ 15014 0, /* flags */ 15015 NULL, /* lock() */ 15016 NULL, /* lock() arg */ 15017 &fp->tx_mbuf_tag); /* returned dma tag */ 15018 if (rc != 0) { 15019 /* XXX unwind and free previous fastpath allocations */ 15020 BLOGE(sc, "Failed to create dma tag for " 15021 "'fp %d tx mbufs' (%d)\n", i, rc); 15022 return (1); 15023 } 15024 15025 /* create dma maps for each of the tx mbuf clusters */ 15026 for (j = 0; j < TX_BD_TOTAL; j++) { 15027 if (bus_dmamap_create(fp->tx_mbuf_tag, 15028 BUS_DMA_NOWAIT, 15029 &fp->tx_mbuf_chain[j].m_map)) { 15030 /* XXX unwind and free previous fastpath allocations */ 15031 BLOGE(sc, "Failed to create dma map for " 15032 "'fp %d tx mbuf %d' (%d)\n", i, j, rc); 15033 return (1); 15034 } 15035 } 15036 15037 /***********************/ 15038 /* FP RX MBUF DMA MAPS */ 15039 /***********************/ 15040 15041 /* create a dma tag for the rx mbufs */ 15042 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15043 1, /* alignment */ 15044 0, /* boundary limit */ 15045 BUS_SPACE_MAXADDR, /* restricted low */ 15046 BUS_SPACE_MAXADDR, /* restricted hi */ 15047 NULL, /* addr filter() */ 15048 NULL, /* addr filter() arg */ 15049 MJUM9BYTES, /* max map size */ 15050 1, /* num discontinuous */ 15051 MJUM9BYTES, /* max seg size */ 15052 0, /* flags */ 15053 NULL, /* lock() */ 15054 NULL, /* lock() arg */ 15055 &fp->rx_mbuf_tag); /* returned dma tag */ 15056 if (rc != 0) { 15057 /* XXX 
unwind and free previous fastpath allocations */ 15058 BLOGE(sc, "Failed to create dma tag for " 15059 "'fp %d rx mbufs' (%d)\n", i, rc); 15060 return (1); 15061 } 15062 15063 /* create dma maps for each of the rx mbuf clusters */ 15064 for (j = 0; j < RX_BD_TOTAL; j++) { 15065 if (bus_dmamap_create(fp->rx_mbuf_tag, 15066 BUS_DMA_NOWAIT, 15067 &fp->rx_mbuf_chain[j].m_map)) { 15068 /* XXX unwind and free previous fastpath allocations */ 15069 BLOGE(sc, "Failed to create dma map for " 15070 "'fp %d rx mbuf %d' (%d)\n", i, j, rc); 15071 return (1); 15072 } 15073 } 15074 15075 /* create dma map for the spare rx mbuf cluster */ 15076 if (bus_dmamap_create(fp->rx_mbuf_tag, 15077 BUS_DMA_NOWAIT, 15078 &fp->rx_mbuf_spare_map)) { 15079 /* XXX unwind and free previous fastpath allocations */ 15080 BLOGE(sc, "Failed to create dma map for " 15081 "'fp %d spare rx mbuf' (%d)\n", i, rc); 15082 return (1); 15083 } 15084 15085 /***************************/ 15086 /* FP RX SGE MBUF DMA MAPS */ 15087 /***************************/ 15088 15089 /* create a dma tag for the rx sge mbufs */ 15090 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15091 1, /* alignment */ 15092 0, /* boundary limit */ 15093 BUS_SPACE_MAXADDR, /* restricted low */ 15094 BUS_SPACE_MAXADDR, /* restricted hi */ 15095 NULL, /* addr filter() */ 15096 NULL, /* addr filter() arg */ 15097 BCM_PAGE_SIZE, /* max map size */ 15098 1, /* num discontinuous */ 15099 BCM_PAGE_SIZE, /* max seg size */ 15100 0, /* flags */ 15101 NULL, /* lock() */ 15102 NULL, /* lock() arg */ 15103 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15104 if (rc != 0) { 15105 /* XXX unwind and free previous fastpath allocations */ 15106 BLOGE(sc, "Failed to create dma tag for " 15107 "'fp %d rx sge mbufs' (%d)\n", i, rc); 15108 return (1); 15109 } 15110 15111 /* create dma maps for the rx sge mbuf clusters */ 15112 for (j = 0; j < RX_SGE_TOTAL; j++) { 15113 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15114 BUS_DMA_NOWAIT, 15115 &fp->rx_sge_mbuf_chain[j].m_map)) { 15116 /* XXX unwind and free previous fastpath allocations */ 15117 BLOGE(sc, "Failed to create dma map for " 15118 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); 15119 return (1); 15120 } 15121 } 15122 15123 /* create dma map for the spare rx sge mbuf cluster */ 15124 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15125 BUS_DMA_NOWAIT, 15126 &fp->rx_sge_mbuf_spare_map)) { 15127 /* XXX unwind and free previous fastpath allocations */ 15128 BLOGE(sc, "Failed to create dma map for " 15129 "'fp %d spare rx sge mbuf' (%d)\n", i, rc); 15130 return (1); 15131 } 15132 15133 /***************************/ 15134 /* FP RX TPA MBUF DMA MAPS */ 15135 /***************************/ 15136 15137 /* create dma maps for the rx tpa mbuf clusters */ 15138 max_agg_queues = MAX_AGG_QS(sc); 15139 15140 for (j = 0; j < max_agg_queues; j++) { 15141 if (bus_dmamap_create(fp->rx_mbuf_tag, 15142 BUS_DMA_NOWAIT, 15143 &fp->rx_tpa_info[j].bd.m_map)) { 15144 /* XXX unwind and free previous fastpath allocations */ 15145 BLOGE(sc, "Failed to create dma map for " 15146 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); 15147 return (1); 15148 } 15149 } 15150 15151 /* create dma map for the spare rx tpa mbuf cluster */ 15152 if (bus_dmamap_create(fp->rx_mbuf_tag, 15153 BUS_DMA_NOWAIT, 15154 &fp->rx_tpa_info_mbuf_spare_map)) { 15155 /* XXX unwind and free previous fastpath allocations */ 15156 BLOGE(sc, "Failed to create dma map for " 15157 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); 15158 return (1); 15159 } 15160 15161 bxe_init_sge_ring_bit_mask(fp); 
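        /*
         * At this point every DMA resource for this fastpath exists: the
         * status block, the TX/RX/RCQ/SGE chains (with their pages linked
         * into rings above), and the mbuf tags/maps used to track cluster
         * mappings at runtime.
         */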
15162 } 15163 15164 return (0); 15165} 15166 15167static void 15168bxe_free_hsi_mem(struct bxe_softc *sc) 15169{ 15170 struct bxe_fastpath *fp; 15171 int max_agg_queues; 15172 int i, j; 15173 15174 if (sc->parent_dma_tag == NULL) { 15175 return; /* assume nothing was allocated */ 15176 } 15177 15178 for (i = 0; i < sc->num_queues; i++) { 15179 fp = &sc->fp[i]; 15180 15181 /*******************/ 15182 /* FP STATUS BLOCK */ 15183 /*******************/ 15184 15185 bxe_dma_free(sc, &fp->sb_dma); 15186 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15187 15188 /******************/ 15189 /* FP TX BD CHAIN */ 15190 /******************/ 15191 15192 bxe_dma_free(sc, &fp->tx_dma); 15193 fp->tx_chain = NULL; 15194 15195 /******************/ 15196 /* FP RX BD CHAIN */ 15197 /******************/ 15198 15199 bxe_dma_free(sc, &fp->rx_dma); 15200 fp->rx_chain = NULL; 15201 15202 /*******************/ 15203 /* FP RX RCQ CHAIN */ 15204 /*******************/ 15205 15206 bxe_dma_free(sc, &fp->rcq_dma); 15207 fp->rcq_chain = NULL; 15208 15209 /*******************/ 15210 /* FP RX SGE CHAIN */ 15211 /*******************/ 15212 15213 bxe_dma_free(sc, &fp->rx_sge_dma); 15214 fp->rx_sge_chain = NULL; 15215 15216 /***********************/ 15217 /* FP TX MBUF DMA MAPS */ 15218 /***********************/ 15219 15220 if (fp->tx_mbuf_tag != NULL) { 15221 for (j = 0; j < TX_BD_TOTAL; j++) { 15222 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15223 bus_dmamap_unload(fp->tx_mbuf_tag, 15224 fp->tx_mbuf_chain[j].m_map); 15225 bus_dmamap_destroy(fp->tx_mbuf_tag, 15226 fp->tx_mbuf_chain[j].m_map); 15227 } 15228 } 15229 15230 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15231 fp->tx_mbuf_tag = NULL; 15232 } 15233 15234 /***********************/ 15235 /* FP RX MBUF DMA MAPS */ 15236 /***********************/ 15237 15238 if (fp->rx_mbuf_tag != NULL) { 15239 for (j = 0; j < RX_BD_TOTAL; j++) { 15240 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15241 bus_dmamap_unload(fp->rx_mbuf_tag, 15242 fp->rx_mbuf_chain[j].m_map); 15243 bus_dmamap_destroy(fp->rx_mbuf_tag, 15244 fp->rx_mbuf_chain[j].m_map); 15245 } 15246 } 15247 15248 if (fp->rx_mbuf_spare_map != NULL) { 15249 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15250 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15251 } 15252 15253 /***************************/ 15254 /* FP RX TPA MBUF DMA MAPS */ 15255 /***************************/ 15256 15257 max_agg_queues = MAX_AGG_QS(sc); 15258 15259 for (j = 0; j < max_agg_queues; j++) { 15260 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15261 bus_dmamap_unload(fp->rx_mbuf_tag, 15262 fp->rx_tpa_info[j].bd.m_map); 15263 bus_dmamap_destroy(fp->rx_mbuf_tag, 15264 fp->rx_tpa_info[j].bd.m_map); 15265 } 15266 } 15267 15268 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15269 bus_dmamap_unload(fp->rx_mbuf_tag, 15270 fp->rx_tpa_info_mbuf_spare_map); 15271 bus_dmamap_destroy(fp->rx_mbuf_tag, 15272 fp->rx_tpa_info_mbuf_spare_map); 15273 } 15274 15275 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15276 fp->rx_mbuf_tag = NULL; 15277 } 15278 15279 /***************************/ 15280 /* FP RX SGE MBUF DMA MAPS */ 15281 /***************************/ 15282 15283 if (fp->rx_sge_mbuf_tag != NULL) { 15284 for (j = 0; j < RX_SGE_TOTAL; j++) { 15285 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15286 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15287 fp->rx_sge_mbuf_chain[j].m_map); 15288 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15289 fp->rx_sge_mbuf_chain[j].m_map); 15290 } 15291 } 15292 15293 if (fp->rx_sge_mbuf_spare_map != NULL) { 15294 
bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15295 fp->rx_sge_mbuf_spare_map); 15296 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15297 fp->rx_sge_mbuf_spare_map); 15298 } 15299 15300 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15301 fp->rx_sge_mbuf_tag = NULL; 15302 } 15303 } 15304 15305 /***************************/ 15306 /* FW DECOMPRESSION BUFFER */ 15307 /***************************/ 15308 15309 bxe_dma_free(sc, &sc->gz_buf_dma); 15310 sc->gz_buf = NULL; 15311 free(sc->gz_strm, M_DEVBUF); 15312 sc->gz_strm = NULL; 15313 15314 /*******************/ 15315 /* SLOW PATH QUEUE */ 15316 /*******************/ 15317 15318 bxe_dma_free(sc, &sc->spq_dma); 15319 sc->spq = NULL; 15320 15321 /*************/ 15322 /* SLOW PATH */ 15323 /*************/ 15324 15325 bxe_dma_free(sc, &sc->sp_dma); 15326 sc->sp = NULL; 15327 15328 /***************/ 15329 /* EVENT QUEUE */ 15330 /***************/ 15331 15332 bxe_dma_free(sc, &sc->eq_dma); 15333 sc->eq = NULL; 15334 15335 /************************/ 15336 /* DEFAULT STATUS BLOCK */ 15337 /************************/ 15338 15339 bxe_dma_free(sc, &sc->def_sb_dma); 15340 sc->def_sb = NULL; 15341 15342 bus_dma_tag_destroy(sc->parent_dma_tag); 15343 sc->parent_dma_tag = NULL; 15344} 15345 15346/* 15347 * A DMAE transaction from the previous driver may still have been in flight 15348 * when the pre-boot stage ended and boot began. That would invalidate the 15349 * addresses of the transaction, setting the was-error bit in the PCI block 15350 * and causing all hw-to-host PCIe transactions to time out. If this happened 15351 * we want to clear both the interrupt in the pglueb that detected it and the was-done bit. 15352 */ 15353static void 15354bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15355{ 15356 uint32_t val; 15357 15358 if (!CHIP_IS_E1x(sc)) { 15359 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15360 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15361 BLOGD(sc, DBG_LOAD, 15362 "Clearing 'was-error' bit that was set in pglueb\n"); 15363 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15364 } 15365 } 15366} 15367 15368static int 15369bxe_prev_mcp_done(struct bxe_softc *sc) 15370{ 15371 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15372 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15373 if (!rc) { 15374 BLOGE(sc, "MCP response failure, aborting\n"); 15375 return (-1); 15376 } 15377 15378 return (0); 15379} 15380 15381static struct bxe_prev_list_node * 15382bxe_prev_path_get_entry(struct bxe_softc *sc) 15383{ 15384 struct bxe_prev_list_node *tmp; 15385 15386 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15387 if ((sc->pcie_bus == tmp->bus) && 15388 (sc->pcie_device == tmp->slot) && 15389 (SC_PATH(sc) == tmp->path)) { 15390 return (tmp); 15391 } 15392 } 15393 15394 return (NULL); 15395} 15396 15397static uint8_t 15398bxe_prev_is_path_marked(struct bxe_softc *sc) 15399{ 15400 struct bxe_prev_list_node *tmp; 15401 int rc = FALSE; 15402 15403 mtx_lock(&bxe_prev_mtx); 15404 15405 tmp = bxe_prev_path_get_entry(sc); 15406 if (tmp) { 15407 if (tmp->aer) { 15408 BLOGD(sc, DBG_LOAD, 15409 "Path %d/%d/%d was marked by AER\n", 15410 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15411 } else { 15412 rc = TRUE; 15413 BLOGD(sc, DBG_LOAD, 15414 "Path %d/%d/%d was already cleaned from previous drivers\n", 15415 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15416 } 15417 } 15418 15419 mtx_unlock(&bxe_prev_mtx); 15420 15421 return (rc); 15422} 15423 15424static int 15425bxe_prev_mark_path(struct bxe_softc *sc, 15426 uint8_t after_undi) 15427{ 15428 struct bxe_prev_list_node *tmp; 15429 15430 
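    /*
     * The bxe_prev_list is shared by all bxe instances in the system, so
     * both the lookup and the insertion below are done under bxe_prev_mtx.
     */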
mtx_lock(&bxe_prev_mtx); 15431 15432 /* Check whether the entry for this path already exists */ 15433 tmp = bxe_prev_path_get_entry(sc); 15434 if (tmp) { 15435 if (!tmp->aer) { 15436 BLOGD(sc, DBG_LOAD, 15437 "Re-marking AER in path %d/%d/%d\n", 15438 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15439 } else { 15440 BLOGD(sc, DBG_LOAD, 15441 "Removing AER indication from path %d/%d/%d\n", 15442 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15443 tmp->aer = 0; 15444 } 15445 15446 mtx_unlock(&bxe_prev_mtx); 15447 return (0); 15448 } 15449 15450 mtx_unlock(&bxe_prev_mtx); 15451 15452 /* Create an entry for this path and add it */ 15453 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15454 (M_NOWAIT | M_ZERO)); 15455 if (!tmp) { 15456 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15457 return (-1); 15458 } 15459 15460 tmp->bus = sc->pcie_bus; 15461 tmp->slot = sc->pcie_device; 15462 tmp->path = SC_PATH(sc); 15463 tmp->aer = 0; 15464 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15465 15466 mtx_lock(&bxe_prev_mtx); 15467 15468 BLOGD(sc, DBG_LOAD, 15469 "Marked path %d/%d/%d - finished previous unload\n", 15470 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15471 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15472 15473 mtx_unlock(&bxe_prev_mtx); 15474 15475 return (0); 15476} 15477 15478static int 15479bxe_do_flr(struct bxe_softc *sc) 15480{ 15481 int i; 15482 15483 /* only E2 and onwards support FLR */ 15484 if (CHIP_IS_E1x(sc)) { 15485 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15486 return (-1); 15487 } 15488 15489 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15490 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15491 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15492 sc->devinfo.bc_ver); 15493 return (-1); 15494 } 15495 15496 /* Wait for Transaction Pending bit clean */ 15497 for (i = 0; i < 4; i++) { 15498 if (i) { 15499 DELAY(((1 << (i - 1)) * 100) * 1000); 15500 } 15501 15502 if (!bxe_is_pcie_pending(sc)) { 15503 goto clear; 15504 } 15505 } 15506 15507 BLOGE(sc, "PCIE transaction is not cleared, " 15508 "proceeding with reset anyway\n"); 15509 15510clear: 15511 15512 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15513 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15514 15515 return (0); 15516} 15517 15518struct bxe_mac_vals { 15519 uint32_t xmac_addr; 15520 uint32_t xmac_val; 15521 uint32_t emac_addr; 15522 uint32_t emac_val; 15523 uint32_t umac_addr; 15524 uint32_t umac_val; 15525 uint32_t bmac_addr; 15526 uint32_t bmac_val[2]; 15527}; 15528 15529static void 15530bxe_prev_unload_close_mac(struct bxe_softc *sc, 15531 struct bxe_mac_vals *vals) 15532{ 15533 uint32_t val, base_addr, offset, mask, reset_reg; 15534 uint8_t mac_stopped = FALSE; 15535 uint8_t port = SC_PORT(sc); 15536 uint32_t wb_data[2]; 15537 15538 /* reset addresses as they also mark which values were changed */ 15539 vals->bmac_addr = 0; 15540 vals->umac_addr = 0; 15541 vals->xmac_addr = 0; 15542 vals->emac_addr = 0; 15543 15544 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15545 15546 if (!CHIP_IS_E3(sc)) { 15547 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15548 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15549 if ((mask & reset_reg) && val) { 15550 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15551 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15552 : NIG_REG_INGRESS_BMAC0_MEM; 15553 offset = CHIP_IS_E2(sc) ? 
struct bxe_mac_vals {
    uint32_t xmac_addr;
    uint32_t xmac_val;
    uint32_t emac_addr;
    uint32_t emac_val;
    uint32_t umac_addr;
    uint32_t umac_val;
    uint32_t bmac_addr;
    uint32_t bmac_val[2];
};

static void
bxe_prev_unload_close_mac(struct bxe_softc *sc,
                          struct bxe_mac_vals *vals)
{
    uint32_t val, base_addr, offset, mask, reset_reg;
    uint8_t mac_stopped = FALSE;
    uint8_t port = SC_PORT(sc);
    uint32_t wb_data[2];

    /* reset addresses as they also mark which values were changed */
    vals->bmac_addr = 0;
    vals->umac_addr = 0;
    vals->xmac_addr = 0;
    vals->emac_addr = 0;

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);

    if (!CHIP_IS_E3(sc)) {
        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
        if ((mask & reset_reg) && val) {
            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
                                    : NIG_REG_INGRESS_BMAC0_MEM;
            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
                                    : BIGMAC_REGISTER_BMAC_CONTROL;

            /*
             * use rd/wr since we cannot use dmae. This is safe
             * since MCP won't access the bus due to the request
             * to unload, and no function on the path can be
             * loaded at this time.
             */
            wb_data[0] = REG_RD(sc, base_addr + offset);
            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
            vals->bmac_addr = base_addr + offset;
            vals->bmac_val[0] = wb_data[0];
            vals->bmac_val[1] = wb_data[1];
            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
            REG_WR(sc, vals->bmac_addr, wb_data[0]);
            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
        }

        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4;
        vals->emac_val = REG_RD(sc, vals->emac_addr);
        REG_WR(sc, vals->emac_addr, 0);
        mac_stopped = TRUE;
    } else {
        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
            REG_WR(sc, vals->xmac_addr, 0);
            mac_stopped = TRUE;
        }

        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
        if (mask & reset_reg) {
            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
            vals->umac_val = REG_RD(sc, vals->umac_addr);
            REG_WR(sc, vals->umac_addr, 0);
            mac_stopped = TRUE;
        }
    }

    if (mac_stopped) {
        DELAY(20000);
    }
}

#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))

static void
bxe_prev_unload_undi_inc(struct bxe_softc *sc,
                         uint8_t          port,
                         uint8_t          inc)
{
    uint16_t rcq, bd;
    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));

    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
    bd  = BXE_PREV_UNDI_BD(tmp_reg) + inc;

    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);

    BLOGD(sc, DBG_LOAD,
          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
          port, bd, rcq);
}
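/*
 * Illustrative sketch (added for clarity, not from the original sources):
 * the UNDI producer word packs both ring producers into one 32-bit value,
 * the BD producer in the high 16 bits and the RCQ producer in the low 16
 * bits. For a hypothetical value with bd = 0x0010 and rcq = 0x0008:
 *
 *     uint32_t prod = BXE_PREV_UNDI_PROD(0x0008, 0x0010);   -> 0x00100008
 *     uint16_t rcq  = BXE_PREV_UNDI_RCQ(prod);              -> 0x0008
 *     uint16_t bd   = BXE_PREV_UNDI_BD(prod);               -> 0x0010
 */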
static int
bxe_prev_unload_common(struct bxe_softc *sc)
{
    uint32_t reset_reg, tmp_reg = 0, rc;
    uint8_t prev_undi = FALSE;
    struct bxe_mac_vals mac_vals;
    uint32_t timer_count = 1000;
    uint32_t prev_brb;

    /*
     * It is possible a previous function received 'common' answer,
     * but hasn't loaded yet, therefore creating a scenario of
     * multiple functions receiving 'common' on the same path.
     */
    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");

    memset(&mac_vals, 0, sizeof(mac_vals));

    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);

    /* Reset should be performed after BRB is emptied */
    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
        /* Close the MAC Rx to prevent BRB from filling up */
        bxe_prev_unload_close_mac(sc, &mac_vals);

        /* close LLH filters towards the BRB */
        elink_set_rx_filter(&sc->link_params, 0);

        /*
         * Check if the UNDI driver was previously loaded.
         * UNDI driver initializes CID offset for normal bell to 0x7.
         */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
            if (tmp_reg == 0x7) {
                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
                prev_undi = TRUE;
                /* clear the UNDI indication */
                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
                /* clear possible idle check errors */
                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
            }
        }

        /* wait until BRB is empty */
        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
        while (timer_count) {
            prev_brb = tmp_reg;

            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
            if (!tmp_reg) {
                break;
            }

            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);

            /* reset timer as long as BRB actually gets emptied */
            if (prev_brb > tmp_reg) {
                timer_count = 1000;
            } else {
                timer_count--;
            }

            /* If UNDI resides in memory, manually increment it */
            if (prev_undi) {
                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
            }

            DELAY(10);
        }

        if (!timer_count) {
            BLOGE(sc, "Failed to empty BRB\n");
        }
    }

    /* No packets are in the pipeline, path is ready for reset */
    bxe_reset_common(sc);

    if (mac_vals.xmac_addr) {
        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
    }
    if (mac_vals.umac_addr) {
        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
    }
    if (mac_vals.emac_addr) {
        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
    }
    if (mac_vals.bmac_addr) {
        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
    }

    rc = bxe_prev_mark_path(sc, prev_undi);
    if (rc) {
        bxe_prev_mcp_done(sc);
        return (rc);
    }

    return (bxe_prev_mcp_done(sc));
}
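/*
 * Note (added for clarity): bxe_prev_unload() below asks the MCP for an
 * unload code. FW_MSG_CODE_DRV_UNLOAD_COMMON selects the path-wide
 * cleanup in bxe_prev_unload_common() above; any other reply takes
 * bxe_prev_unload_uncommon(), which may return BXE_PREV_WAIT_NEEDED and
 * cause the caller to sleep and retry.
 */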
"FLR successful\n"); 15765 return (0); 15766 } 15767 15768 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15769 15770 /* Close the MCP request, return failure*/ 15771 rc = bxe_prev_mcp_done(sc); 15772 if (!rc) { 15773 rc = BXE_PREV_WAIT_NEEDED; 15774 } 15775 15776 return (rc); 15777} 15778 15779static int 15780bxe_prev_unload(struct bxe_softc *sc) 15781{ 15782 int time_counter = 10; 15783 uint32_t fw, hw_lock_reg, hw_lock_val; 15784 uint32_t rc = 0; 15785 15786 /* 15787 * Clear HW from errors which may have resulted from an interrupted 15788 * DMAE transaction. 15789 */ 15790 bxe_prev_interrupted_dmae(sc); 15791 15792 /* Release previously held locks */ 15793 hw_lock_reg = 15794 (SC_FUNC(sc) <= 5) ? 15795 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 15796 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 15797 15798 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 15799 if (hw_lock_val) { 15800 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 15801 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 15802 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 15803 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 15804 } 15805 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 15806 REG_WR(sc, hw_lock_reg, 0xffffffff); 15807 } else { 15808 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 15809 } 15810 15811 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 15812 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 15813 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 15814 } 15815 15816 do { 15817 /* Lock MCP using an unload request */ 15818 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 15819 if (!fw) { 15820 BLOGE(sc, "MCP response failure, aborting\n"); 15821 rc = -1; 15822 break; 15823 } 15824 15825 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 15826 rc = bxe_prev_unload_common(sc); 15827 break; 15828 } 15829 15830 /* non-common reply from MCP night require looping */ 15831 rc = bxe_prev_unload_uncommon(sc); 15832 if (rc != BXE_PREV_WAIT_NEEDED) { 15833 break; 15834 } 15835 15836 DELAY(20000); 15837 } while (--time_counter); 15838 15839 if (!time_counter || rc) { 15840 BLOGE(sc, "Failed to unload previous driver!" 15841 " time_counter %d rc %d\n", time_counter, rc); 15842 rc = -1; 15843 } 15844 15845 return (rc); 15846} 15847 15848void 15849bxe_dcbx_set_state(struct bxe_softc *sc, 15850 uint8_t dcb_on, 15851 uint32_t dcbx_enabled) 15852{ 15853 if (!CHIP_IS_E1x(sc)) { 15854 sc->dcb_state = dcb_on; 15855 sc->dcbx_enabled = dcbx_enabled; 15856 } else { 15857 sc->dcb_state = FALSE; 15858 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 15859 } 15860 BLOGD(sc, DBG_LOAD, 15861 "DCB state [%s:%s]\n", 15862 dcb_on ? "ON" : "OFF", 15863 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 15864 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 15865 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
15866 "on-chip with negotiation" : "invalid"); 15867} 15868 15869/* must be called after sriov-enable */ 15870static int 15871bxe_set_qm_cid_count(struct bxe_softc *sc) 15872{ 15873 int cid_count = BXE_L2_MAX_CID(sc); 15874 15875 if (IS_SRIOV(sc)) { 15876 cid_count += BXE_VF_CIDS; 15877 } 15878 15879 if (CNIC_SUPPORT(sc)) { 15880 cid_count += CNIC_CID_MAX; 15881 } 15882 15883 return (roundup(cid_count, QM_CID_ROUND)); 15884} 15885 15886static void 15887bxe_init_multi_cos(struct bxe_softc *sc) 15888{ 15889 int pri, cos; 15890 15891 uint32_t pri_map = 0; /* XXX change to user config */ 15892 15893 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 15894 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 15895 if (cos < sc->max_cos) { 15896 sc->prio_to_cos[pri] = cos; 15897 } else { 15898 BLOGW(sc, "Invalid COS %d for priority %d " 15899 "(max COS is %d), setting to 0\n", 15900 cos, pri, (sc->max_cos - 1)); 15901 sc->prio_to_cos[pri] = 0; 15902 } 15903 } 15904} 15905 15906static int 15907bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 15908{ 15909 struct bxe_softc *sc; 15910 int error, result; 15911 15912 result = 0; 15913 error = sysctl_handle_int(oidp, &result, 0, req); 15914 15915 if (error || !req->newptr) { 15916 return (error); 15917 } 15918 15919 if (result == 1) { 15920 uint32_t temp; 15921 sc = (struct bxe_softc *)arg1; 15922 15923 BLOGI(sc, "... dumping driver state ...\n"); 15924 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 15925 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 15926 } 15927 15928 return (error); 15929} 15930 15931static int 15932bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 15933{ 15934 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15935 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 15936 uint32_t *offset; 15937 uint64_t value = 0; 15938 int index = (int)arg2; 15939 15940 if (index >= BXE_NUM_ETH_STATS) { 15941 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 15942 return (-1); 15943 } 15944 15945 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 15946 15947 switch (bxe_eth_stats_arr[index].size) { 15948 case 4: 15949 value = (uint64_t)*offset; 15950 break; 15951 case 8: 15952 value = HILO_U64(*offset, *(offset + 1)); 15953 break; 15954 default: 15955 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 15956 index, bxe_eth_stats_arr[index].size); 15957 return (-1); 15958 } 15959 15960 return (sysctl_handle_64(oidp, &value, 0, req)); 15961} 15962 15963static int 15964bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 15965{ 15966 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15967 uint32_t *eth_stats; 15968 uint32_t *offset; 15969 uint64_t value = 0; 15970 uint32_t q_stat = (uint32_t)arg2; 15971 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 15972 uint32_t index = (q_stat & 0xffff); 15973 15974 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 15975 15976 if (index >= BXE_NUM_ETH_Q_STATS) { 15977 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 15978 return (-1); 15979 } 15980 15981 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 15982 15983 switch (bxe_eth_q_stats_arr[index].size) { 15984 case 4: 15985 value = (uint64_t)*offset; 15986 break; 15987 case 8: 15988 value = HILO_U64(*offset, *(offset + 1)); 15989 break; 15990 default: 15991 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 15992 index, bxe_eth_q_stats_arr[index].size); 15993 return (-1); 15994 } 15995 15996 return (sysctl_handle_64(oidp, &value, 0, req)); 15997} 15998 15999static void bxe_force_link_reset(struct bxe_softc *sc) 
static void
bxe_force_link_reset(struct bxe_softc *sc)
{
    bxe_acquire_phy_lock(sc);
    elink_link_reset(&sc->link_params, &sc->link_vars, 1);
    bxe_release_phy_lock(sc);
}

static int
bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg1;
    uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
    int rc = 0;
    int error;
    int result;

    error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);

    if (error || !req->newptr) {
        return (error);
    }
    if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
        BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n",
              sc->bxe_pause_param);
        sc->bxe_pause_param = 8;
    }

    result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);

    if ((result & 0x400) &&
        !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
        BLOGW(sc, "Does not support Autoneg pause_param %d\n",
              sc->bxe_pause_param);
        return (-EINVAL);
    }

    if (IS_MF(sc))
        return (0);

    sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
    if (result & ELINK_FLOW_CTRL_RX)
        sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;

    if (result & ELINK_FLOW_CTRL_TX)
        sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
    if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;

    if (result & 0x400) {
        if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
            sc->link_params.req_flow_ctrl[cfg_idx] =
                ELINK_FLOW_CTRL_AUTO;
        }
        sc->link_params.req_fc_auto_adv = 0;
        if (result & ELINK_FLOW_CTRL_RX)
            sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;

        if (result & ELINK_FLOW_CTRL_TX)
            sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
        if (!sc->link_params.req_fc_auto_adv)
            sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
    }
    if (IS_PF(sc)) {
        if (sc->link_vars.link_up) {
            bxe_stats_handle(sc, STATS_EVENT_STOP);
        }
        if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
            bxe_force_link_reset(sc);
            bxe_acquire_phy_lock(sc);

            rc = elink_phy_init(&sc->link_params, &sc->link_vars);

            bxe_release_phy_lock(sc);

            bxe_calc_fc_adv(sc);
        }
    }
    return (rc);
}

static void
bxe_add_sysctls(struct bxe_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *queue_top, *queue;
    struct sysctl_oid_list *queue_top_children, *queue_children;
    char queue_num_buf[32];
    uint32_t q_stat;
    int i, j;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
                      "version");

    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
             BCM_5710_FW_MAJOR_VERSION,
             BCM_5710_FW_MINOR_VERSION,
             BCM_5710_FW_REVISION_VERSION,
             BCM_5710_FW_ENGINEERING_VERSION);

    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
             ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)   ? "Single" :
              (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD"  :
              (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ?
"MF-SI" : 16107 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16108 "Unknown")); 16109 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16110 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16111 "multifunction vnics per port"); 16112 16113 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16114 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16115 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16116 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : 16117 "???GT/s"), 16118 sc->devinfo.pcie_link_width); 16119 16120 sc->debug = bxe_debug; 16121 16122#if __FreeBSD_version >= 900000 16123 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16124 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16125 "bootcode version"); 16126 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16127 CTLFLAG_RD, sc->fw_ver_str, 0, 16128 "firmware version"); 16129 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16130 CTLFLAG_RD, sc->mf_mode_str, 0, 16131 "multifunction mode"); 16132 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16133 CTLFLAG_RD, sc->mac_addr_str, 0, 16134 "mac address"); 16135 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16136 CTLFLAG_RD, sc->pci_link_str, 0, 16137 "pci link status"); 16138 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16139 CTLFLAG_RW, &sc->debug, 16140 "debug logging mode"); 16141#else 16142 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16143 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, 16144 "bootcode version"); 16145 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16146 CTLFLAG_RD, &sc->fw_ver_str, 0, 16147 "firmware version"); 16148 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16149 CTLFLAG_RD, &sc->mf_mode_str, 0, 16150 "multifunction mode"); 16151 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16152 CTLFLAG_RD, &sc->mac_addr_str, 0, 16153 "mac address"); 16154 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16155 CTLFLAG_RD, &sc->pci_link_str, 0, 16156 "pci link status"); 16157 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", 16158 CTLFLAG_RW, &sc->debug, 0, 16159 "debug logging mode"); 16160#endif /* #if __FreeBSD_version >= 900000 */ 16161 16162 sc->trigger_grcdump = 0; 16163 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", 16164 CTLFLAG_RW, &sc->trigger_grcdump, 0, 16165 "trigger grcdump should be invoked" 16166 " before collecting grcdump"); 16167 16168 sc->grcdump_started = 0; 16169 sc->grcdump_done = 0; 16170 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", 16171 CTLFLAG_RD, &sc->grcdump_done, 0, 16172 "set by driver when grcdump is done"); 16173 16174 sc->rx_budget = bxe_rx_budget; 16175 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16176 CTLFLAG_RW, &sc->rx_budget, 0, 16177 "rx processing budget"); 16178 16179 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param", 16180 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16181 bxe_sysctl_pauseparam, "IU", 16182 "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8"); 16183 16184 16185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16186 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16187 bxe_sysctl_state, "IU", "dump driver state"); 16188 16189 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16191 bxe_eth_stats_arr[i].string, 16192 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16193 bxe_sysctl_eth_stat, "LU", 16194 bxe_eth_stats_arr[i].string); 16195 } 16196 16197 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16198 queue_top = 
SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16199 CTLFLAG_RD, NULL, "queue"); 16200 queue_top_children = SYSCTL_CHILDREN(queue_top); 16201 16202 for (i = 0; i < sc->num_queues; i++) { 16203 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16204 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16205 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16206 queue_num_buf, CTLFLAG_RD, NULL, 16207 "single queue"); 16208 queue_children = SYSCTL_CHILDREN(queue); 16209 16210 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16211 q_stat = ((i << 16) | j); 16212 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16213 bxe_eth_q_stats_arr[j].string, 16214 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16215 bxe_sysctl_eth_q_stat, "LU", 16216 bxe_eth_q_stats_arr[j].string); 16217 } 16218 } 16219} 16220 16221static int 16222bxe_alloc_buf_rings(struct bxe_softc *sc) 16223{ 16224#if __FreeBSD_version >= 901504 16225 16226 int i; 16227 struct bxe_fastpath *fp; 16228 16229 for (i = 0; i < sc->num_queues; i++) { 16230 16231 fp = &sc->fp[i]; 16232 16233 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 16234 M_NOWAIT, &fp->tx_mtx); 16235 if (fp->tx_br == NULL) 16236 return (-1); 16237 } 16238#endif 16239 return (0); 16240} 16241 16242static void 16243bxe_free_buf_rings(struct bxe_softc *sc) 16244{ 16245#if __FreeBSD_version >= 901504 16246 16247 int i; 16248 struct bxe_fastpath *fp; 16249 16250 for (i = 0; i < sc->num_queues; i++) { 16251 16252 fp = &sc->fp[i]; 16253 16254 if (fp->tx_br) { 16255 buf_ring_free(fp->tx_br, M_DEVBUF); 16256 fp->tx_br = NULL; 16257 } 16258 } 16259 16260#endif 16261} 16262 16263static void 16264bxe_init_fp_mutexs(struct bxe_softc *sc) 16265{ 16266 int i; 16267 struct bxe_fastpath *fp; 16268 16269 for (i = 0; i < sc->num_queues; i++) { 16270 16271 fp = &sc->fp[i]; 16272 16273 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 16274 "bxe%d_fp%d_tx_lock", sc->unit, i); 16275 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 16276 16277 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 16278 "bxe%d_fp%d_rx_lock", sc->unit, i); 16279 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 16280 } 16281} 16282 16283static void 16284bxe_destroy_fp_mutexs(struct bxe_softc *sc) 16285{ 16286 int i; 16287 struct bxe_fastpath *fp; 16288 16289 for (i = 0; i < sc->num_queues; i++) { 16290 16291 fp = &sc->fp[i]; 16292 16293 if (mtx_initialized(&fp->tx_mtx)) { 16294 mtx_destroy(&fp->tx_mtx); 16295 } 16296 16297 if (mtx_initialized(&fp->rx_mtx)) { 16298 mtx_destroy(&fp->rx_mtx); 16299 } 16300 } 16301} 16302 16303 16304/* 16305 * Device attach function. 16306 * 16307 * Allocates device resources, performs secondary chip identification, and 16308 * initializes driver instance variables. This function is called from driver 16309 * load after a successful probe. 
16310 * 16311 * Returns: 16312 * 0 = Success, >0 = Failure 16313 */ 16314static int 16315bxe_attach(device_t dev) 16316{ 16317 struct bxe_softc *sc; 16318 16319 sc = device_get_softc(dev); 16320 16321 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16322 16323 sc->state = BXE_STATE_CLOSED; 16324 16325 sc->dev = dev; 16326 sc->unit = device_get_unit(dev); 16327 16328 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16329 16330 sc->pcie_bus = pci_get_bus(dev); 16331 sc->pcie_device = pci_get_slot(dev); 16332 sc->pcie_func = pci_get_function(dev); 16333 16334 /* enable bus master capability */ 16335 pci_enable_busmaster(dev); 16336 16337 /* get the BARs */ 16338 if (bxe_allocate_bars(sc) != 0) { 16339 return (ENXIO); 16340 } 16341 16342 /* initialize the mutexes */ 16343 bxe_init_mutexes(sc); 16344 16345 /* prepare the periodic callout */ 16346 callout_init(&sc->periodic_callout, 0); 16347 16348 /* prepare the chip taskqueue */ 16349 sc->chip_tq_flags = CHIP_TQ_NONE; 16350 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16351 "bxe%d_chip_tq", sc->unit); 16352 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16353 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16354 taskqueue_thread_enqueue, 16355 &sc->chip_tq); 16356 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16357 "%s", sc->chip_tq_name); 16358 16359 TIMEOUT_TASK_INIT(taskqueue_thread, 16360 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); 16361 16362 16363 /* get device info and set params */ 16364 if (bxe_get_device_info(sc) != 0) { 16365 BLOGE(sc, "getting device info\n"); 16366 bxe_deallocate_bars(sc); 16367 pci_disable_busmaster(dev); 16368 return (ENXIO); 16369 } 16370 16371 /* get final misc params */ 16372 bxe_get_params(sc); 16373 16374 /* set the default MTU (changed via ifconfig) */ 16375 sc->mtu = ETHERMTU; 16376 16377 bxe_set_modes_bitmap(sc); 16378 16379 /* XXX 16380 * If in AFEX mode and the function is configured for FCoE 16381 * then bail... no L2 allowed. 
16382 */ 16383 16384 /* get phy settings from shmem and 'and' against admin settings */ 16385 bxe_get_phy_info(sc); 16386 16387 /* initialize the FreeBSD ifnet interface */ 16388 if (bxe_init_ifnet(sc) != 0) { 16389 bxe_release_mutexes(sc); 16390 bxe_deallocate_bars(sc); 16391 pci_disable_busmaster(dev); 16392 return (ENXIO); 16393 } 16394 16395 if (bxe_add_cdev(sc) != 0) { 16396 if (sc->ifnet != NULL) { 16397 ether_ifdetach(sc->ifnet); 16398 } 16399 ifmedia_removeall(&sc->ifmedia); 16400 bxe_release_mutexes(sc); 16401 bxe_deallocate_bars(sc); 16402 pci_disable_busmaster(dev); 16403 return (ENXIO); 16404 } 16405 16406 /* allocate device interrupts */ 16407 if (bxe_interrupt_alloc(sc) != 0) { 16408 bxe_del_cdev(sc); 16409 if (sc->ifnet != NULL) { 16410 ether_ifdetach(sc->ifnet); 16411 } 16412 ifmedia_removeall(&sc->ifmedia); 16413 bxe_release_mutexes(sc); 16414 bxe_deallocate_bars(sc); 16415 pci_disable_busmaster(dev); 16416 return (ENXIO); 16417 } 16418 16419 bxe_init_fp_mutexs(sc); 16420 16421 if (bxe_alloc_buf_rings(sc) != 0) { 16422 bxe_free_buf_rings(sc); 16423 bxe_interrupt_free(sc); 16424 bxe_del_cdev(sc); 16425 if (sc->ifnet != NULL) { 16426 ether_ifdetach(sc->ifnet); 16427 } 16428 ifmedia_removeall(&sc->ifmedia); 16429 bxe_release_mutexes(sc); 16430 bxe_deallocate_bars(sc); 16431 pci_disable_busmaster(dev); 16432 return (ENXIO); 16433 } 16434 16435 /* allocate ilt */ 16436 if (bxe_alloc_ilt_mem(sc) != 0) { 16437 bxe_free_buf_rings(sc); 16438 bxe_interrupt_free(sc); 16439 bxe_del_cdev(sc); 16440 if (sc->ifnet != NULL) { 16441 ether_ifdetach(sc->ifnet); 16442 } 16443 ifmedia_removeall(&sc->ifmedia); 16444 bxe_release_mutexes(sc); 16445 bxe_deallocate_bars(sc); 16446 pci_disable_busmaster(dev); 16447 return (ENXIO); 16448 } 16449 16450 /* allocate the host hardware/software hsi structures */ 16451 if (bxe_alloc_hsi_mem(sc) != 0) { 16452 bxe_free_ilt_mem(sc); 16453 bxe_free_buf_rings(sc); 16454 bxe_interrupt_free(sc); 16455 bxe_del_cdev(sc); 16456 if (sc->ifnet != NULL) { 16457 ether_ifdetach(sc->ifnet); 16458 } 16459 ifmedia_removeall(&sc->ifmedia); 16460 bxe_release_mutexes(sc); 16461 bxe_deallocate_bars(sc); 16462 pci_disable_busmaster(dev); 16463 return (ENXIO); 16464 } 16465 16466 /* need to reset chip if UNDI was active */ 16467 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16468 /* init fw_seq */ 16469 sc->fw_seq = 16470 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16471 DRV_MSG_SEQ_NUMBER_MASK); 16472 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16473 bxe_prev_unload(sc); 16474 } 16475 16476#if 1 16477 /* XXX */ 16478 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16479#else 16480 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16481 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16482 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16483 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16484 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16485 bxe_dcbx_init_params(sc); 16486 } else { 16487 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16488 } 16489#endif 16490 16491 /* calculate qm_cid_count */ 16492 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16493 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16494 16495 sc->max_cos = 1; 16496 bxe_init_multi_cos(sc); 16497 16498 bxe_add_sysctls(sc); 16499 16500 return (0); 16501} 16502 16503/* 16504 * Device detach function. 16505 * 16506 * Stops the controller, resets the controller, and releases resources. 
16507 * 16508 * Returns: 16509 * 0 = Success, >0 = Failure 16510 */ 16511static int 16512bxe_detach(device_t dev) 16513{ 16514 struct bxe_softc *sc; 16515 struct ifnet *ifp; 16516 16517 sc = device_get_softc(dev); 16518 16519 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16520 16521 ifp = sc->ifnet; 16522 if (ifp != NULL && ifp->if_vlantrunk != NULL) { 16523 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16524 return(EBUSY); 16525 } 16526 16527 bxe_del_cdev(sc); 16528 16529 /* stop the periodic callout */ 16530 bxe_periodic_stop(sc); 16531 16532 /* stop the chip taskqueue */ 16533 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16534 if (sc->chip_tq) { 16535 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16536 taskqueue_free(sc->chip_tq); 16537 sc->chip_tq = NULL; 16538 taskqueue_drain_timeout(taskqueue_thread, 16539 &sc->sp_err_timeout_task); 16540 } 16541 16542 /* stop and reset the controller if it was open */ 16543 if (sc->state != BXE_STATE_CLOSED) { 16544 BXE_CORE_LOCK(sc); 16545 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16546 sc->state = BXE_STATE_DISABLED; 16547 BXE_CORE_UNLOCK(sc); 16548 } 16549 16550 /* release the network interface */ 16551 if (ifp != NULL) { 16552 ether_ifdetach(ifp); 16553 } 16554 ifmedia_removeall(&sc->ifmedia); 16555 16556 /* XXX do the following based on driver state... */ 16557 16558 /* free the host hardware/software hsi structures */ 16559 bxe_free_hsi_mem(sc); 16560 16561 /* free ilt */ 16562 bxe_free_ilt_mem(sc); 16563 16564 bxe_free_buf_rings(sc); 16565 16566 /* release the interrupts */ 16567 bxe_interrupt_free(sc); 16568 16569 /* Release the mutexes*/ 16570 bxe_destroy_fp_mutexs(sc); 16571 bxe_release_mutexes(sc); 16572 16573 16574 /* Release the PCIe BAR mapped memory */ 16575 bxe_deallocate_bars(sc); 16576 16577 /* Release the FreeBSD interface. */ 16578 if (sc->ifnet != NULL) { 16579 if_free(sc->ifnet); 16580 } 16581 16582 pci_disable_busmaster(dev); 16583 16584 return (0); 16585} 16586 16587/* 16588 * Device shutdown function. 16589 * 16590 * Stops and resets the controller. 16591 * 16592 * Returns: 16593 * Nothing 16594 */ 16595static int 16596bxe_shutdown(device_t dev) 16597{ 16598 struct bxe_softc *sc; 16599 16600 sc = device_get_softc(dev); 16601 16602 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16603 16604 /* stop the periodic callout */ 16605 bxe_periodic_stop(sc); 16606 16607 if (sc->state != BXE_STATE_CLOSED) { 16608 BXE_CORE_LOCK(sc); 16609 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16610 BXE_CORE_UNLOCK(sc); 16611 } 16612 16613 return (0); 16614} 16615 16616void 16617bxe_igu_ack_sb(struct bxe_softc *sc, 16618 uint8_t igu_sb_id, 16619 uint8_t segment, 16620 uint16_t index, 16621 uint8_t op, 16622 uint8_t update) 16623{ 16624 uint32_t igu_addr = sc->igu_base_addr; 16625 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16626 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16627} 16628 16629static void 16630bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16631 uint8_t func, 16632 uint8_t idu_sb_id, 16633 uint8_t is_pf) 16634{ 16635 uint32_t data, ctl, cnt = 100; 16636 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16637 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16638 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16639 uint32_t sb_bit = 1 << (idu_sb_id%32); 16640 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16641 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16642 16643 /* Not supported in BC mode */ 16644 if (CHIP_INT_MODE_IS_BC(sc)) { 16645 return; 16646 } 16647 16648 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16649 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16650 IGU_REGULAR_CLEANUP_SET | 16651 IGU_REGULAR_BCLEANUP); 16652 16653 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16654 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16655 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16656 16657 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16658 data, igu_addr_data); 16659 REG_WR(sc, igu_addr_data, data); 16660 16661 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16662 BUS_SPACE_BARRIER_WRITE); 16663 mb(); 16664 16665 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16666 ctl, igu_addr_ctl); 16667 REG_WR(sc, igu_addr_ctl, ctl); 16668 16669 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16670 BUS_SPACE_BARRIER_WRITE); 16671 mb(); 16672 16673 /* wait for clean up to finish */ 16674 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16675 DELAY(20000); 16676 } 16677 16678 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16679 BLOGD(sc, DBG_LOAD, 16680 "Unable to finish IGU cleanup: " 16681 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16682 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16683 } 16684} 16685 16686static void 16687bxe_igu_clear_sb(struct bxe_softc *sc, 16688 uint8_t idu_sb_id) 16689{ 16690 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16691} 16692 16693 16694 16695 16696 16697 16698 16699/*******************/ 16700/* ECORE CALLBACKS */ 16701/*******************/ 16702 16703static void 16704bxe_reset_common(struct bxe_softc *sc) 16705{ 16706 uint32_t val = 0x1400; 16707 16708 /* reset_common */ 16709 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16710 16711 if (CHIP_IS_E3(sc)) { 16712 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16713 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16714 } 16715 16716 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16717} 16718 16719static void 16720bxe_common_init_phy(struct bxe_softc *sc) 16721{ 16722 uint32_t shmem_base[2]; 16723 uint32_t shmem2_base[2]; 16724 16725 /* Avoid common init in case MFW supports LFA */ 16726 if (SHMEM2_RD(sc, size) > 16727 (uint32_t)offsetof(struct shmem2_region, 16728 lfa_host_addr[SC_PORT(sc)])) { 16729 return; 16730 } 16731 16732 shmem_base[0] = sc->devinfo.shmem_base; 16733 shmem2_base[0] = sc->devinfo.shmem2_base; 16734 16735 if (!CHIP_IS_E1x(sc)) { 16736 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16737 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16738 } 16739 16740 bxe_acquire_phy_lock(sc); 16741 elink_common_init_phy(sc, shmem_base, shmem2_base, 16742 sc->devinfo.chip_id, 0); 16743 bxe_release_phy_lock(sc); 16744} 16745 16746static void 16747bxe_pf_disable(struct bxe_softc *sc) 16748{ 16749 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16750 16751 val &= ~IGU_PF_CONF_FUNC_EN; 16752 16753 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16754 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16755 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16756} 16757 16758static void 16759bxe_init_pxp(struct bxe_softc *sc) 16760{ 16761 uint16_t devctl; 16762 int r_order, w_order; 16763 16764 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16765 16766 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", 
devctl); 16767 16768 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16769 16770 if (sc->mrrs == -1) { 16771 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16772 } else { 16773 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16774 r_order = sc->mrrs; 16775 } 16776 16777 ecore_init_pxp_arb(sc, r_order, w_order); 16778} 16779 16780static uint32_t 16781bxe_get_pretend_reg(struct bxe_softc *sc) 16782{ 16783 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16784 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16785 return (base + (SC_ABS_FUNC(sc)) * stride); 16786} 16787 16788/* 16789 * Called only on E1H or E2. 16790 * When pretending to be PF, the pretend value is the function number 0..7. 16791 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16792 * combination. 16793 */ 16794static int 16795bxe_pretend_func(struct bxe_softc *sc, 16796 uint16_t pretend_func_val) 16797{ 16798 uint32_t pretend_reg; 16799 16800 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16801 return (-1); 16802 } 16803 16804 /* get my own pretend register */ 16805 pretend_reg = bxe_get_pretend_reg(sc); 16806 REG_WR(sc, pretend_reg, pretend_func_val); 16807 REG_RD(sc, pretend_reg); 16808 return (0); 16809} 16810 16811static void 16812bxe_iov_init_dmae(struct bxe_softc *sc) 16813{ 16814 return; 16815} 16816 16817static void 16818bxe_iov_init_dq(struct bxe_softc *sc) 16819{ 16820 return; 16821} 16822 16823/* send a NIG loopback debug packet */ 16824static void 16825bxe_lb_pckt(struct bxe_softc *sc) 16826{ 16827 uint32_t wb_write[3]; 16828 16829 /* Ethernet source and destination addresses */ 16830 wb_write[0] = 0x55555555; 16831 wb_write[1] = 0x55555555; 16832 wb_write[2] = 0x20; /* SOP */ 16833 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16834 16835 /* NON-IP protocol */ 16836 wb_write[0] = 0x09000000; 16837 wb_write[1] = 0x55555555; 16838 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16839 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16840} 16841 16842/* 16843 * Some of the internal memories are not directly readable from the driver. 16844 * To test them we send debug packets. 16845 */ 16846static int 16847bxe_int_mem_test(struct bxe_softc *sc) 16848{ 16849 int factor; 16850 int count, i; 16851 uint32_t val = 0; 16852 16853 if (CHIP_REV_IS_FPGA(sc)) { 16854 factor = 120; 16855 } else if (CHIP_REV_IS_EMUL(sc)) { 16856 factor = 200; 16857 } else { 16858 factor = 1; 16859 } 16860 16861 /* disable inputs of parser neighbor blocks */ 16862 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16863 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16864 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16865 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16866 16867 /* write 0 to parser credits for CFC search request */ 16868 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16869 16870 /* send Ethernet packet */ 16871 bxe_lb_pckt(sc); 16872 16873 /* TODO do i reset NIG statistic? 
*/ 16874 /* Wait until NIG register shows 1 packet of size 0x10 */ 16875 count = 1000 * factor; 16876 while (count) { 16877 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16878 val = *BXE_SP(sc, wb_data[0]); 16879 if (val == 0x10) { 16880 break; 16881 } 16882 16883 DELAY(10000); 16884 count--; 16885 } 16886 16887 if (val != 0x10) { 16888 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16889 return (-1); 16890 } 16891 16892 /* wait until PRS register shows 1 packet */ 16893 count = (1000 * factor); 16894 while (count) { 16895 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16896 if (val == 1) { 16897 break; 16898 } 16899 16900 DELAY(10000); 16901 count--; 16902 } 16903 16904 if (val != 0x1) { 16905 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16906 return (-2); 16907 } 16908 16909 /* Reset and init BRB, PRS */ 16910 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16911 DELAY(50000); 16912 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16913 DELAY(50000); 16914 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16915 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16916 16917 /* Disable inputs of parser neighbor blocks */ 16918 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16919 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16920 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16921 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16922 16923 /* Write 0 to parser credits for CFC search request */ 16924 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16925 16926 /* send 10 Ethernet packets */ 16927 for (i = 0; i < 10; i++) { 16928 bxe_lb_pckt(sc); 16929 } 16930 16931 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 16932 count = (1000 * factor); 16933 while (count) { 16934 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16935 val = *BXE_SP(sc, wb_data[0]); 16936 if (val == 0xb0) { 16937 break; 16938 } 16939 16940 DELAY(10000); 16941 count--; 16942 } 16943 16944 if (val != 0xb0) { 16945 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16946 return (-3); 16947 } 16948 16949 /* Wait until PRS register shows 2 packets */ 16950 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16951 if (val != 2) { 16952 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16953 } 16954 16955 /* Write 1 to parser credits for CFC search request */ 16956 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 16957 16958 /* Wait until PRS register shows 3 packets */ 16959 DELAY(10000 * factor); 16960 16961 /* Wait until NIG register shows 1 packet of size 0x10 */ 16962 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16963 if (val != 3) { 16964 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16965 } 16966 16967 /* clear NIG EOP FIFO */ 16968 for (i = 0; i < 11; i++) { 16969 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 16970 } 16971 16972 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 16973 if (val != 1) { 16974 BLOGE(sc, "clear of NIG failed val=0x%x\n", val); 16975 return (-4); 16976 } 16977 16978 /* Reset and init BRB, PRS, NIG */ 16979 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16980 DELAY(50000); 16981 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16982 DELAY(50000); 16983 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16984 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16985 if (!CNIC_SUPPORT(sc)) { 16986 /* set NIC mode */ 16987 REG_WR(sc, PRS_REG_NIC_MODE, 1); 16988 } 16989 16990 /* Enable inputs of parser neighbor blocks */ 16991 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 16992 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 16993 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 16994 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 16995 
16996 return (0); 16997} 16998 16999static void 17000bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17001{ 17002 int is_required; 17003 uint32_t val; 17004 int port; 17005 17006 is_required = 0; 17007 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17008 SHARED_HW_CFG_FAN_FAILURE_MASK); 17009 17010 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17011 is_required = 1; 17012 } 17013 /* 17014 * The fan failure mechanism is usually related to the PHY type since 17015 * the power consumption of the board is affected by the PHY. Currently, 17016 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17017 */ 17018 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17019 for (port = PORT_0; port < PORT_MAX; port++) { 17020 is_required |= elink_fan_failure_det_req(sc, 17021 sc->devinfo.shmem_base, 17022 sc->devinfo.shmem2_base, 17023 port); 17024 } 17025 } 17026 17027 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17028 17029 if (is_required == 0) { 17030 return; 17031 } 17032 17033 /* Fan failure is indicated by SPIO 5 */ 17034 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17035 17036 /* set to active low mode */ 17037 val = REG_RD(sc, MISC_REG_SPIO_INT); 17038 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17039 REG_WR(sc, MISC_REG_SPIO_INT, val); 17040 17041 /* enable interrupt to signal the IGU */ 17042 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17043 val |= MISC_SPIO_SPIO5; 17044 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17045} 17046 17047static void 17048bxe_enable_blocks_attention(struct bxe_softc *sc) 17049{ 17050 uint32_t val; 17051 17052 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17053 if (!CHIP_IS_E1x(sc)) { 17054 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17055 } else { 17056 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17057 } 17058 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17059 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17060 /* 17061 * mask read length error interrupts in brb for parser 17062 * (parsing unit and 'checksum and crc' unit) 17063 * these errors are legal (PU reads fixed length and CAC can cause 17064 * read length error on truncated packets) 17065 */ 17066 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17067 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17068 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17069 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17070 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17071 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17072/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17073/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17074 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17075 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17076 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17077/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17078/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17079 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17080 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17081 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17082 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17083/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17084/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17085 17086 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17087 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17088 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17089 if (!CHIP_IS_E1x(sc)) { 17090 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17091 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17092 } 17093 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17094 17095 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17096 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17097 REG_WR(sc, TCM_REG_TCM_INT_MASK, 
        0);
/* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */

    if (!CHIP_IS_E1x(sc)) {
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
    }

    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
}

/**
 * bxe_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common(struct bxe_softc *sc)
{
    uint8_t abs_func_id;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
          SC_ABS_FUNC(sc));

    /*
     * take the RESET lock to protect undi_unload flow from accessing
     * registers while we are resetting the chip
     */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    bxe_reset_common(sc);

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);

    val = 0xfffc;
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
    BLOGD(sc, DBG_LOAD, "after misc block init\n");

    if (!CHIP_IS_E1x(sc)) {
        /*
         * In 4-port or 2-port mode we need to turn off master-enable for
         * everyone. After that we turn it back on for self. So, we disregard
         * multi-function, and always disable all functions on the given path,
         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
         */
        for (abs_func_id = SC_PATH(sc);
             abs_func_id < (E2_FUNC_MAX * 2);
             abs_func_id += 2) {
            if (abs_func_id == SC_ABS_FUNC(sc)) {
                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
                continue;
            }

            bxe_pretend_func(sc, abs_func_id);

            /* clear pf enable */
            bxe_pf_disable(sc);

            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
        }
    }

    BLOGD(sc, DBG_LOAD, "after pf disable\n");

    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);

    if (CHIP_IS_E1(sc)) {
        /*
         * enable HW interrupt from PXP on USDM overflow
         * bit 16 on INT_MASK_0
         */
        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    }

    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
    bxe_init_pxp(sc);

#ifdef __BIG_ENDIAN
    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
    /* make sure this value is 0 */
    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);

    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

    ecore_ilt_init_page_size(sc, INITOP_SET);

    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
    }
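    /*
     * Note (added for clarity): PXP internal init runs asynchronously;
     * the delay below gives it time to complete, and both
     * PXP2_REG_RQ_CFG_DONE and PXP2_REG_RD_INIT_DONE must then read 1
     * or common init is aborted.
     */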
    /* let the HW do its magic... */
    DELAY(100000);

    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
              val);
        return (-1);
    }
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "after pxp init\n");

    /*
     * Timer bug workaround for E2 only. We need to set the entire ILT to
     * have entries with value "0" and valid bit on. This needs to be done
     * by the first PF that is loaded in a path (i.e. common phase).
     */
    if (!CHIP_IS_E1x(sc)) {
/*
 * In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The workaround is intended to make sure that when this happens nothing
 * fatal will occur. The workaround:
 *    1. First PF driver which loads on a path will:
 *       a. After taking the chip out of reset, by using pretend,
 *          it will write "0" to the following registers of
 *          the other vnics.
 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 *          And for itself it will write '1' to
 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *          dmae-operations (writing to pram for example.)
 *          note: can be done for only function 6,7 but cleaner this
 *          way.
 *       b. Write zero+valid to the entire ILT.
 *       c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *          VNIC3 (of that port). The range allocated will be the
 *          entire ILT. This is needed to prevent ILT range error.
 *    2. Any PF driver load flow:
 *       a. ILT update with the physical addresses of the allocated
 *          logical pages.
 *       b. Wait 20msec. - note that this timeout is needed to make
 *          sure there are no requests in one of the PXP internal
 *          queues with "old" ILT addresses.
 *       c. PF enable in the PGLC.
 *       d. Clear the was_error of the PF in the PGLC. (could have
 *          occurred while driver was down)
 *       e. PF enable in the CFC (WEAK + STRONG)
 *       f. Timers scan enable
 *    3. PF driver unload flow:
 *       a. Clear the Timers scan_en.
 *       b. Polling for scan_on=0 for that PF.
 *       c. Clear the PF enable bit in the PXP.
 *       d. Clear the PF enable in the CFC (WEAK + STRONG)
 *       e. Write zero+valid to all ILT entries (The valid bit must
 *          stay set)
 *       f. If this is VNIC 3 of a port then also init
 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
 *          to the last entry in the ILT.
 *
 * Notes:
 * Currently the PF error in the PGLC is non recoverable.
 * In the future there will be a recovery routine for this error.
 * Currently attention is masked.
 * Having an MCP lock on the load/unload process does not guarantee that
 * there is no Timer disable during Func6/7 enable. This is because the
 * Timers scan is currently being cleared by the MCP on FLR.
 * Step 2.d can be done only for PF6/7 and the driver can also check if
 * there is error before clearing it. But the flow above is simpler and
 * more general.
 * All ILT entries are written by zero+valid and not just PF6/7
 * ILT entries since in the future the ILT entries allocation for
 * PF-s might be dynamic.
 */
        struct ilt_client_info ilt_cli;
        struct ecore_ilt ilt;

        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct ecore_ilt));

        /* initialize dummy TM client */
        ilt_cli.start      = 0;
        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        /*
         * Step 1: set zeroes to all ilt page entries with valid bit on
         * Step 2: set the timers first/last ilt entry to point
         * to the entire range to prevent ILT range error for 3rd/4th
         * vnic (this code assumes existence of the vnic)
         *
         * both steps performed by call to ecore_ilt_client_init_op()
         * with dummy TM client
         *
         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and its brother are split registers
         */

        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
        bxe_pretend_func(sc, SC_ABS_FUNC(sc));

        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    }

    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);

    if (!CHIP_IS_E1x(sc)) {
        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);

        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);

        /* let the HW do its magic...
*/ 17340 do { 17341 DELAY(200000); 17342 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17343 } while (factor-- && (val != 1)); 17344 17345 if (val != 1) { 17346 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); 17347 return (-1); 17348 } 17349 } 17350 17351 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17352 17353 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17354 17355 bxe_iov_init_dmae(sc); 17356 17357 /* clean the DMAE memory */ 17358 sc->dmae_ready = 1; 17359 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17360 17361 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17362 17363 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17364 17365 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17366 17367 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17368 17369 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17370 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17371 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17372 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17373 17374 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17375 17376 /* QM queues pointers table */ 17377 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17378 17379 /* soft reset pulse */ 17380 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17381 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17382 17383 if (CNIC_SUPPORT(sc)) 17384 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17385 17386 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17387 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17388 if (!CHIP_REV_IS_SLOW(sc)) { 17389 /* enable hw interrupt from doorbell Q */ 17390 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17391 } 17392 17393 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17394 17395 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17396 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17397 17398 if (!CHIP_IS_E1(sc)) { 17399 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17400 } 17401 17402 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17403 if (IS_MF_AFEX(sc)) { 17404 /* 17405 * configure that AFEX and VLAN headers must be 17406 * received in AFEX mode 17407 */ 17408 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17409 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17410 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17411 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17412 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17413 } else { 17414 /* 17415 * Bit-map indicating which L2 hdrs may appear 17416 * after the basic Ethernet header 17417 */ 17418 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17419 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17420 } 17421 } 17422 17423 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17424 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17425 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17426 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17427 17428 if (!CHIP_IS_E1x(sc)) { 17429 /* reset VFC memories */ 17430 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17431 VFC_MEMORIES_RST_REG_CAM_RST | 17432 VFC_MEMORIES_RST_REG_RAM_RST); 17433 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17434 VFC_MEMORIES_RST_REG_CAM_RST | 17435 VFC_MEMORIES_RST_REG_RAM_RST); 17436 17437 DELAY(20000); 17438 } 17439 17440 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17441 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17442 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17443 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17444 17445 /* sync semi rtc */ 17446 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17447 0x80000000); 17448 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17449 0x80000000); 17450 17451 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17452 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17453 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17454 17455 if (!CHIP_IS_E1x(sc)) { 17456 if (IS_MF_AFEX(sc)) { 17457 /* 17458 * configure that AFEX and VLAN headers must be 17459 * sent in AFEX mode 17460 */ 17461 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17462 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17463 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17464 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17465 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17466 } else { 17467 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17468 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17469 } 17470 } 17471 17472 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17473 17474 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17475 17476 if (CNIC_SUPPORT(sc)) { 17477 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17478 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17479 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17480 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17481 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17482 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17483 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17484 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17485 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17486 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17487 } 17488 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17489 17490 if (sizeof(union cdu_context) != 1024) { 17491 /* we currently assume that a context is 1024 bytes */ 17492 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17493 (long)sizeof(union cdu_context)); 17494 } 17495 17496 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17497 val = (4 << 24) + (0 << 12) + 1024; 17498 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17499 17500 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17501 17502 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17503 /* enable context validation interrupt from CFC */ 17504 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17505 17506 /* set the thresholds to prevent CFC/CDU race */ 17507 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17508 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17509 17510 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17511 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17512 } 17513 17514 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17515 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17516 17517 /* Reset PCIE errors for debug */ 17518 REG_WR(sc, 0x2814, 0xffffffff); 17519 REG_WR(sc, 0x3820, 0xffffffff); 17520 17521 if (!CHIP_IS_E1x(sc)) { 
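/* the writes below appear to clear latched 'unsupported request' TL error status for all PCI functions (the ERR_UNSPPORT* register names suggest as much) so stale PCIe errors do not survive into this load */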
17522 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 17523 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 17524 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17525 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17526 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17527 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17528 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17529 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17530 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17531 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17532 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17533 } 17534 17535 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17536 17537 if (!CHIP_IS_E1(sc)) { 17538 /* in E3 this done in per-port section */ 17539 if (!CHIP_IS_E3(sc)) 17540 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17541 } 17542 17543 if (CHIP_IS_E1H(sc)) { 17544 /* not applicable for E2 (and above ...) */ 17545 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17546 } 17547 17548 if (CHIP_REV_IS_SLOW(sc)) { 17549 DELAY(200000); 17550 } 17551 17552 /* finish CFC init */ 17553 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17554 if (val != 1) { 17555 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); 17556 return (-1); 17557 } 17558 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17559 if (val != 1) { 17560 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); 17561 return (-1); 17562 } 17563 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17564 if (val != 1) { 17565 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); 17566 return (-1); 17567 } 17568 REG_WR(sc, CFC_REG_DEBUG0, 0); 17569 17570 if (CHIP_IS_E1(sc)) { 17571 /* read NIG statistic to see if this is our first up since powerup */ 17572 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17573 val = *BXE_SP(sc, wb_data[0]); 17574 17575 /* do internal memory self test */ 17576 if ((val == 0) && bxe_int_mem_test(sc)) { 17577 BLOGE(sc, "internal mem self test failed val=0x%x\n", val); 17578 return (-1); 17579 } 17580 } 17581 17582 bxe_setup_fan_failure_detection(sc); 17583 17584 /* clear PXP2 attentions */ 17585 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17586 17587 bxe_enable_blocks_attention(sc); 17588 17589 if (!CHIP_REV_IS_SLOW(sc)) { 17590 ecore_enable_blocks_parity(sc); 17591 } 17592 17593 if (!BXE_NOMCP(sc)) { 17594 if (CHIP_IS_E1x(sc)) { 17595 bxe_common_init_phy(sc); 17596 } 17597 } 17598 17599 return (0); 17600} 17601 17602/** 17603 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17604 * 17605 * @sc: driver handle 17606 */ 17607static int 17608bxe_init_hw_common_chip(struct bxe_softc *sc) 17609{ 17610 int rc = bxe_init_hw_common(sc); 17611 17612 if (rc) { 17613 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); 17614 return (rc); 17615 } 17616 17617 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 17618 if (!BXE_NOMCP(sc)) { 17619 bxe_common_init_phy(sc); 17620 } 17621 17622 return (0); 17623} 17624 17625static int 17626bxe_init_hw_port(struct bxe_softc *sc) 17627{ 17628 int port = SC_PORT(sc); 17629 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 17630 uint32_t low, high; 17631 uint32_t val; 17632 17633 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17634 17635 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17636 17637 ecore_init_block(sc, BLOCK_MISC, init_phase); 17638 ecore_init_block(sc, BLOCK_PXP, init_phase); 17639 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17640 17641 /* 17642 * Timers bug workaround: disables the pf_master bit in pglue at 17643 * common phase, we need to enable it here before any dmae access are 17644 * attempted. 
Therefore we manually added the enable-master to the 17645 * port phase (it also happens in the function phase) 17646 */ 17647 if (!CHIP_IS_E1x(sc)) { 17648 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17649 } 17650 17651 ecore_init_block(sc, BLOCK_ATC, init_phase); 17652 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17653 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17654 ecore_init_block(sc, BLOCK_QM, init_phase); 17655 17656 ecore_init_block(sc, BLOCK_TCM, init_phase); 17657 ecore_init_block(sc, BLOCK_UCM, init_phase); 17658 ecore_init_block(sc, BLOCK_CCM, init_phase); 17659 ecore_init_block(sc, BLOCK_XCM, init_phase); 17660 17661 /* QM cid (connection) count */ 17662 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17663 17664 if (CNIC_SUPPORT(sc)) { 17665 ecore_init_block(sc, BLOCK_TM, init_phase); 17666 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17667 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17668 } 17669 17670 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17671 17672 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17673 17674 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17675 if (IS_MF(sc)) { 17676 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17677 } else if (sc->mtu > 4096) { 17678 if (BXE_ONE_PORT(sc)) { 17679 low = 160; 17680 } else { 17681 val = sc->mtu; 17682 /* (24*1024 + val*4)/256 */ 17683 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17684 } 17685 } else { 17686 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17687 } 17688 high = (low + 56); /* 14*1024/256 */ 17689 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17690 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17691 } 17692 17693 if (CHIP_IS_MODE_4_PORT(sc)) { 17694 REG_WR(sc, SC_PORT(sc) ? 17695 BRB1_REG_MAC_GUARANTIED_1 : 17696 BRB1_REG_MAC_GUARANTIED_0, 40); 17697 } 17698 17699 ecore_init_block(sc, BLOCK_PRS, init_phase); 17700 if (CHIP_IS_E3B0(sc)) { 17701 if (IS_MF_AFEX(sc)) { 17702 /* configure headers for AFEX mode */ 17703 REG_WR(sc, SC_PORT(sc) ? 17704 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17705 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17706 REG_WR(sc, SC_PORT(sc) ? 17707 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17708 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17709 REG_WR(sc, SC_PORT(sc) ? 17710 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17711 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17712 } else { 17713 /* Ovlan exists only if we are in multi-function + 17714 * switch-dependent mode, in switch-independent there 17715 * is no ovlan headers 17716 */ 17717 REG_WR(sc, SC_PORT(sc) ? 17718 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17719 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17720 (sc->devinfo.mf_info.path_has_ovlan ? 
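/* same header bitmap as the common-phase PRS_REG_HDRS_AFTER_BASIC write above */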
7 : 6)); 17721 } 17722 } 17723 17724 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17725 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17726 ecore_init_block(sc, BLOCK_USDM, init_phase); 17727 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17728 17729 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17730 ecore_init_block(sc, BLOCK_USEM, init_phase); 17731 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17732 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17733 17734 ecore_init_block(sc, BLOCK_UPB, init_phase); 17735 ecore_init_block(sc, BLOCK_XPB, init_phase); 17736 17737 ecore_init_block(sc, BLOCK_PBF, init_phase); 17738 17739 if (CHIP_IS_E1x(sc)) { 17740 /* configure PBF to work without PAUSE mtu 9000 */ 17741 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17742 17743 /* update threshold */ 17744 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17745 /* update init credit */ 17746 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17747 17748 /* probe changes */ 17749 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17750 DELAY(50); 17751 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17752 } 17753 17754 if (CNIC_SUPPORT(sc)) { 17755 ecore_init_block(sc, BLOCK_SRC, init_phase); 17756 } 17757 17758 ecore_init_block(sc, BLOCK_CDU, init_phase); 17759 ecore_init_block(sc, BLOCK_CFC, init_phase); 17760 17761 if (CHIP_IS_E1(sc)) { 17762 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17763 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17764 } 17765 ecore_init_block(sc, BLOCK_HC, init_phase); 17766 17767 ecore_init_block(sc, BLOCK_IGU, init_phase); 17768 17769 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17770 /* init aeu_mask_attn_func_0/1: 17771 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17772 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17773 * bits 4-7 are used for "per vn group attention" */ 17774 val = IS_MF(sc) ? 0xF7 : 0x7; 17775 /* Enable DCBX attention for all but E1 */ 17776 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17777 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17778 17779 ecore_init_block(sc, BLOCK_NIG, init_phase); 17780 17781 if (!CHIP_IS_E1x(sc)) { 17782 /* Bit-map indicating which L2 hdrs may appear after the 17783 * basic Ethernet header 17784 */ 17785 if (IS_MF_AFEX(sc)) { 17786 REG_WR(sc, SC_PORT(sc) ? 17787 NIG_REG_P1_HDRS_AFTER_BASIC : 17788 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17789 } else { 17790 REG_WR(sc, SC_PORT(sc) ? 17791 NIG_REG_P1_HDRS_AFTER_BASIC : 17792 NIG_REG_P0_HDRS_AFTER_BASIC, 17793 IS_MF_SD(sc) ? 7 : 6); 17794 } 17795 17796 if (CHIP_IS_E3(sc)) { 17797 REG_WR(sc, SC_PORT(sc) ? 17798 NIG_REG_LLH1_MF_MODE : 17799 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17800 } 17801 } 17802 if (!CHIP_IS_E3(sc)) { 17803 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17804 } 17805 17806 if (!CHIP_IS_E1(sc)) { 17807 /* 0x2 disable mf_ov, 0x1 enable */ 17808 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17809 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17810 17811 if (!CHIP_IS_E1x(sc)) { 17812 val = 0; 17813 switch (sc->devinfo.mf_info.mf_mode) { 17814 case MULTI_FUNCTION_SD: 17815 val = 1; 17816 break; 17817 case MULTI_FUNCTION_SI: 17818 case MULTI_FUNCTION_AFEX: 17819 val = 2; 17820 break; 17821 } 17822 17823 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE : 17824 NIG_REG_LLH0_CLS_TYPE), val); 17825 } 17826 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 17827 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 17828 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 17829 } 17830 17831 /* If SPIO5 is set to generate interrupts, enable it for this port */ 17832 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17833 if (val & MISC_SPIO_SPIO5) { 17834 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 17835 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 17836 val = REG_RD(sc, reg_addr); 17837 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 17838 REG_WR(sc, reg_addr, val); 17839 } 17840 17841 return (0); 17842} 17843 17844static uint32_t 17845bxe_flr_clnup_reg_poll(struct bxe_softc *sc, 17846 uint32_t reg, 17847 uint32_t expected, 17848 uint32_t poll_count) 17849{ 17850 uint32_t cur_cnt = poll_count; 17851 uint32_t val; 17852 17853 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 17854 DELAY(FLR_WAIT_INTERVAL); 17855 } 17856 17857 return (val); 17858} 17859 17860static int 17861bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, 17862 uint32_t reg, 17863 char *msg, 17864 uint32_t poll_cnt) 17865{ 17866 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 17867 17868 if (val != 0) { 17869 BLOGE(sc, "%s usage count=%d\n", msg, val); 17870 return (1); 17871 } 17872 17873 return (0); 17874} 17875 17876/* Common routines with VF FLR cleanup */ 17877static uint32_t 17878bxe_flr_clnup_poll_count(struct bxe_softc *sc) 17879{ 17880 /* adjust polling timeout */ 17881 if (CHIP_REV_IS_EMUL(sc)) { 17882 return (FLR_POLL_CNT * 2000); 17883 } 17884 17885 if (CHIP_REV_IS_FPGA(sc)) { 17886 return (FLR_POLL_CNT * 120); 17887 } 17888 17889 return (FLR_POLL_CNT); 17890} 17891 17892static int 17893bxe_poll_hw_usage_counters(struct bxe_softc *sc, 17894 uint32_t poll_cnt) 17895{ 17896 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 17897 if (bxe_flr_clnup_poll_hw_counter(sc, 17898 CFC_REG_NUM_LCIDS_INSIDE_PF, 17899 "CFC PF usage counter timed out", 17900 poll_cnt)) { 17901 return (1); 17902 } 17903 17904 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 17905 if (bxe_flr_clnup_poll_hw_counter(sc, 17906 DORQ_REG_PF_USAGE_CNT, 17907 "DQ PF usage counter timed out", 17908 poll_cnt)) { 17909 return (1); 17910 } 17911 17912 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 17913 if (bxe_flr_clnup_poll_hw_counter(sc, 17914 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), 17915 "QM PF usage counter timed out", 17916 poll_cnt)) { 17917 return (1); 17918 } 17919 17920 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 17921 if (bxe_flr_clnup_poll_hw_counter(sc, 17922 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), 17923 "Timers VNIC usage counter timed out", 17924 poll_cnt)) { 17925 return (1); 17926 } 17927 17928 if (bxe_flr_clnup_poll_hw_counter(sc, 17929 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), 17930 "Timers NUM_SCANS usage counter timed out", 17931 poll_cnt)) { 17932 return (1); 17933 } 17934 17935 /* Wait for the DMAE PF usage counter to reach zero */ 17936 if (bxe_flr_clnup_poll_hw_counter(sc, 17937 dmae_reg_go_c[INIT_DMAE_C(sc)], 17938 "DMAE command register timed out", 17939 poll_cnt)) { 17940 return (1); 17941 } 17942 17943 return (0); 17944} 17945 17946#define OP_GEN_PARAM(param) \ 17947 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 17948#define OP_GEN_TYPE(type) \ 17949 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 17950#define OP_GEN_AGG_VECT(index) \ 17951 (((index) << 
SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 17952 17953static int 17954bxe_send_final_clnup(struct bxe_softc *sc, 17955 uint8_t clnup_func, 17956 uint32_t poll_cnt) 17957{ 17958 uint32_t op_gen_command = 0; 17959 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 17960 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 17961 int ret = 0; 17962 17963 if (REG_RD(sc, comp_addr)) { 17964 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 17965 return (1); 17966 } 17967 17968 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 17969 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 17970 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 17971 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 17972 17973 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 17974 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 17975 17976 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 17977 BLOGE(sc, "FW final cleanup did not succeed\n"); 17978 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 17979 (REG_RD(sc, comp_addr))); 17980 bxe_panic(sc, ("FLR cleanup failed\n")); 17981 return (1); 17982 } 17983 17984 /* Zero completion for nxt FLR */ 17985 REG_WR(sc, comp_addr, 0); 17986 17987 return (ret); 17988} 17989 17990static void 17991bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 17992 struct pbf_pN_buf_regs *regs, 17993 uint32_t poll_count) 17994{ 17995 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 17996 uint32_t cur_cnt = poll_count; 17997 17998 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 17999 crd = crd_start = REG_RD(sc, regs->crd); 18000 init_crd = REG_RD(sc, regs->init_crd); 18001 18002 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 18003 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 18004 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 18005 18006 while ((crd != init_crd) && 18007 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 18008 (init_crd - crd_start))) { 18009 if (cur_cnt--) { 18010 DELAY(FLR_WAIT_INTERVAL); 18011 crd = REG_RD(sc, regs->crd); 18012 crd_freed = REG_RD(sc, regs->crd_freed); 18013 } else { 18014 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 18015 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 18016 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 18017 break; 18018 } 18019 } 18020 18021 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 18022 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18023} 18024 18025static void 18026bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 18027 struct pbf_pN_cmd_regs *regs, 18028 uint32_t poll_count) 18029{ 18030 uint32_t occup, to_free, freed, freed_start; 18031 uint32_t cur_cnt = poll_count; 18032 18033 occup = to_free = REG_RD(sc, regs->lines_occup); 18034 freed = freed_start = REG_RD(sc, regs->lines_freed); 18035 18036 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18037 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 18038 18039 while (occup && 18040 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 18041 if (cur_cnt--) { 18042 DELAY(FLR_WAIT_INTERVAL); 18043 occup = REG_RD(sc, regs->lines_occup); 18044 freed = REG_RD(sc, regs->lines_freed); 18045 } else { 18046 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 18047 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18048 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", 
regs->pN, freed); 18049 break; 18050 } 18051 } 18052 18053 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18054 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18055} 18056 18057static void 18058bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18059{ 18060 struct pbf_pN_cmd_regs cmd_regs[] = { 18061 {0, (CHIP_IS_E3B0(sc)) ? 18062 PBF_REG_TQ_OCCUPANCY_Q0 : 18063 PBF_REG_P0_TQ_OCCUPANCY, 18064 (CHIP_IS_E3B0(sc)) ? 18065 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18066 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18067 {1, (CHIP_IS_E3B0(sc)) ? 18068 PBF_REG_TQ_OCCUPANCY_Q1 : 18069 PBF_REG_P1_TQ_OCCUPANCY, 18070 (CHIP_IS_E3B0(sc)) ? 18071 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18072 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18073 {4, (CHIP_IS_E3B0(sc)) ? 18074 PBF_REG_TQ_OCCUPANCY_LB_Q : 18075 PBF_REG_P4_TQ_OCCUPANCY, 18076 (CHIP_IS_E3B0(sc)) ? 18077 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18078 PBF_REG_P4_TQ_LINES_FREED_CNT} 18079 }; 18080 18081 struct pbf_pN_buf_regs buf_regs[] = { 18082 {0, (CHIP_IS_E3B0(sc)) ? 18083 PBF_REG_INIT_CRD_Q0 : 18084 PBF_REG_P0_INIT_CRD , 18085 (CHIP_IS_E3B0(sc)) ? 18086 PBF_REG_CREDIT_Q0 : 18087 PBF_REG_P0_CREDIT, 18088 (CHIP_IS_E3B0(sc)) ? 18089 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18090 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18091 {1, (CHIP_IS_E3B0(sc)) ? 18092 PBF_REG_INIT_CRD_Q1 : 18093 PBF_REG_P1_INIT_CRD, 18094 (CHIP_IS_E3B0(sc)) ? 18095 PBF_REG_CREDIT_Q1 : 18096 PBF_REG_P1_CREDIT, 18097 (CHIP_IS_E3B0(sc)) ? 18098 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18099 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18100 {4, (CHIP_IS_E3B0(sc)) ? 18101 PBF_REG_INIT_CRD_LB_Q : 18102 PBF_REG_P4_INIT_CRD, 18103 (CHIP_IS_E3B0(sc)) ? 18104 PBF_REG_CREDIT_LB_Q : 18105 PBF_REG_P4_CREDIT, 18106 (CHIP_IS_E3B0(sc)) ? 18107 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18108 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18109 }; 18110 18111 int i; 18112 18113 /* Verify the command queues are flushed P0, P1, P4 */ 18114 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18115 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18116 } 18117 18118 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18119 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18120 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18121 } 18122} 18123 18124static void 18125bxe_hw_enable_status(struct bxe_softc *sc) 18126{ 18127 uint32_t val; 18128 18129 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18130 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18131 18132 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18133 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18134 18135 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18136 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18137 18138 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18139 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18140 18141 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18142 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18143 18144 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18145 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18146 18147 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18148 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18149 18150 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18151 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18152} 18153 18154static int 18155bxe_pf_flr_clnup(struct bxe_softc *sc) 18156{ 18157 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18158 18159 BLOGD(sc, DBG_LOAD, 
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18160 18161 /* Re-enable PF target read access */ 18162 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18163 18164 /* Poll HW usage counters */ 18165 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18166 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18167 return (-1); 18168 } 18169 18170 /* Zero the igu 'trailing edge' and 'leading edge' */ 18171 18172 /* Send the FW cleanup command */ 18173 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18174 return (-1); 18175 } 18176 18177 /* ATC cleanup */ 18178 18179 /* Verify TX hw is flushed */ 18180 bxe_tx_hw_flushed(sc, poll_cnt); 18181 18182 /* Wait 100ms (not adjusted according to platform) */ 18183 DELAY(100000); 18184 18185 /* Verify no pending pci transactions */ 18186 if (bxe_is_pcie_pending(sc)) { 18187 BLOGE(sc, "PCIE Transactions still pending\n"); 18188 } 18189 18190 /* Debug */ 18191 bxe_hw_enable_status(sc); 18192 18193 /* 18194 * Master enable - Due to WB DMAE writes performed before this 18195 * register is re-initialized as part of the regular function init 18196 */ 18197 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18198 18199 return (0); 18200} 18201 18202static int 18203bxe_init_hw_func(struct bxe_softc *sc) 18204{ 18205 int port = SC_PORT(sc); 18206 int func = SC_FUNC(sc); 18207 int init_phase = PHASE_PF0 + func; 18208 struct ecore_ilt *ilt = sc->ilt; 18209 uint16_t cdu_ilt_start; 18210 uint32_t addr, val; 18211 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18212 int i, main_mem_width, rc; 18213 18214 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18215 18216 /* FLR cleanup */ 18217 if (!CHIP_IS_E1x(sc)) { 18218 rc = bxe_pf_flr_clnup(sc); 18219 if (rc) { 18220 BLOGE(sc, "FLR cleanup failed!\n"); 18221 // XXX bxe_fw_dump(sc); 18222 // XXX bxe_idle_chk(sc); 18223 return (rc); 18224 } 18225 } 18226 18227 /* set MSI reconfigure capability */ 18228 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18229 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18230 val = REG_RD(sc, addr); 18231 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18232 REG_WR(sc, addr, val); 18233 } 18234 18235 ecore_init_block(sc, BLOCK_PXP, init_phase); 18236 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18237 18238 ilt = sc->ilt; 18239 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18240 18241 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18242 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18243 ilt->lines[cdu_ilt_start + i].page_mapping = 18244 sc->context[i].vcxt_dma.paddr; 18245 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18246 } 18247 ecore_ilt_init_op(sc, INITOP_SET); 18248 18249 /* Set NIC mode */ 18250 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18251 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18252 18253 if (!CHIP_IS_E1x(sc)) { 18254 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18255 18256 /* Turn on a single ISR mode in IGU if driver is going to use 18257 * INT#x or MSI 18258 */ 18259 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18260 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18261 } 18262 18263 /* 18264 * Timers workaround bug: function init part. 
18265 * Need to wait 20msec after initializing ILT, 18266 * needed to make sure there are no requests in 18267 * one of the PXP internal queues with "old" ILT addresses 18268 */ 18269 DELAY(20000); 18270 18271 /* 18272 * Master enable - Due to WB DMAE writes performed before this 18273 * register is re-initialized as part of the regular function 18274 * init 18275 */ 18276 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18277 /* Enable the function in IGU */ 18278 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18279 } 18280 18281 sc->dmae_ready = 1; 18282 18283 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18284 18285 if (!CHIP_IS_E1x(sc)) 18286 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18287 18288 ecore_init_block(sc, BLOCK_ATC, init_phase); 18289 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18290 ecore_init_block(sc, BLOCK_NIG, init_phase); 18291 ecore_init_block(sc, BLOCK_SRC, init_phase); 18292 ecore_init_block(sc, BLOCK_MISC, init_phase); 18293 ecore_init_block(sc, BLOCK_TCM, init_phase); 18294 ecore_init_block(sc, BLOCK_UCM, init_phase); 18295 ecore_init_block(sc, BLOCK_CCM, init_phase); 18296 ecore_init_block(sc, BLOCK_XCM, init_phase); 18297 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18298 ecore_init_block(sc, BLOCK_USEM, init_phase); 18299 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18300 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18301 18302 if (!CHIP_IS_E1x(sc)) 18303 REG_WR(sc, QM_REG_PF_EN, 1); 18304 18305 if (!CHIP_IS_E1x(sc)) { 18306 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18307 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18308 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18309 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18310 } 18311 ecore_init_block(sc, BLOCK_QM, init_phase); 18312 18313 ecore_init_block(sc, BLOCK_TM, init_phase); 18314 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18315 18316 bxe_iov_init_dq(sc); 18317 18318 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18319 ecore_init_block(sc, BLOCK_PRS, init_phase); 18320 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18321 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18322 ecore_init_block(sc, BLOCK_USDM, init_phase); 18323 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18324 ecore_init_block(sc, BLOCK_UPB, init_phase); 18325 ecore_init_block(sc, BLOCK_XPB, init_phase); 18326 ecore_init_block(sc, BLOCK_PBF, init_phase); 18327 if (!CHIP_IS_E1x(sc)) 18328 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18329 18330 ecore_init_block(sc, BLOCK_CDU, init_phase); 18331 18332 ecore_init_block(sc, BLOCK_CFC, init_phase); 18333 18334 if (!CHIP_IS_E1x(sc)) 18335 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18336 18337 if (IS_MF(sc)) { 18338 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18339 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18340 } 18341 18342 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18343 18344 /* HC init per function */ 18345 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18346 if (CHIP_IS_E1H(sc)) { 18347 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18348 18349 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18350 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18351 } 18352 ecore_init_block(sc, BLOCK_HC, init_phase); 18353 18354 } else { 18355 int num_segs, sb_idx, prod_offset; 18356 18357 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18358 18359 if (!CHIP_IS_E1x(sc)) { 18360 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18361 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18362 } 18363 18364 
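/* IGU (rather than HC) interrupt mode: init the block, then zero the producer memory and clean up the status blocks below */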
ecore_init_block(sc, BLOCK_IGU, init_phase); 18365 18366 if (!CHIP_IS_E1x(sc)) { 18367 int dsb_idx = 0; 18368 /** 18369 * Producer memory: 18370 * E2 mode: addresses 0-135 match the mapping memory; 18371 * 136 - PF0 default prod; 137 - PF1 default prod; 18372 * 138 - PF2 default prod; 139 - PF3 default prod; 18373 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18374 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18375 * 144-147 reserved. 18376 * 18377 * E1.5 mode - in backward-compatible mode, 18378 * for each non-default SB, each even line in the memory 18379 * holds the U producer and each odd line holds 18380 * the C producer. The first 128 producers are for 18381 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18382 * producers are for the DSB for each PF. 18383 * Each PF has five segments: (the order inside each 18384 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18385 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18386 * 144-147 attn prods; 18387 */ 18388 /* non-default-status-blocks */ 18389 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18390 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18391 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18392 prod_offset = (sc->igu_base_sb + sb_idx) * 18393 num_segs; 18394 18395 for (i = 0; i < num_segs; i++) { 18396 addr = IGU_REG_PROD_CONS_MEMORY + 18397 (prod_offset + i) * 4; 18398 REG_WR(sc, addr, 0); 18399 } 18400 /* send consumer update with value 0 */ 18401 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18402 USTORM_ID, 0, IGU_INT_NOP, 1); 18403 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18404 } 18405 18406 /* default-status-blocks */ 18407 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18408 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 18409 18410 if (CHIP_IS_MODE_4_PORT(sc)) 18411 dsb_idx = SC_FUNC(sc); 18412 else 18413 dsb_idx = SC_VN(sc); 18414 18415 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 18416 IGU_BC_BASE_DSB_PROD + dsb_idx : 18417 IGU_NORM_BASE_DSB_PROD + dsb_idx); 18418 18419 /* 18420 * igu prods come in chunks of E1HVN_MAX (4); it 18421 * does not matter what the current chip mode is 18422 */ 18423 for (i = 0; i < (num_segs * E1HVN_MAX); 18424 i += E1HVN_MAX) { 18425 addr = IGU_REG_PROD_CONS_MEMORY + 18426 (prod_offset + i)*4; 18427 REG_WR(sc, addr, 0); 18428 } 18429 /* send consumer update with 0 */ 18430 if (CHIP_INT_MODE_IS_BC(sc)) { 18431 bxe_ack_sb(sc, sc->igu_dsb_id, 18432 USTORM_ID, 0, IGU_INT_NOP, 1); 18433 bxe_ack_sb(sc, sc->igu_dsb_id, 18434 CSTORM_ID, 0, IGU_INT_NOP, 1); 18435 bxe_ack_sb(sc, sc->igu_dsb_id, 18436 XSTORM_ID, 0, IGU_INT_NOP, 1); 18437 bxe_ack_sb(sc, sc->igu_dsb_id, 18438 TSTORM_ID, 0, IGU_INT_NOP, 1); 18439 bxe_ack_sb(sc, sc->igu_dsb_id, 18440 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18441 } else { 18442 bxe_ack_sb(sc, sc->igu_dsb_id, 18443 USTORM_ID, 0, IGU_INT_NOP, 1); 18444 bxe_ack_sb(sc, sc->igu_dsb_id, 18445 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18446 } 18447 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 18448 18449 /* !!! 
these should become driver const once 18450 rf-tool supports split-68 const */ 18451 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 18452 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 18453 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 18454 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 18455 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 18456 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 18457 } 18458 } 18459 18460 /* Reset PCIE errors for debug */ 18461 REG_WR(sc, 0x2114, 0xffffffff); 18462 REG_WR(sc, 0x2120, 0xffffffff); 18463 18464 if (CHIP_IS_E1x(sc)) { 18465 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 18466 main_mem_base = HC_REG_MAIN_MEMORY + 18467 SC_PORT(sc) * (main_mem_size * 4); 18468 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 18469 main_mem_width = 8; 18470 18471 val = REG_RD(sc, main_mem_prty_clr); 18472 if (val) { 18473 BLOGD(sc, DBG_LOAD, 18474 "Parity errors in HC block during function init (0x%x)!\n", 18475 val); 18476 } 18477 18478 /* Clear "false" parity errors in MSI-X table */ 18479 for (i = main_mem_base; 18480 i < main_mem_base + main_mem_size * 4; 18481 i += main_mem_width) { 18482 bxe_read_dmae(sc, i, main_mem_width / 4); 18483 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 18484 i, main_mem_width / 4); 18485 } 18486 /* Clear HC parity attention */ 18487 REG_RD(sc, main_mem_prty_clr); 18488 } 18489 18490#if 1 18491 /* Enable STORMs SP logging */ 18492 REG_WR8(sc, BAR_USTRORM_INTMEM + 18493 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18494 REG_WR8(sc, BAR_TSTRORM_INTMEM + 18495 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18496 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18497 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18498 REG_WR8(sc, BAR_XSTRORM_INTMEM + 18499 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18500#endif 18501 18502 elink_phy_probe(&sc->link_params); 18503 18504 return (0); 18505} 18506 18507static void 18508bxe_link_reset(struct bxe_softc *sc) 18509{ 18510 if (!BXE_NOMCP(sc)) { 18511 bxe_acquire_phy_lock(sc); 18512 elink_lfa_reset(&sc->link_params, &sc->link_vars); 18513 bxe_release_phy_lock(sc); 18514 } else { 18515 if (!CHIP_REV_IS_SLOW(sc)) { 18516 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 18517 } 18518 } 18519} 18520 18521static void 18522bxe_reset_port(struct bxe_softc *sc) 18523{ 18524 int port = SC_PORT(sc); 18525 uint32_t val; 18526 18527 ELINK_DEBUG_P0(sc, "bxe_reset_port called\n"); 18528 /* reset physical Link */ 18529 bxe_link_reset(sc); 18530 18531 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18532 18533 /* Do not rcv packets to BRB */ 18534 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18535 /* Do not direct rcv packets that are not for MCP to the BRB */ 18536 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18537 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18538 18539 /* Configure AEU */ 18540 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18541 18542 DELAY(100000); 18543 18544 /* Check for BRB port occupancy */ 18545 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18546 if (val) { 18547 BLOGD(sc, DBG_LOAD, 18548 "BRB1 is not empty, %d blocks are occupied\n", val); 18549 } 18550 18551 /* TODO: Close Doorbell port? 
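 * (not implemented; the code above only masks interrupts, blocks BRB input and logs any remaining BRB occupancy)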
*/ 18552} 18553 18554static void 18555bxe_ilt_wr(struct bxe_softc *sc, 18556 uint32_t index, 18557 bus_addr_t addr) 18558{ 18559 int reg; 18560 uint32_t wb_write[2]; 18561 18562 if (CHIP_IS_E1(sc)) { 18563 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18564 } else { 18565 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18566 } 18567 18568 wb_write[0] = ONCHIP_ADDR1(addr); 18569 wb_write[1] = ONCHIP_ADDR2(addr); 18570 REG_WR_DMAE(sc, reg, wb_write, 2); 18571} 18572 18573static void 18574bxe_clear_func_ilt(struct bxe_softc *sc, 18575 uint32_t func) 18576{ 18577 uint32_t i, base = FUNC_ILT_BASE(func); 18578 for (i = base; i < base + ILT_PER_FUNC; i++) { 18579 bxe_ilt_wr(sc, i, 0); 18580 } 18581} 18582 18583static void 18584bxe_reset_func(struct bxe_softc *sc) 18585{ 18586 struct bxe_fastpath *fp; 18587 int port = SC_PORT(sc); 18588 int func = SC_FUNC(sc); 18589 int i; 18590 18591 /* Disable the function in the FW */ 18592 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18593 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18594 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18595 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18596 18597 /* FP SBs */ 18598 FOR_EACH_ETH_QUEUE(sc, i) { 18599 fp = &sc->fp[i]; 18600 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18601 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18602 SB_DISABLED); 18603 } 18604 18605 /* SP SB */ 18606 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18607 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18608 SB_DISABLED); 18609 18610 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18611 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18612 } 18613 18614 /* Configure IGU */ 18615 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18616 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18617 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18618 } else { 18619 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18620 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18621 } 18622 18623 if (CNIC_LOADED(sc)) { 18624 /* Disable Timer scan */ 18625 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18626 /* 18627 * Wait for at least 10ms and up to 2 seconds for the timers 18628 * scan to complete 18629 */ 18630 for (i = 0; i < 200; i++) { 18631 DELAY(10000); 18632 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18633 break; 18634 } 18635 } 18636 18637 /* Clear ILT */ 18638 bxe_clear_func_ilt(sc, func); 18639 18640 /* 18641 * Timers workaround bug for E2: if this is vnic-3, 18642 * we need to set the entire ilt range for the timers. 
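 * (same dummy-TM-client trick as in the common-phase init)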
18643 */ 18644 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18645 struct ilt_client_info ilt_cli; 18646 /* use dummy TM client */ 18647 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18648 ilt_cli.start = 0; 18649 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18650 ilt_cli.client_num = ILT_CLIENT_TM; 18651 18652 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18653 } 18654 18655 /* this assumes that reset_port() called before reset_func()*/ 18656 if (!CHIP_IS_E1x(sc)) { 18657 bxe_pf_disable(sc); 18658 } 18659 18660 sc->dmae_ready = 0; 18661} 18662 18663static int 18664bxe_gunzip_init(struct bxe_softc *sc) 18665{ 18666 return (0); 18667} 18668 18669static void 18670bxe_gunzip_end(struct bxe_softc *sc) 18671{ 18672 return; 18673} 18674 18675static int 18676bxe_init_firmware(struct bxe_softc *sc) 18677{ 18678 if (CHIP_IS_E1(sc)) { 18679 ecore_init_e1_firmware(sc); 18680 sc->iro_array = e1_iro_arr; 18681 } else if (CHIP_IS_E1H(sc)) { 18682 ecore_init_e1h_firmware(sc); 18683 sc->iro_array = e1h_iro_arr; 18684 } else if (!CHIP_IS_E1x(sc)) { 18685 ecore_init_e2_firmware(sc); 18686 sc->iro_array = e2_iro_arr; 18687 } else { 18688 BLOGE(sc, "Unsupported chip revision\n"); 18689 return (-1); 18690 } 18691 18692 return (0); 18693} 18694 18695static void 18696bxe_release_firmware(struct bxe_softc *sc) 18697{ 18698 /* Do nothing */ 18699 return; 18700} 18701 18702static int 18703ecore_gunzip(struct bxe_softc *sc, 18704 const uint8_t *zbuf, 18705 int len) 18706{ 18707 /* XXX : Implement... */ 18708 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18709 return (FALSE); 18710} 18711 18712static void 18713ecore_reg_wr_ind(struct bxe_softc *sc, 18714 uint32_t addr, 18715 uint32_t val) 18716{ 18717 bxe_reg_wr_ind(sc, addr, val); 18718} 18719 18720static void 18721ecore_write_dmae_phys_len(struct bxe_softc *sc, 18722 bus_addr_t phys_addr, 18723 uint32_t addr, 18724 uint32_t len) 18725{ 18726 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18727} 18728 18729void 18730ecore_storm_memset_struct(struct bxe_softc *sc, 18731 uint32_t addr, 18732 size_t size, 18733 uint32_t *data) 18734{ 18735 uint8_t i; 18736 for (i = 0; i < size/4; i++) { 18737 REG_WR(sc, addr + (i * 4), data[i]); 18738 } 18739} 18740 18741 18742/* 18743 * character device - ioctl interface definitions 18744 */ 18745 18746 18747#include "bxe_dump.h" 18748#include "bxe_ioctl.h" 18749#include <sys/conf.h> 18750 18751static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18752 struct thread *td); 18753 18754static struct cdevsw bxe_cdevsw = { 18755 .d_version = D_VERSION, 18756 .d_ioctl = bxe_eioctl, 18757 .d_name = "bxecnic", 18758}; 18759 18760#define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 
0 : (sc->pcie_func & 1)) 18761 18762 18763#define DUMP_ALL_PRESETS 0x1FFF 18764#define DUMP_MAX_PRESETS 13 18765#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) 18766#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) 18767#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) 18768#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) 18769#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) 18770 18771#define IS_REG_IN_PRESET(presets, idx) \ 18772 ((presets & (1 << (idx-1))) == (1 << (idx-1))) 18773 18774 18775static int 18776bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) 18777{ 18778 if (CHIP_IS_E1(sc)) 18779 return dump_num_registers[0][preset-1]; 18780 else if (CHIP_IS_E1H(sc)) 18781 return dump_num_registers[1][preset-1]; 18782 else if (CHIP_IS_E2(sc)) 18783 return dump_num_registers[2][preset-1]; 18784 else if (CHIP_IS_E3A0(sc)) 18785 return dump_num_registers[3][preset-1]; 18786 else if (CHIP_IS_E3B0(sc)) 18787 return dump_num_registers[4][preset-1]; 18788 else 18789 return 0; 18790} 18791 18792static int 18793bxe_get_total_regs_len32(struct bxe_softc *sc) 18794{ 18795 uint32_t preset_idx; 18796 int regdump_len32 = 0; 18797 18798 18799 /* Calculate the total preset regs length */ 18800 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18801 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); 18802 } 18803 18804 return regdump_len32; 18805} 18806 18807static const uint32_t * 18808__bxe_get_page_addr_ar(struct bxe_softc *sc) 18809{ 18810 if (CHIP_IS_E2(sc)) 18811 return page_vals_e2; 18812 else if (CHIP_IS_E3(sc)) 18813 return page_vals_e3; 18814 else 18815 return NULL; 18816} 18817 18818static uint32_t 18819__bxe_get_page_reg_num(struct bxe_softc *sc) 18820{ 18821 if (CHIP_IS_E2(sc)) 18822 return PAGE_MODE_VALUES_E2; 18823 else if (CHIP_IS_E3(sc)) 18824 return PAGE_MODE_VALUES_E3; 18825 else 18826 return 0; 18827} 18828 18829static const uint32_t * 18830__bxe_get_page_write_ar(struct bxe_softc *sc) 18831{ 18832 if (CHIP_IS_E2(sc)) 18833 return page_write_regs_e2; 18834 else if (CHIP_IS_E3(sc)) 18835 return page_write_regs_e3; 18836 else 18837 return NULL; 18838} 18839 18840static uint32_t 18841__bxe_get_page_write_num(struct bxe_softc *sc) 18842{ 18843 if (CHIP_IS_E2(sc)) 18844 return PAGE_WRITE_REGS_E2; 18845 else if (CHIP_IS_E3(sc)) 18846 return PAGE_WRITE_REGS_E3; 18847 else 18848 return 0; 18849} 18850 18851static const struct reg_addr * 18852__bxe_get_page_read_ar(struct bxe_softc *sc) 18853{ 18854 if (CHIP_IS_E2(sc)) 18855 return page_read_regs_e2; 18856 else if (CHIP_IS_E3(sc)) 18857 return page_read_regs_e3; 18858 else 18859 return NULL; 18860} 18861 18862static uint32_t 18863__bxe_get_page_read_num(struct bxe_softc *sc) 18864{ 18865 if (CHIP_IS_E2(sc)) 18866 return PAGE_READ_REGS_E2; 18867 else if (CHIP_IS_E3(sc)) 18868 return PAGE_READ_REGS_E3; 18869 else 18870 return 0; 18871} 18872 18873static bool 18874bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) 18875{ 18876 if (CHIP_IS_E1(sc)) 18877 return IS_E1_REG(reg_info->chips); 18878 else if (CHIP_IS_E1H(sc)) 18879 return IS_E1H_REG(reg_info->chips); 18880 else if (CHIP_IS_E2(sc)) 18881 return IS_E2_REG(reg_info->chips); 18882 else if (CHIP_IS_E3A0(sc)) 18883 return IS_E3A0_REG(reg_info->chips); 18884 else if (CHIP_IS_E3B0(sc)) 18885 return IS_E3B0_REG(reg_info->chips); 18886 else 18887 return 0; 18888} 18889 18890static bool 18891bxe_is_wreg_in_chip(struct bxe_softc *sc, const 
struct wreg_addr *wreg_info) 18892{ 18893 if (CHIP_IS_E1(sc)) 18894 return IS_E1_REG(wreg_info->chips); 18895 else if (CHIP_IS_E1H(sc)) 18896 return IS_E1H_REG(wreg_info->chips); 18897 else if (CHIP_IS_E2(sc)) 18898 return IS_E2_REG(wreg_info->chips); 18899 else if (CHIP_IS_E3A0(sc)) 18900 return IS_E3A0_REG(wreg_info->chips); 18901 else if (CHIP_IS_E3B0(sc)) 18902 return IS_E3B0_REG(wreg_info->chips); 18903 else 18904 return 0; 18905} 18906 18907/** 18908 * bxe_read_pages_regs - read "paged" registers 18909 * 18910 * @bp device handle 18911 * @p output buffer 18912 * 18913 * Reads "paged" memories: memories that may only be read by first writing to a 18914 * specific address ("write address") and then reading from a specific address 18915 * ("read address"). There may be more than one write address per "page" and 18916 * more than one read address per write address. 18917 */ 18918static void 18919bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18920{ 18921 uint32_t i, j, k, n; 18922 18923 /* addresses of the paged registers */ 18924 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); 18925 /* number of paged registers */ 18926 int num_pages = __bxe_get_page_reg_num(sc); 18927 /* write addresses */ 18928 const uint32_t *write_addr = __bxe_get_page_write_ar(sc); 18929 /* number of write addresses */ 18930 int write_num = __bxe_get_page_write_num(sc); 18931 /* read addresses info */ 18932 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); 18933 /* number of read addresses */ 18934 int read_num = __bxe_get_page_read_num(sc); 18935 uint32_t addr, size; 18936 18937 for (i = 0; i < num_pages; i++) { 18938 for (j = 0; j < write_num; j++) { 18939 REG_WR(sc, write_addr[j], page_addr[i]); 18940 18941 for (k = 0; k < read_num; k++) { 18942 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { 18943 size = read_addr[k].size; 18944 for (n = 0; n < size; n++) { 18945 addr = read_addr[k].addr + n*4; 18946 *p++ = REG_RD(sc, addr); 18947 } 18948 } 18949 } 18950 } 18951 } 18952 return; 18953} 18954 18955 18956static int 18957bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18958{ 18959 uint32_t i, j, addr; 18960 const struct wreg_addr *wreg_addr_p = NULL; 18961 18962 if (CHIP_IS_E1(sc)) 18963 wreg_addr_p = &wreg_addr_e1; 18964 else if (CHIP_IS_E1H(sc)) 18965 wreg_addr_p = &wreg_addr_e1h; 18966 else if (CHIP_IS_E2(sc)) 18967 wreg_addr_p = &wreg_addr_e2; 18968 else if (CHIP_IS_E3A0(sc)) 18969 wreg_addr_p = &wreg_addr_e3; 18970 else if (CHIP_IS_E3B0(sc)) 18971 wreg_addr_p = &wreg_addr_e3b0; 18972 else 18973 return (-1); 18974 18975 /* Read the idle_chk registers */ 18976 for (i = 0; i < IDLE_REGS_COUNT; i++) { 18977 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 18978 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { 18979 for (j = 0; j < idle_reg_addrs[i].size; j++) 18980 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); 18981 } 18982 } 18983 18984 /* Read the regular registers */ 18985 for (i = 0; i < REGS_COUNT; i++) { 18986 if (bxe_is_reg_in_chip(sc, ®_addrs[i]) && 18987 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { 18988 for (j = 0; j < reg_addrs[i].size; j++) 18989 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); 18990 } 18991 } 18992 18993 /* Read the CAM registers */ 18994 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && 18995 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { 18996 for (i = 0; i < wreg_addr_p->size; i++) { 18997 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); 18998 18999 /* In case of wreg_addr register, read additional 19000 
registers from read_regs array 19001 */ 19002 for (j = 0; j < wreg_addr_p->read_regs_count; j++) { 19003 addr = *(wreg_addr_p->read_regs); 19004 *p++ = REG_RD(sc, addr + j*4); 19005 } 19006 } 19007 } 19008 19009 /* Paged registers are supported in E2 & E3 only */ 19010 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 19011 /* Read "paged" registers */ 19012 bxe_read_pages_regs(sc, p, preset); 19013 } 19014 19015 return 0; 19016} 19017 19018int 19019bxe_grc_dump(struct bxe_softc *sc) 19020{ 19021 int rval = 0; 19022 uint32_t preset_idx; 19023 uint8_t *buf; 19024 uint32_t size; 19025 struct dump_header *d_hdr; 19026 uint32_t i; 19027 uint32_t reg_val; 19028 uint32_t reg_addr; 19029 uint32_t cmd_offset; 19030 struct ecore_ilt *ilt = SC_ILT(sc); 19031 struct bxe_fastpath *fp; 19032 struct ilt_client_info *ilt_cli; 19033 int grc_dump_size; 19034 19035 19036 if (sc->grcdump_done || sc->grcdump_started) 19037 return (rval); 19038 19039 sc->grcdump_started = 1; 19040 BLOGI(sc, "Started collecting grcdump\n"); 19041 19042 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19043 sizeof(struct dump_header); 19044 19045 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); 19046 19047 if (sc->grc_dump == NULL) { 19048 BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); 19049 return(ENOMEM); 19050 } 19051 19052 19053 19054 /* Disable parity attentions as long as following dump may 19055 * cause false alarms by reading never written registers. We 19056 * will re-enable parity attentions right after the dump. 19057 */ 19058 19059 /* Disable parity on path 0 */ 19060 bxe_pretend_func(sc, 0); 19061 19062 ecore_disable_blocks_parity(sc); 19063 19064 /* Disable parity on path 1 */ 19065 bxe_pretend_func(sc, 1); 19066 ecore_disable_blocks_parity(sc); 19067 19068 /* Return to current function */ 19069 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 19070 19071 buf = sc->grc_dump; 19072 d_hdr = sc->grc_dump; 19073 19074 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; 19075 d_hdr->version = BNX2X_DUMP_VERSION; 19076 d_hdr->preset = DUMP_ALL_PRESETS; 19077 19078 if (CHIP_IS_E1(sc)) { 19079 d_hdr->dump_meta_data = DUMP_CHIP_E1; 19080 } else if (CHIP_IS_E1H(sc)) { 19081 d_hdr->dump_meta_data = DUMP_CHIP_E1H; 19082 } else if (CHIP_IS_E2(sc)) { 19083 d_hdr->dump_meta_data = DUMP_CHIP_E2 | 19084 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19085 } else if (CHIP_IS_E3A0(sc)) { 19086 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | 19087 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19088 } else if (CHIP_IS_E3B0(sc)) { 19089 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | 19090 (BXE_PATH(sc) ? 
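/* BXE_PATH selects path 0 or 1 of the dual-path E2/E3 devices */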
DUMP_PATH_1 : DUMP_PATH_0); 19091 } 19092 19093 buf += sizeof(struct dump_header); 19094 19095 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 19096 19097 /* Skip presets with IOR */ 19098 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || 19099 (preset_idx == 11)) 19100 continue; 19101 19102 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); 19103 19104 if (rval) 19105 break; 19106 19107 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); 19108 19109 buf += size; 19110 } 19111 19112 bxe_pretend_func(sc, 0); 19113 ecore_clear_blocks_parity(sc); 19114 ecore_enable_blocks_parity(sc); 19115 19116 bxe_pretend_func(sc, 1); 19117 ecore_clear_blocks_parity(sc); 19118 ecore_enable_blocks_parity(sc); 19119 19120 /* Return to current function */ 19121 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 19122 19123 19124 19125 if(sc->state == BXE_STATE_OPEN) { 19126 if(sc->fw_stats_req != NULL) { 19127 BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", 19128 (uintmax_t)sc->fw_stats_req_mapping, 19129 (uintmax_t)sc->fw_stats_data_mapping, 19130 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); 19131 } 19132 if(sc->def_sb != NULL) { 19133 BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", 19134 (void *)sc->def_sb_dma.paddr, sc->def_sb, 19135 sizeof(struct host_sp_status_block)); 19136 } 19137 if(sc->eq_dma.vaddr != NULL) { 19138 BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", 19139 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); 19140 } 19141 if(sc->sp_dma.vaddr != NULL) { 19142 BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", 19143 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, 19144 sizeof(struct bxe_slowpath)); 19145 } 19146 if(sc->spq_dma.vaddr != NULL) { 19147 BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", 19148 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); 19149 } 19150 if(sc->gz_buf_dma.vaddr != NULL) { 19151 BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", 19152 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, 19153 FW_BUF_SIZE); 19154 } 19155 for (i = 0; i < sc->num_queues; i++) { 19156 fp = &sc->fp[i]; 19157 if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && 19158 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL && 19159 fp->rx_sge_dma.vaddr != NULL) { 19160 19161 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 19162 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, 19163 sizeof(union bxe_host_hc_status_block)); 19164 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19165 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, 19166 (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); 19167 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19168 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, 19169 (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); 19170 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 19171 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, 19172 (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); 19173 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19174 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, 19175 (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); 19176 } 19177 } 19178 if(ilt != NULL ) { 19179 ilt_cli = &ilt->clients[1]; 19180 if(ilt->lines != NULL) { 19181 for (i = ilt_cli->start; i <= ilt_cli->end; i++) { 19182 BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", 19183 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), 19184 ((struct 
bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); 19185 } 19186 } 19187 } 19188 19189 19190 cmd_offset = DMAE_REG_CMD_MEM; 19191 for (i = 0; i < 224; i++) { 19192 reg_addr = (cmd_offset +(i * 4)); 19193 reg_val = REG_RD(sc, reg_addr); 19194 BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, 19195 reg_addr, reg_val); 19196 } 19197 } 19198 19199 BLOGI(sc, "Collection of grcdump done\n"); 19200 sc->grcdump_done = 1; 19201 return(rval); 19202} 19203 19204static int 19205bxe_add_cdev(struct bxe_softc *sc) 19206{ 19207 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); 19208 19209 if (sc->eeprom == NULL) { 19210 BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); 19211 return (-1); 19212 } 19213 19214 sc->ioctl_dev = make_dev(&bxe_cdevsw, 19215 sc->ifnet->if_dunit, 19216 UID_ROOT, 19217 GID_WHEEL, 19218 0600, 19219 "%s", 19220 if_name(sc->ifnet)); 19221 19222 if (sc->ioctl_dev == NULL) { 19223 free(sc->eeprom, M_DEVBUF); 19224 sc->eeprom = NULL; 19225 return (-1); 19226 } 19227 19228 sc->ioctl_dev->si_drv1 = sc; 19229 19230 return (0); 19231} 19232 19233static void 19234bxe_del_cdev(struct bxe_softc *sc) 19235{ 19236 if (sc->ioctl_dev != NULL) 19237 destroy_dev(sc->ioctl_dev); 19238 19239 if (sc->eeprom != NULL) { 19240 free(sc->eeprom, M_DEVBUF); 19241 sc->eeprom = NULL; 19242 } 19243 sc->ioctl_dev = NULL; 19244 19245 return; 19246} 19247 19248static bool bxe_is_nvram_accessible(struct bxe_softc *sc) 19249{ 19250 19251 if ((sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) == 0) 19252 return FALSE; 19253 19254 return TRUE; 19255} 19256 19257 19258static int 19259bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 19260{ 19261 int rval = 0; 19262 19263 if(!bxe_is_nvram_accessible(sc)) { 19264 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 19265 return (-EAGAIN); 19266 } 19267 rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); 19268 19269 19270 return (rval); 19271} 19272 19273static int 19274bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 19275{ 19276 int rval = 0; 19277 19278 if(!bxe_is_nvram_accessible(sc)) { 19279 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 19280 return (-EAGAIN); 19281 } 19282 rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); 19283 19284 return (rval); 19285} 19286 19287static int 19288bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) 19289{ 19290 int rval = 0; 19291 19292 switch (eeprom->eeprom_cmd) { 19293 19294 case BXE_EEPROM_CMD_SET_EEPROM: 19295 19296 rval = copyin(eeprom->eeprom_data, sc->eeprom, 19297 eeprom->eeprom_data_len); 19298 19299 if (rval) 19300 break; 19301 19302 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 19303 eeprom->eeprom_data_len); 19304 break; 19305 19306 case BXE_EEPROM_CMD_GET_EEPROM: 19307 19308 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 19309 eeprom->eeprom_data_len); 19310 19311 if (rval) { 19312 break; 19313 } 19314 19315 rval = copyout(sc->eeprom, eeprom->eeprom_data, 19316 eeprom->eeprom_data_len); 19317 break; 19318 19319 default: 19320 rval = EINVAL; 19321 break; 19322 } 19323 19324 if (rval) { 19325 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); 19326 } 19327 19328 return (rval); 19329} 19330 19331static int 19332bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) 19333{ 19334 uint32_t ext_phy_config; 19335 int port = SC_PORT(sc); 19336 int cfg_idx = bxe_get_link_cfg_idx(sc); 19337 19338 dev_p->supported = 
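/* also merge the TP/FIBRE capability bits of the other cfg index so both media types stay reported */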

static int
bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
{
    uint32_t ext_phy_config;
    int port = SC_PORT(sc);
    int cfg_idx = bxe_get_link_cfg_idx(sc);

    dev_p->supported = sc->port.supported[cfg_idx] |
        (sc->port.supported[cfg_idx ^ 1] &
         (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
    dev_p->advertising = sc->port.advertising[cfg_idx];

    if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
        ELINK_ETH_PHY_SFP_1G_FIBER) {
        /* A 1G SFP module is plugged in: mask off 10G support. */
        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
    }

    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
        !(sc->flags & BXE_MF_FUNC_DIS)) {
        dev_p->duplex = sc->link_vars.duplex;
        if (IS_MF(sc) && !BXE_NOMCP(sc))
            dev_p->speed = bxe_get_mf_speed(sc);
        else
            dev_p->speed = sc->link_vars.line_speed;
    } else {
        dev_p->duplex = DUPLEX_UNKNOWN;
        dev_p->speed = SPEED_UNKNOWN;
    }

    dev_p->port = bxe_media_detect(sc);

    ext_phy_config = SHMEM_RD(sc,
        dev_info.port_hw_config[port].external_phy_config);
    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
             ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;

    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return (0);
}
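
/*
 * Device ioctl entry point for the management node created by
 * bxe_add_cdev(). A hypothetical userland sketch of the two-step GRC
 * dump protocol handled below (names as used in this file; the dump
 * must first be armed via the trigger_grcdump knob, otherwise the
 * BXE_GRC_DUMP request fails with EINVAL):
 *
 *     bxe_grcdump_t dump = {0};
 *
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);       // query required size
 *     dump.grcdump = malloc(dump.grcdump_size);  // caller-supplied buffer
 *     ioctl(fd, BXE_GRC_DUMP, &dump);            // collect and copy out
 */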
static int
bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
    struct bxe_softc *sc;
    int rval = 0;
    device_t pci_dev;
    bxe_grcdump_t *dump = NULL;
    int grc_dump_size;
    bxe_drvinfo_t *drv_infop = NULL;
    bxe_dev_setting_t *dev_p;
    bxe_dev_setting_t dev_set;
    bxe_get_regs_t *reg_p;
    bxe_reg_rdw_t *reg_rdw_p;
    bxe_pcicfg_rdw_t *cfg_rdw_p;
    bxe_perm_mac_addr_t *mac_addr_p;

    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
        return (ENXIO);

    pci_dev = sc->dev;

    dump = (bxe_grcdump_t *)data;

    switch (cmd) {

    case BXE_GRC_DUMP_SIZE:
        dump->pci_func = sc->pcie_func;
        dump->grcdump_size =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        break;

    case BXE_GRC_DUMP:

        grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
            (dump->grcdump_size < grc_dump_size)) {
            rval = EINVAL;
            break;
        }

        if ((sc->trigger_grcdump) && (!sc->grcdump_done) &&
            (!sc->grcdump_started)) {
            rval = bxe_grc_dump(sc);
        }

        if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            dump->grcdump_dwords = grc_dump_size >> 2;
            rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }

        break;

    case BXE_DRV_INFO:
        drv_infop = (bxe_drvinfo_t *)data;
        snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
        snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
            BXE_DRIVER_VERSION);
        snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
            sc->devinfo.bc_ver_str);
        snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
            "%s", sc->fw_ver_str);
        drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
        drv_infop->reg_dump_len =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
            sc->pcie_bus, sc->pcie_device, sc->pcie_func);
        break;

    case BXE_DEV_SETTING:
        dev_p = (bxe_dev_setting_t *)data;
        bxe_get_settings(sc, &dev_set);
        dev_p->supported = dev_set.supported;
        dev_p->advertising = dev_set.advertising;
        dev_p->speed = dev_set.speed;
        dev_p->duplex = dev_set.duplex;
        dev_p->port = dev_set.port;
        dev_p->phy_address = dev_set.phy_address;
        dev_p->autoneg = dev_set.autoneg;
        break;

    case BXE_GET_REGS:

        reg_p = (bxe_get_regs_t *)data;
        grc_dump_size = reg_p->reg_buf_len;

        if ((!sc->grcdump_done) && (!sc->grcdump_started)) {
            bxe_grc_dump(sc);
        }
        if ((sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }

        break;

    case BXE_RDW_REG:
        reg_rdw_p = (bxe_reg_rdw_t *)data;
        if ((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);

        if ((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);

        break;

    case BXE_RDW_PCICFG:
        cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
        if (cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
            cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
                cfg_rdw_p->cfg_width);
        } else if (cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
            pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
                cfg_rdw_p->cfg_width);
        } else {
            BLOGW(sc, "BXE_RDW_PCICFG ioctl: invalid command\n");
        }
        break;

    case BXE_MAC_ADDR:
        mac_addr_p = (bxe_perm_mac_addr_t *)data;
        snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
            sc->mac_addr_str);
        break;

    case BXE_EEPROM:
        rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
        break;

    default:
        break;
    }

    return (rval);
}
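
/*
 * Example (hypothetical userland sketch): a direct register read via the
 * BXE_RDW_REG ioctl handled above. Field and command names follow the
 * bxe_reg_rdw_t usage in bxe_eioctl(); the offset is illustrative only.
 *
 *     bxe_reg_rdw_t rdw;
 *
 *     rdw.reg_cmd         = BXE_READ_REG_CMD;
 *     rdw.reg_access_type = BXE_REG_ACCESS_DIRECT;
 *     rdw.reg_id          = 0x2000;    // hypothetical GRC offset
 *     if (ioctl(fd, BXE_RDW_REG, &rdw) == 0)
 *         printf("0x%x = 0x%08x\n", rdw.reg_id, rdw.reg_val);
 */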