/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 339881 2018-10-29 21:09:39Z davidcs $");

#define BXE_DRIVER_VERSION "1.78.91"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        QLOGIC_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
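/*
 * Illustrative sketch (not the driver's code): bxe_probe(), defined later
 * in this file, walks bxe_devs[] and matches on the PCI vendor/device IDs
 * read from the device, roughly along these lines:
 *
 *     struct bxe_device_type *t;
 *     for (t = bxe_devs; t->bxe_name != NULL; t++) {
 *         if ((pci_get_vendor(dev) == t->bxe_vid) &&
 *             (pci_get_device(dev) == t->bxe_did)) {
 *             // set the device description, return BUS_PROBE_DEFAULT
 *         }
 *     }
 *
 * The field names (bxe_vid/bxe_did/bxe_name) are assumptions based on the
 * usual layout of struct bxe_device_type in bxe.h; the all-zero/NULL entry
 * above terminates the scan.
 */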
/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);


/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
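/*
 * Note (added for clarity): the CTLFLAG_RDTUN knobs above are seeded once
 * from loader tunables at module load and are read-only via sysctl(8)
 * afterwards, e.g. in /boot/loader.conf:
 *
 *     hw.bxe.interrupt_mode=2
 *     hw.bxe.queue_count=4
 *     hw.bxe.debug=0x1
 *
 * All of them appear under the hw.bxe tree created by SYSCTL_NODE above.
 */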
size"); 303 304/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ 305static int bxe_autogreeen = 0; 306SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, 307 &bxe_autogreeen, 0, "AutoGrEEEn support"); 308 309/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ 310static int bxe_udp_rss = 0; 311SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, 312 &bxe_udp_rss, 0, "UDP RSS support"); 313 314 315#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */ 316 317#define STATS_OFFSET32(stat_name) \ 318 (offsetof(struct bxe_eth_stats, stat_name) / 4) 319 320#define Q_STATS_OFFSET32(stat_name) \ 321 (offsetof(struct bxe_eth_q_stats, stat_name) / 4) 322 323static const struct { 324 uint32_t offset; 325 uint32_t size; 326 uint32_t flags; 327#define STATS_FLAGS_PORT 1 328#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ 329#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) 330 char string[STAT_NAME_LEN]; 331} bxe_eth_stats_arr[] = { 332 { STATS_OFFSET32(total_bytes_received_hi), 333 8, STATS_FLAGS_BOTH, "rx_bytes" }, 334 { STATS_OFFSET32(error_bytes_received_hi), 335 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, 336 { STATS_OFFSET32(total_unicast_packets_received_hi), 337 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, 338 { STATS_OFFSET32(total_multicast_packets_received_hi), 339 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, 340 { STATS_OFFSET32(total_broadcast_packets_received_hi), 341 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, 342 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 343 8, STATS_FLAGS_PORT, "rx_crc_errors" }, 344 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 345 8, STATS_FLAGS_PORT, "rx_align_errors" }, 346 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 347 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, 348 { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 349 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, 350 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 351 8, STATS_FLAGS_PORT, "rx_fragments" }, 352 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 353 8, STATS_FLAGS_PORT, "rx_jabbers" }, 354 { STATS_OFFSET32(no_buff_discard_hi), 355 8, STATS_FLAGS_BOTH, "rx_discards" }, 356 { STATS_OFFSET32(mac_filter_discard), 357 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, 358 { STATS_OFFSET32(mf_tag_discard), 359 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, 360 { STATS_OFFSET32(pfc_frames_received_hi), 361 8, STATS_FLAGS_PORT, "pfc_frames_received" }, 362 { STATS_OFFSET32(pfc_frames_sent_hi), 363 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, 364 { STATS_OFFSET32(brb_drop_hi), 365 8, STATS_FLAGS_PORT, "rx_brb_discard" }, 366 { STATS_OFFSET32(brb_truncate_hi), 367 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, 368 { STATS_OFFSET32(pause_frames_received_hi), 369 8, STATS_FLAGS_PORT, "rx_pause_frames" }, 370 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 371 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, 372 { STATS_OFFSET32(nig_timer_max), 373 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, 374 { STATS_OFFSET32(total_bytes_transmitted_hi), 375 8, STATS_FLAGS_BOTH, "tx_bytes" }, 376 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 377 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 378 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 379 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, 380 { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 381 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, 382 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 383 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, 384 { 
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, 456 { STATS_OFFSET32(tx_ofld_frames_lso), 457 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, 458 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 459 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, 460 { STATS_OFFSET32(tx_encap_failures), 461 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, 462 { STATS_OFFSET32(tx_hw_queue_full), 463 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, 464 { STATS_OFFSET32(tx_hw_max_queue_depth), 465 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, 466 { STATS_OFFSET32(tx_dma_mapping_failure), 467 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, 468 { STATS_OFFSET32(tx_max_drbr_queue_depth), 469 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, 470 { STATS_OFFSET32(tx_window_violation_std), 471 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, 472 { STATS_OFFSET32(tx_window_violation_tso), 473 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, 474 { STATS_OFFSET32(tx_chain_lost_mbuf), 475 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, 476 { STATS_OFFSET32(tx_frames_deferred), 477 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, 478 { STATS_OFFSET32(tx_queue_xoff), 479 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, 480 { STATS_OFFSET32(mbuf_defrag_attempts), 481 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, 482 { STATS_OFFSET32(mbuf_defrag_failures), 483 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, 484 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 485 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, 486 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 487 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, 488 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 489 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, 490 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 491 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, 492 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 493 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, 494 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 495 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, 496 { STATS_OFFSET32(mbuf_alloc_tx), 497 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, 498 { STATS_OFFSET32(mbuf_alloc_rx), 499 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, 500 { STATS_OFFSET32(mbuf_alloc_sge), 501 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, 502 { STATS_OFFSET32(mbuf_alloc_tpa), 503 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}, 504 { STATS_OFFSET32(tx_queue_full_return), 505 4, STATS_FLAGS_FUNC, "tx_queue_full_return"}, 506 { STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 507 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"}, 508 { STATS_OFFSET32(tx_request_link_down_failures), 509 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"}, 510 { STATS_OFFSET32(bd_avail_too_less_failures), 511 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"}, 512 { STATS_OFFSET32(tx_mq_not_empty), 513 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}, 514 { STATS_OFFSET32(nsegs_path1_errors), 515 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"}, 516 { STATS_OFFSET32(nsegs_path2_errors), 517 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"} 518 519 520}; 521 522static const struct { 523 uint32_t offset; 524 uint32_t size; 525 char string[STAT_NAME_LEN]; 526} bxe_eth_q_stats_arr[] = { 527 { Q_STATS_OFFSET32(total_bytes_received_hi), 528 8, "rx_bytes" }, 529 { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 530 8, "rx_ucast_packets" }, 531 { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 532 8, "rx_mcast_packets" }, 533 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 534 8, "rx_bcast_packets" }, 535 { Q_STATS_OFFSET32(no_buff_discard_hi), 536 8, "rx_discards" }, 537 { 
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return),
      4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
      4, "bxe_tx_mq_sc_state_failures"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures),
      4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
      4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty),
      4, "tx_mq_not_empty"},
    { Q_STATS_OFFSET32(nsegs_path1_errors),
      4, "nsegs_path1_errors"},
    { Q_STATS_OFFSET32(nsegs_path2_errors),
      4, "nsegs_path2_errors"}
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)


static void bxe_cmng_fns_init(struct bxe_softc *sc,
                              uint8_t read_cfg,
                              uint8_t cmng_type);
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int  bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                 uint16_t prev_index,
                                 uint16_t index);
static int  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                  int queue);
static int  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                  uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf *m,
                                     uint8_t contents);
static int  bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int  bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int  bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int  bxe_init_locked(struct bxe_softc *sc);
static int  bxe_stop_locked(struct bxe_softc *sc);
static void bxe_sp_err_timeout_task(void *arg, int pending);
void bxe_parity_recover(struct bxe_softc *sc);
void bxe_handle_error(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
                                     uint8_t keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
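/*
 * Usage sketch for calc_crc32() below (illustrative only): the polynomial
 * 0x1edc6f41 is CRC-32C (Castagnoli). A caller hashing an 8-byte-aligned
 * buffer with the conventional all-ones seed and a complemented result
 * would do:
 *
 *     uint32_t crc = calc_crc32(buf, len, 0xffffffff, 1);
 *
 * Note the length guard in the function: any len that is not a multiple
 * of 8 returns the seed unchanged rather than a partial CRC.
 */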
static void bxe_handle_fp_tq(void *context, int pending);

static int  bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
int bxe_grc_dump(struct bxe_softc *sc);
static int  bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);

/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
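/*
 * Usage note (illustrative): the helpers above give the shared ecore code
 * Linux-style atomic bit semantics on top of FreeBSD's atomic(9) ops. The
 * test-and-set variant is a compare-and-swap retry loop, so only one of
 * two racing callers observes the bit as previously clear:
 *
 *     volatile unsigned long flags = 0;
 *     if (bxe_test_and_set_bit(3, &flags) == 0) {
 *         // this caller won the race and now "owns" bit 3
 *     }
 */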
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load), so any load failure is recorded in the bxe_dma
 * structure (paddr/nseg cleared) for the caller to detect.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}
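/*
 * Typical call pattern (illustrative; the block name is hypothetical):
 * callers allocate one contiguous, page-aligned DMA block per object and
 * check the return value, e.g.
 *
 *     struct bxe_dma dma;
 *     memset(&dma, 0, sizeof(dma));  // the 'size > 0' guard detects reuse
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
 *         return (ENOMEM);
 *     }
 *     // use dma.vaddr (kernel VA) and dma.paddr (bus address)
 *     bxe_dma_free(sc, &dma);
 *
 * The tag is created with nsegments = 1, so the block is guaranteed to be
 * physically contiguous from the device's point of view.
 */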
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}

/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
          resource, resource_bit);
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
/*
 * Per PF misc lock must be acquired before the per port MCP lock. Otherwise,
 * had we done things the other way around, if two PFs from the same port
 * were to attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
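/*
 * Byte-order note (added for clarity): htobe32() in the read path stores
 * the dword most-significant byte first, so the caller's buffer receives
 * the NVRAM bytes in their on-flash order independent of host endianness;
 * this is what the "converting to big-endian" comment above refers to, and
 * bxe_nvram_write1() below undoes it with be32toh() before writing back.
 */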
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
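/*
 * Worked example (illustrative) for the single-byte path above:
 * BYTE_OFFSET(offset) selects the byte lane within the containing dword,
 * e.g. offset 0x106 -> 8 * (0x106 & 0x03) = 16. bxe_nvram_write1() then
 * read-modify-writes the aligned dword at 0x104 (0x106 & ~0x03),
 * replacing only bits [23:16] with the new byte.
 */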
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_cmd *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
                      DMAE_CMD_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_CMD_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
               (dst_type << DMAE_CMD_DST_SHIFT));

    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_cmd *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_cmd));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_cmd *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
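/*
 * Timing note (added for clarity): on ASIC parts the completion poll above
 * allows up to 4000 iterations * 50 usec = ~200 msec (plus the initial
 * 5 usec), and 100x that on slow emulation/FPGA parts, before returning
 * DMAE_TIMEOUT.
 */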
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_cmd dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_cmd dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
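/*
 * Worked example (illustrative): 'len' here counts dwords. With a
 * hypothetical DMAE_LEN32_WR_MAX of 0x400 (1024 dwords), writing
 * len = 0x500 issues one chunk of 0x400 dwords followed by a remainder of
 * 0x100. Note that 'offset' advances in bytes (dmae_wr_max * 4) while
 * 'len' counts down in dwords.
 */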
void
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
                       uint32_t cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t port,
                            uint8_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t port,
                            uint16_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t fw_sb_id,
                             uint8_t sb_index,
                             uint8_t disable,
                             uint16_t usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t reg_addr,
                   uint32_t val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t *wb_write,
                      uint16_t len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *wb_write,
                     uint16_t len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}

void
elink_cb_event_log(struct bxe_softc *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
{
    /* XXX */
    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}

static int
bxe_set_spio(struct bxe_softc *sc,
             int spio,
             uint32_t mode)
{
    uint32_t spio_reg;

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    switch (mode) {
    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
        break;

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_SET_POS);
        break;

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        /* set FLOAT */
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    return (0);
}

static int
bxe_gpio_read(struct bxe_softc *sc,
              int gpio_num,
              uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
                  " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
              gpio_mask);
        return (-1);
    }

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
}

static int
bxe_gpio_write(struct bxe_softc *sc,
               int gpio_num,
               uint32_t mode,
               uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1951 uint32_t gpio_mask = (1 << gpio_shift); 1952 uint32_t gpio_reg; 1953 1954 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1955 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" 1956 " gpio_shift %d gpio_mask 0x%x\n", 1957 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); 1958 return (-1); 1959 } 1960 1961 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1962 1963 /* read GPIO and mask except the float bits */ 1964 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1965 1966 switch (mode) { 1967 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1968 BLOGD(sc, DBG_PHY, 1969 "Set GPIO %d (shift %d) -> output low\n", 1970 gpio_num, gpio_shift); 1971 /* clear FLOAT and set CLR */ 1972 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1973 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 1974 break; 1975 1976 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1977 BLOGD(sc, DBG_PHY, 1978 "Set GPIO %d (shift %d) -> output high\n", 1979 gpio_num, gpio_shift); 1980 /* clear FLOAT and set SET */ 1981 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1982 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1983 break; 1984 1985 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1986 BLOGD(sc, DBG_PHY, 1987 "Set GPIO %d (shift %d) -> input\n", 1988 gpio_num, gpio_shift); 1989 /* set FLOAT */ 1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1991 break; 1992 1993 default: 1994 break; 1995 } 1996 1997 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 1998 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1999 2000 return (0); 2001} 2002 2003static int 2004bxe_gpio_mult_write(struct bxe_softc *sc, 2005 uint8_t pins, 2006 uint32_t mode) 2007{ 2008 uint32_t gpio_reg; 2009 2010 /* any port swapping should be handled by caller */ 2011 2012 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2013 2014 /* read GPIO and mask except the float bits */ 2015 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2016 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2017 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2018 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2019 2020 switch (mode) { 2021 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2022 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2023 /* set CLR */ 2024 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2025 break; 2026 2027 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2028 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2029 /* set SET */ 2030 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2031 break; 2032 2033 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2034 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2035 /* set FLOAT */ 2036 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2037 break; 2038 2039 default: 2040 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" 2041 " gpio_reg 0x%x\n", pins, mode, gpio_reg); 2042 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2043 return (-1); 2044 } 2045 2046 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2047 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2048 2049 return (0); 2050} 2051 2052static int 2053bxe_gpio_int_write(struct bxe_softc *sc, 2054 int gpio_num, 2055 uint32_t mode, 2056 uint8_t port) 2057{ 2058 /* The GPIO should be swapped if swap register is set and active */ 2059 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2060 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2061 int gpio_shift = (gpio_num + 2062 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
                  " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}

uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t gpio_num,
                   uint8_t port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}

uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t gpio_num,
                    uint8_t mode, /* 0=low 1=high */
                    uint8_t port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}

uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t pins,
                         uint8_t mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}

uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t gpio_num,
                        uint8_t mode, /* 0=low 1=high */
                        uint8_t port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}

void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}

/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t command,
                    uint32_t param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* Ruh-roh! */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}

static uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t command,
               uint32_t param)
{
    return (elink_cb_fw_command(sc, command, param));
}

static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t addr,
                           bus_addr_t mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t mapping,
                      uint16_t abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t abs_fid,
                      uint16_t pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}

static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t abs_fid,
                     uint8_t enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}

static void
storm_memset_eq_data(struct bxe_softc *sc,
                     struct event_ring_data *eq_data,
                     uint16_t pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}

static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t eq_prod,
                     uint16_t pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
}

/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 *   Used to setup the leading connection on a port. Completes on the
 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 *   Used to setup an additional connection on a port. Completes on the
 *   RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 *   Used to force the storm processors to update the statistics database
 *   in host memory. This ramrod is sent on the leading connection CID and
 *   completes as an index increment of the CSTORM on the default status
 *   block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 *   Used to update the state of the leading connection, usually to update
 *   the RSS indirection table. Completes on the RCQ of the leading
 *   connection. (Not currently used under FreeBSD until OS support becomes
 *   available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the multi-queue/RSS connection being torn down. Don't
 *   use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 *   Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 *   the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the leading connection (since the current connection
 *   has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 *   Used to tear down the leading connection prior to driver unload,
 *   typically fp[0]. Completes as an index increment of the CSTORM on the
 *   default status block.
 *
 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
 *   Used for connection offload. Completes on the RCQ of the multi-queue
 *   RSS connection that is being offloaded. (Not currently used under
 *   FreeBSD.)
 *
 * There can only be one command pending per function.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */

/* must be called under the spq lock */
static inline
struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
{
    struct eth_spe *next_spe = sc->spq_prod_bd;

    if (sc->spq_prod_bd == sc->spq_last_bd) {
        /* wrap back to the first eth_spq */
        sc->spq_prod_bd = sc->spq;
        sc->spq_prod_idx = 0;
    } else {
        sc->spq_prod_bd++;
        sc->spq_prod_idx++;
    }

    return (next_spe);
}

/* must be called under the spq lock */
static inline
void bxe_sp_prod_update(struct bxe_softc *sc)
{
    int func = SC_FUNC(sc);

    /*
     * Make sure that BD data is updated before writing the producer.
     * BD data is written to the memory, the producer is read from the
     * memory, thus we need a full memory barrier to ensure the ordering.
     */
    mb();

    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
             sc->spq_prod_idx);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
}

/**
 * bxe_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:      command to check
 * @cmd_type: command type
 */
static inline
int bxe_is_contextless_ramrod(int cmd,
                              int cmd_type)
{
    if ((cmd_type == NONE_CONNECTION_TYPE) ||
        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
        return (TRUE);
    } else {
        return (FALSE);
    }
}

/**
 * bxe_sp_post - place a single command on an SP ring
 *
 * @sc:       driver handle
 * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:      SW CID the command is related to
 * @data_hi:  command private data address (high 32 bits)
 * @data_lo:  command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
 */
int
bxe_sp_post(struct bxe_softc *sc,
            int command,
            int cid,
            uint32_t data_hi,
            uint32_t data_lo,
            int cmd_type)
{
    struct eth_spe *spe;
    uint16_t type;
    int common;

    common = bxe_is_contextless_ramrod(command, cmd_type);

    BXE_SP_LOCK(sc);

    if (common) {
        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
            BLOGE(sc, "EQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    } else {
        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
            BLOGE(sc, "SPQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    }

    spe = bxe_sp_get_next(sc);

    /* CID needs the port number to be encoded in it */
    spe->hdr.conn_and_cmd_data =
        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));

    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;

    /* TBD: Check if it works for VFs */
    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
             SPE_HDR_T_FUNCTION_ID);

    spe->hdr.type = htole16(type);

    spe->data.update_data_addr.hi = htole32(data_hi);
    spe->data.update_data_addr.lo = htole32(data_lo);

    /*
     * It's ok if the actual decrement is issued towards the memory
     * somewhere between the lock and unlock. Thus no more explicit
     * memory barrier is needed.
     */
    if (common) {
        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
    } else {
        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
    }

    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
    BLOGD(sc, DBG_SP,
          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
          sc->spq_prod_idx,
          (uint32_t)U64_HI(sc->spq_dma.paddr),
          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
          command,
          common,
          HW_CID(sc, cid),
          data_hi,
          data_lo,
          type,
          atomic_load_acq_long(&sc->cq_spq_left),
          atomic_load_acq_long(&sc->eq_spq_left));

    bxe_sp_prod_update(sc);

    BXE_SP_UNLOCK(sc);
    return (0);
}

/**
 * bxe_debug_print_ind_table - prints the indirection table configuration.
 *
 * @sc: driver handle
 * @p:  pointer to rss configuration
 */

/*
 * FreeBSD Device probe function.
 *
 * Compares the device found to the driver's list of supported devices and
 * reports back to the BSD loader whether this is the right driver for the
 * device. This is the driver entry function called from the "kldload"
 * command.
 *
 * Returns:
 *   BUS_PROBE_DEFAULT on success, positive value on failure.
 */
static int
bxe_probe(device_t dev)
{
    struct bxe_device_type *t;
    char *descbuf;
    uint16_t did, sdid, svid, vid;

    /* Find our device structure */
    t = bxe_devs;

    /* Get the data for the device to be probed.
*/ 2510 vid = pci_get_vendor(dev); 2511 did = pci_get_device(dev); 2512 svid = pci_get_subvendor(dev); 2513 sdid = pci_get_subdevice(dev); 2514 2515 /* Look through the list of known devices for a match. */ 2516 while (t->bxe_name != NULL) { 2517 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2518 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2519 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2520 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2521 if (descbuf == NULL) 2522 return (ENOMEM); 2523 2524 /* Print out the device identity. */ 2525 snprintf(descbuf, BXE_DEVDESC_MAX, 2526 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2527 (((pci_read_config(dev, PCIR_REVID, 4) & 2528 0xf0) >> 4) + 'A'), 2529 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2530 BXE_DRIVER_VERSION); 2531 2532 device_set_desc_copy(dev, descbuf); 2533 free(descbuf, M_TEMP); 2534 return (BUS_PROBE_DEFAULT); 2535 } 2536 t++; 2537 } 2538 2539 return (ENXIO); 2540} 2541 2542static void 2543bxe_init_mutexes(struct bxe_softc *sc) 2544{ 2545#ifdef BXE_CORE_LOCK_SX 2546 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2547 "bxe%d_core_lock", sc->unit); 2548 sx_init(&sc->core_sx, sc->core_sx_name); 2549#else 2550 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2551 "bxe%d_core_lock", sc->unit); 2552 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2553#endif 2554 2555 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2556 "bxe%d_sp_lock", sc->unit); 2557 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2558 2559 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2560 "bxe%d_dmae_lock", sc->unit); 2561 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2562 2563 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2564 "bxe%d_phy_lock", sc->unit); 2565 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2566 2567 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2568 "bxe%d_fwmb_lock", sc->unit); 2569 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2570 2571 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2572 "bxe%d_print_lock", sc->unit); 2573 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2574 2575 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2576 "bxe%d_stats_lock", sc->unit); 2577 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2578 2579 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2580 "bxe%d_mcast_lock", sc->unit); 2581 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2582} 2583 2584static void 2585bxe_release_mutexes(struct bxe_softc *sc) 2586{ 2587#ifdef BXE_CORE_LOCK_SX 2588 sx_destroy(&sc->core_sx); 2589#else 2590 if (mtx_initialized(&sc->core_mtx)) { 2591 mtx_destroy(&sc->core_mtx); 2592 } 2593#endif 2594 2595 if (mtx_initialized(&sc->sp_mtx)) { 2596 mtx_destroy(&sc->sp_mtx); 2597 } 2598 2599 if (mtx_initialized(&sc->dmae_mtx)) { 2600 mtx_destroy(&sc->dmae_mtx); 2601 } 2602 2603 if (mtx_initialized(&sc->port.phy_mtx)) { 2604 mtx_destroy(&sc->port.phy_mtx); 2605 } 2606 2607 if (mtx_initialized(&sc->fwmb_mtx)) { 2608 mtx_destroy(&sc->fwmb_mtx); 2609 } 2610 2611 if (mtx_initialized(&sc->print_mtx)) { 2612 mtx_destroy(&sc->print_mtx); 2613 } 2614 2615 if (mtx_initialized(&sc->stats_mtx)) { 2616 mtx_destroy(&sc->stats_mtx); 2617 } 2618 2619 if (mtx_initialized(&sc->mcast_mtx)) { 2620 mtx_destroy(&sc->mcast_mtx); 2621 } 2622} 2623 2624static void 2625bxe_tx_disable(struct bxe_softc* sc) 2626{ 2627 if_t ifp = sc->ifp; 2628 2629 /* tell the stack 
the driver is stopped and the TX queue is full */
    if (ifp != NULL) {
        if_setdrvflags(ifp, 0);
    }
}

static void
bxe_drv_pulse(struct bxe_softc *sc)
{
    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
             sc->fw_drv_pulse_wr_seq);
}

static inline uint16_t
bxe_tx_avail(struct bxe_softc *sc,
             struct bxe_fastpath *fp)
{
    int16_t used;
    uint16_t prod;
    uint16_t cons;

    prod = fp->tx_bd_prod;
    cons = fp->tx_bd_cons;

    used = SUB_S16(prod, cons);

    return (int16_t)(sc->tx_ring_size) - used;
}

static inline int
bxe_tx_queue_has_work(struct bxe_fastpath *fp)
{
    uint16_t hw_cons;

    mb(); /* status block fields can change */
    hw_cons = le16toh(*fp->tx_cons_sb);
    return (hw_cons != fp->tx_pkt_cons);
}

static inline uint8_t
bxe_has_tx_work(struct bxe_fastpath *fp)
{
    /* expand this for multi-cos if ever supported */
    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
}

static inline int
bxe_has_rx_work(struct bxe_fastpath *fp)
{
    uint16_t rx_cq_cons_sb;

    mb(); /* status block fields can change */
    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
        rx_cq_cons_sb++;
    return (fp->rx_cq_cons != rx_cq_cons_sb);
}

static void
bxe_sp_event(struct bxe_softc *sc,
             struct bxe_fastpath *fp,
             union eth_rx_cqe *rr_cqe)
{
    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);

    switch (command) {
    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
        drv_cmd = ECORE_Q_CMD_UPDATE;
        break;

    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP;
        break;

    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
        break;

    case (RAMROD_CMD_ID_ETH_HALT):
        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_HALT;
        break;

    case (RAMROD_CMD_ID_ETH_TERMINATE):
        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_TERMINATE;
        break;

    case (RAMROD_CMD_ID_ETH_EMPTY):
        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_EMPTY;
        break;

    default:
        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
              command, fp->index);
        return;
    }

    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
        /*
         * q_obj->complete_cmd() failure means that this was
         * an unexpected completion.
         *
         * In this case we don't want to increase the sc->spq_left
         * because apparently we haven't sent this command in the first
         * place.
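         *
         * (Crediting cq_spq_left for a completion we never posted would
         * let the driver oversubscribe the slowpath ring, so return
         * without touching the counter.)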
2746 */ 2747 // bxe_panic(sc, ("Unexpected SP completion\n")); 2748 return; 2749 } 2750 2751 atomic_add_acq_long(&sc->cq_spq_left, 1); 2752 2753 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2754 atomic_load_acq_long(&sc->cq_spq_left)); 2755} 2756 2757/* 2758 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2759 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2760 * the current aggregation queue as in-progress. 2761 */ 2762static void 2763bxe_tpa_start(struct bxe_softc *sc, 2764 struct bxe_fastpath *fp, 2765 uint16_t queue, 2766 uint16_t cons, 2767 uint16_t prod, 2768 struct eth_fast_path_rx_cqe *cqe) 2769{ 2770 struct bxe_sw_rx_bd tmp_bd; 2771 struct bxe_sw_rx_bd *rx_buf; 2772 struct eth_rx_bd *rx_bd; 2773 int max_agg_queues; 2774 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2775 uint16_t index; 2776 2777 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2778 "cons=%d prod=%d\n", 2779 fp->index, queue, cons, prod); 2780 2781 max_agg_queues = MAX_AGG_QS(sc); 2782 2783 KASSERT((queue < max_agg_queues), 2784 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2785 fp->index, queue, max_agg_queues)); 2786 2787 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2788 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2789 fp->index, queue)); 2790 2791 /* copy the existing mbuf and mapping from the TPA pool */ 2792 tmp_bd = tpa_info->bd; 2793 2794 if (tmp_bd.m == NULL) { 2795 uint32_t *tmp; 2796 2797 tmp = (uint32_t *)cqe; 2798 2799 BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n", 2800 fp->index, queue, cons, prod); 2801 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", 2802 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); 2803 2804 /* XXX Error handling? */ 2805 return; 2806 } 2807 2808 /* change the TPA queue to the start state */ 2809 tpa_info->state = BXE_TPA_STATE_START; 2810 tpa_info->placement_offset = cqe->placement_offset; 2811 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2812 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2813 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2814 2815 fp->rx_tpa_queue_used |= (1 << queue); 2816 2817 /* 2818 * If all the buffer descriptors are filled with mbufs then fill in 2819 * the current consumer index with a new BD. Else if a maximum Rx 2820 * buffer limit is imposed then fill in the next producer index. 2821 */ 2822 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 2823 prod : cons; 2824 2825 /* move the received mbuf and mapping to TPA pool */ 2826 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2827 2828 /* release any existing RX BD mbuf mappings */ 2829 if (cons != index) { 2830 rx_buf = &fp->rx_mbuf_chain[cons]; 2831 2832 if (rx_buf->m_map != NULL) { 2833 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2834 BUS_DMASYNC_POSTREAD); 2835 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2836 } 2837 2838 /* 2839 * We get here when the maximum number of rx buffers is less than 2840 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2841 * it out here without concern of a memory leak. 
2842 */ 2843 fp->rx_mbuf_chain[cons].m = NULL; 2844 } 2845 2846 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2847 fp->rx_mbuf_chain[index] = tmp_bd; 2848 2849 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2850 rx_bd = &fp->rx_chain[index]; 2851 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2852 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2853} 2854 2855/* 2856 * When a TPA aggregation is completed, loop through the individual mbufs 2857 * of the aggregation, combining them into a single mbuf which will be sent 2858 * up the stack. Refill all freed SGEs with mbufs as we go along. 2859 */ 2860static int 2861bxe_fill_frag_mbuf(struct bxe_softc *sc, 2862 struct bxe_fastpath *fp, 2863 struct bxe_sw_tpa_info *tpa_info, 2864 uint16_t queue, 2865 uint16_t pages, 2866 struct mbuf *m, 2867 struct eth_end_agg_rx_cqe *cqe, 2868 uint16_t cqe_idx) 2869{ 2870 struct mbuf *m_frag; 2871 uint32_t frag_len, frag_size, i; 2872 uint16_t sge_idx; 2873 int rc = 0; 2874 int j; 2875 2876 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 2877 2878 BLOGD(sc, DBG_LRO, 2879 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 2880 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 2881 2882 /* make sure the aggregated frame is not too big to handle */ 2883 if (pages > 8 * PAGES_PER_SGE) { 2884 2885 uint32_t *tmp = (uint32_t *)cqe; 2886 2887 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 2888 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 2889 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 2890 tpa_info->len_on_bd, frag_size); 2891 2892 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", 2893 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); 2894 2895 bxe_panic(sc, ("sge page count error\n")); 2896 return (EINVAL); 2897 } 2898 2899 /* 2900 * Scan through the scatter gather list pulling individual mbufs into a 2901 * single mbuf for the host stack. 2902 */ 2903 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 2904 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 2905 2906 /* 2907 * Firmware gives the indices of the SGE as if the ring is an array 2908 * (meaning that the "next" element will consume 2 indices). 2909 */ 2910 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 2911 2912 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 2913 "sge_idx=%d frag_size=%d frag_len=%d\n", 2914 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 2915 2916 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 2917 2918 /* allocate a new mbuf for the SGE */ 2919 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 2920 if (rc) { 2921 /* Leave all remaining SGEs in the ring! 
*/
            return (rc);
        }

        /* update the fragment length */
        m_frag->m_len = frag_len;

        /* concatenate the fragment to the head mbuf */
        m_cat(m, m_frag);
        fp->eth_q_stats.mbuf_alloc_sge--;

        /* update the TPA mbuf size and remaining fragment size */
        m->m_pkthdr.len += frag_len;
        frag_size -= frag_len;
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
          fp->index, queue, frag_size);

    return (rc);
}

static inline void
bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
{
    int i, j;

    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;

        for (j = 0; j < 2; j++) {
            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
            idx--;
        }
    }
}

static inline void
bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
{
    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

    /*
     * Clear the two last indices in the page to 1. These are the indices that
     * correspond to the "next" element, hence will never be indicated and
     * should be removed from the calculations.
     */
    bxe_clear_sge_mask_next_elems(fp);
}

static inline void
bxe_update_last_max_sge(struct bxe_fastpath *fp,
                        uint16_t idx)
{
    uint16_t last_max = fp->last_max_sge;

    if (SUB_S16(idx, last_max) > 0) {
        fp->last_max_sge = idx;
    }
}

static inline void
bxe_update_sge_prod(struct bxe_softc *sc,
                    struct bxe_fastpath *fp,
                    uint16_t sge_len,
                    union eth_sgl_or_raw_data *cqe)
{
    uint16_t last_max, last_elem, first_elem;
    uint16_t delta = 0;
    uint16_t i;

    if (!sge_len) {
        return;
    }

    /* first mark all used pages */
    for (i = 0; i < sge_len; i++) {
        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                            RX_SGE(le16toh(cqe->sgl[i])));
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
          fp->index, sge_len - 1,
          le16toh(cqe->sgl[sge_len - 1]));

    /* assume that the last SGE index is the biggest */
    bxe_update_last_max_sge(fp,
                            le16toh(cqe->sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

    /* if ring is not full */
    if (last_elem + 1 != first_elem) {
        last_elem++;
    }

    /* now update the prod */
    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
        if (__predict_true(fp->sge_mask[i])) {
            break;
        }

        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
        delta += BIT_VEC64_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bxe_clear_sge_mask_next_elems(fp);
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
          fp->index, fp->last_max_sge, fp->rx_sge_prod);
}

/*
 * The aggregation on the current TPA queue has completed. Pull the individual
 * mbuf fragments together into a single mbuf, perform all necessary checksum
 * calculations, and send the resulting mbuf to the stack.
 */
static void
bxe_tpa_stop(struct bxe_softc *sc,
             struct bxe_fastpath *fp,
             struct bxe_sw_tpa_info *tpa_info,
             uint16_t queue,
             uint16_t pages,
             struct eth_end_agg_rx_cqe *cqe,
             uint16_t cqe_idx)
{
    if_t ifp = sc->ifp;
    struct mbuf *m;
    int rc = 0;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
          fp->index, queue, tpa_info->placement_offset,
          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);

    m = tpa_info->bd.m;

    /* allocate a replacement before modifying existing mbuf */
    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
    if (rc) {
        /* drop the frame and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        goto bxe_tpa_stop_exit;
    }

    /* we have a replacement, fixup the current mbuf */
    m_adj(m, tpa_info->placement_offset);
    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;

    /* mark the checksums valid (taken care of by the firmware) */
    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
    m->m_pkthdr.csum_data = 0xffff;
    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                               CSUM_IP_VALID |
                               CSUM_DATA_VALID |
                               CSUM_PSEUDO_HDR);

    /* aggregate all of the SGEs into a single mbuf */
    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
    if (rc) {
        /* drop the packet and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        m_freem(m);
    } else {
        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* assign the packet to this interface */
        if_setrcvif(m, ifp);

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        BXE_SET_FLOWID(m);
#endif

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        fp->eth_q_stats.rx_tpa_pkts++;

        /* pass the frame to the stack */
        if_input(ifp, m);
    }

    /* we passed an mbuf up the stack or dropped the frame */
    fp->eth_q_stats.mbuf_alloc_tpa--;

bxe_tpa_stop_exit:

    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
    fp->rx_tpa_queue_used &= ~(1 << queue);
}

static uint8_t
bxe_service_rxsgl(
                 struct bxe_fastpath *fp,
                 uint16_t len,
                 uint16_t lenonbd,
                 struct mbuf *m,
                 struct eth_fast_path_rx_cqe *cqe_fp)
{
    struct mbuf *m_frag;
    uint16_t frags, frag_len;
    uint16_t sge_idx = 0;
    uint16_t j;
    uint8_t i, rc = 0;
    uint32_t frag_size;

    /* adjust the mbuf */
    m->m_len = lenonbd;

    frag_size = len - lenonbd;
    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
        m_frag->m_len = frag_len;

        /* allocate a new mbuf for the SGE */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring!
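             * bxe_rxeof() treats a non-zero return from this routine as
             * fatal for the current pass and breaks out of its receive
             * loop.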
*/ 3158 return (rc); 3159 } 3160 fp->eth_q_stats.mbuf_alloc_sge--; 3161 3162 /* concatenate the fragment to the head mbuf */ 3163 m_cat(m, m_frag); 3164 3165 frag_size -= frag_len; 3166 } 3167 3168 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3169 3170 return rc; 3171} 3172 3173static uint8_t 3174bxe_rxeof(struct bxe_softc *sc, 3175 struct bxe_fastpath *fp) 3176{ 3177 if_t ifp = sc->ifp; 3178 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3179 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3180 int rx_pkts = 0; 3181 int rc = 0; 3182 3183 BXE_FP_RX_LOCK(fp); 3184 3185 /* CQ "next element" is of the size of the regular element */ 3186 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3187 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3188 hw_cq_cons++; 3189 } 3190 3191 bd_cons = fp->rx_bd_cons; 3192 bd_prod = fp->rx_bd_prod; 3193 bd_prod_fw = bd_prod; 3194 sw_cq_cons = fp->rx_cq_cons; 3195 sw_cq_prod = fp->rx_cq_prod; 3196 3197 /* 3198 * Memory barrier necessary as speculative reads of the rx 3199 * buffer can be ahead of the index in the status block 3200 */ 3201 rmb(); 3202 3203 BLOGD(sc, DBG_RX, 3204 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3205 fp->index, hw_cq_cons, sw_cq_cons); 3206 3207 while (sw_cq_cons != hw_cq_cons) { 3208 struct bxe_sw_rx_bd *rx_buf = NULL; 3209 union eth_rx_cqe *cqe; 3210 struct eth_fast_path_rx_cqe *cqe_fp; 3211 uint8_t cqe_fp_flags; 3212 enum eth_rx_cqe_type cqe_fp_type; 3213 uint16_t len, lenonbd, pad; 3214 struct mbuf *m = NULL; 3215 3216 comp_ring_cons = RCQ(sw_cq_cons); 3217 bd_prod = RX_BD(bd_prod); 3218 bd_cons = RX_BD(bd_cons); 3219 3220 cqe = &fp->rcq_chain[comp_ring_cons]; 3221 cqe_fp = &cqe->fast_path_cqe; 3222 cqe_fp_flags = cqe_fp->type_error_flags; 3223 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3224 3225 BLOGD(sc, DBG_RX, 3226 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3227 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3228 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3229 fp->index, 3230 hw_cq_cons, 3231 sw_cq_cons, 3232 bd_prod, 3233 bd_cons, 3234 CQE_TYPE(cqe_fp_flags), 3235 cqe_fp_flags, 3236 cqe_fp->status_flags, 3237 le32toh(cqe_fp->rss_hash_result), 3238 le16toh(cqe_fp->vlan_tag), 3239 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3240 le16toh(cqe_fp->len_on_bd)); 3241 3242 /* is this a slowpath msg? */ 3243 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3244 bxe_sp_event(sc, fp, cqe); 3245 goto next_cqe; 3246 } 3247 3248 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3249 3250 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3251 struct bxe_sw_tpa_info *tpa_info; 3252 uint16_t frag_size, pages; 3253 uint8_t queue; 3254 3255 if (CQE_TYPE_START(cqe_fp_type)) { 3256 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3257 bd_cons, bd_prod, cqe_fp); 3258 m = NULL; /* packet not ready yet */ 3259 goto next_rx; 3260 } 3261 3262 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3263 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); 3264 3265 queue = cqe->end_agg_cqe.queue_index; 3266 tpa_info = &fp->rx_tpa_info[queue]; 3267 3268 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3269 fp->index, queue); 3270 3271 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3272 tpa_info->len_on_bd); 3273 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3274 3275 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3276 &cqe->end_agg_cqe, comp_ring_cons); 3277 3278 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3279 3280 goto next_cqe; 3281 } 3282 3283 /* non TPA */ 3284 3285 /* is this an error packet? 
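         * The PHY flagged a decode error for this frame: count a soft
         * error and fall through to next_rx, leaving the existing mbuf in
         * the ring to be reused later.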
*/
        if (__predict_false(cqe_fp_flags &
                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
            fp->eth_q_stats.rx_soft_errors++;
            goto next_rx;
        }

        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
        lenonbd = le16toh(cqe_fp->len_on_bd);
        pad = cqe_fp->placement_offset;

        m = rx_buf->m;

        if (__predict_false(m == NULL)) {
            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
                  bd_cons, fp->index);
            goto next_rx;
        }

        /* XXX double copy if packet length under a threshold */

        /*
         * If all the buffer descriptors are filled with mbufs then fill in
         * the current consumer index with a new BD. Else if a maximum Rx
         * buffer limit is imposed then fill in the next producer index.
         */
        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
                                  bd_prod : bd_cons);
        if (rc != 0) {

            /* we simply reuse the received mbuf and don't post it to the stack */
            m = NULL;

            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
                  fp->index, rc);
            fp->eth_q_stats.rx_soft_errors++;

            if (sc->max_rx_bufs != RX_BD_USABLE) {
                /* copy this consumer index to the producer index */
                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
                       sizeof(struct bxe_sw_rx_bd));
                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
            }

            goto next_rx;
        }

        /* current mbuf was detached from the bd */
        fp->eth_q_stats.mbuf_alloc_rx--;

        /* we allocated a replacement mbuf, fixup the current one */
        m_adj(m, pad);
        m->m_pkthdr.len = m->m_len = len;

        if ((len > 60) && (len > lenonbd)) {
            fp->eth_q_stats.rx_bxe_service_rxsgl++;
            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
            if (rc)
                break;
            fp->eth_q_stats.rx_jumbo_sge_pkts++;
        } else if (lenonbd < len) {
            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
        }

        /* assign the packet to this interface */
        if_setrcvif(m, ifp);

        /* assume no hardware checksum has completed */
        m->m_pkthdr.csum_flags = 0;

        /* validate checksum if offload enabled */
        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
            /* check for a valid IP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                }
            }

            /* check for a valid TCP/UDP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
                    m->m_pkthdr.csum_data = 0xFFFF;
                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
                                               CSUM_PSEUDO_HDR);
                }
            }
        }

        /* if there is a VLAN tag then flag that info */
        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
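        /* (the stack can use this flowid to keep the flow on one queue) */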
m->m_pkthdr.flowid = fp->index; 3396 BXE_SET_FLOWID(m); 3397#endif 3398 3399next_rx: 3400 3401 bd_cons = RX_BD_NEXT(bd_cons); 3402 bd_prod = RX_BD_NEXT(bd_prod); 3403 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3404 3405 /* pass the frame to the stack */ 3406 if (__predict_true(m != NULL)) { 3407 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3408 rx_pkts++; 3409 if_input(ifp, m); 3410 } 3411 3412next_cqe: 3413 3414 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3415 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3416 3417 /* limit spinning on the queue */ 3418 if (rc != 0) 3419 break; 3420 3421 if (rx_pkts == sc->rx_budget) { 3422 fp->eth_q_stats.rx_budget_reached++; 3423 break; 3424 } 3425 } /* while work to do */ 3426 3427 fp->rx_bd_cons = bd_cons; 3428 fp->rx_bd_prod = bd_prod_fw; 3429 fp->rx_cq_cons = sw_cq_cons; 3430 fp->rx_cq_prod = sw_cq_prod; 3431 3432 /* Update producers */ 3433 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3434 3435 fp->eth_q_stats.rx_pkts += rx_pkts; 3436 fp->eth_q_stats.rx_calls++; 3437 3438 BXE_FP_RX_UNLOCK(fp); 3439 3440 return (sw_cq_cons != hw_cq_cons); 3441} 3442 3443static uint16_t 3444bxe_free_tx_pkt(struct bxe_softc *sc, 3445 struct bxe_fastpath *fp, 3446 uint16_t idx) 3447{ 3448 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3449 struct eth_tx_start_bd *tx_start_bd; 3450 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3451 uint16_t new_cons; 3452 int nbd; 3453 3454 /* unmap the mbuf from non-paged memory */ 3455 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3456 3457 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3458 nbd = le16toh(tx_start_bd->nbd) - 1; 3459 3460 new_cons = (tx_buf->first_bd + nbd); 3461 3462 /* free the mbuf */ 3463 if (__predict_true(tx_buf->m != NULL)) { 3464 m_freem(tx_buf->m); 3465 fp->eth_q_stats.mbuf_alloc_tx--; 3466 } else { 3467 fp->eth_q_stats.tx_chain_lost_mbuf++; 3468 } 3469 3470 tx_buf->m = NULL; 3471 tx_buf->first_bd = 0; 3472 3473 return (new_cons); 3474} 3475 3476/* transmit timeout watchdog */ 3477static int 3478bxe_watchdog(struct bxe_softc *sc, 3479 struct bxe_fastpath *fp) 3480{ 3481 BXE_FP_TX_LOCK(fp); 3482 3483 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3484 BXE_FP_TX_UNLOCK(fp); 3485 return (0); 3486 } 3487 3488 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3489 3490 BXE_FP_TX_UNLOCK(fp); 3491 BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK); 3492 taskqueue_enqueue_timeout(taskqueue_thread, 3493 &sc->sp_err_timeout_task, hz/10); 3494 3495 return (-1); 3496} 3497 3498/* processes transmit completions */ 3499static uint8_t 3500bxe_txeof(struct bxe_softc *sc, 3501 struct bxe_fastpath *fp) 3502{ 3503 if_t ifp = sc->ifp; 3504 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3505 uint16_t tx_bd_avail; 3506 3507 BXE_FP_TX_LOCK_ASSERT(fp); 3508 3509 bd_cons = fp->tx_bd_cons; 3510 hw_cons = le16toh(*fp->tx_cons_sb); 3511 sw_cons = fp->tx_pkt_cons; 3512 3513 while (sw_cons != hw_cons) { 3514 pkt_cons = TX_BD(sw_cons); 3515 3516 BLOGD(sc, DBG_TX, 3517 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3518 fp->index, hw_cons, sw_cons, pkt_cons); 3519 3520 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3521 3522 sw_cons++; 3523 } 3524 3525 fp->tx_pkt_cons = sw_cons; 3526 fp->tx_bd_cons = bd_cons; 3527 3528 BLOGD(sc, DBG_TX, 3529 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3530 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3531 3532 mb(); 3533 3534 tx_bd_avail = bxe_tx_avail(sc, fp); 3535 3536 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3537 if_setdrvflagbits(ifp, 
IFF_DRV_OACTIVE, 0);
    } else {
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    }

    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
        /* reset the watchdog timer if there are pending transmits */
        fp->watchdog_timer = BXE_TX_TIMEOUT;
        return (TRUE);
    } else {
        /* clear watchdog when there are no pending transmits */
        fp->watchdog_timer = 0;
        return (FALSE);
    }
}

static void
bxe_drain_tx_queues(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i, count;

    /* wait until all TX fastpath tasks have completed */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        count = 1000;

        while (bxe_has_tx_work(fp)) {

            BXE_FP_TX_LOCK(fp);
            bxe_txeof(sc, fp);
            BXE_FP_TX_UNLOCK(fp);

            if (count == 0) {
                BLOGE(sc, "Timeout waiting for fp[%d] "
                          "transmits to complete!\n", i);
                bxe_panic(sc, ("tx drain failure\n"));
                return;
            }

            count--;
            DELAY(1000);
            rmb();
        }
    }

    return;
}

static int
bxe_del_all_macs(struct bxe_softc *sc,
                 struct ecore_vlan_mac_obj *mac_obj,
                 int mac_type,
                 uint8_t wait_for_comp)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    int rc;

    /* wait for completion of the requested command */
    if (wait_for_comp) {
        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    }

    /* Set the mac type of addresses we want to clear */
    bxe_set_bit(mac_type, &vlan_mac_flags);

    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
              rc, mac_type, wait_for_comp);
    }

    return (rc);
}

static int
bxe_fill_accept_flags(struct bxe_softc *sc,
                      uint32_t rx_mode,
                      unsigned long *rx_accept_flags,
                      unsigned long *tx_accept_flags)
{
    /* Clear the flags first */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    switch (rx_mode) {
    case BXE_RX_MODE_NONE:
        /*
         * 'drop all' supersedes any accept flags that may have been
         * passed to the function.
         */
        break;

    case BXE_RX_MODE_NORMAL:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_ALLMULTI:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_PROMISC:
        /*
         * According to the definition of SI mode, an interface in
         * promiscuous mode should receive matched and unmatched (in
         * resolution of port) unicast packets.
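         *
         * (ECORE_ACCEPT_UNMATCHED, set first below, is what admits those
         * unmatched unicast frames.)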
3660 */ 3661 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3662 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3663 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3664 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3665 3666 /* internal switching mode */ 3667 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3668 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3669 3670 if (IS_MF_SI(sc)) { 3671 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3672 } else { 3673 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3674 } 3675 3676 break; 3677 3678 default: 3679 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); 3680 return (-1); 3681 } 3682 3683 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3684 if (rx_mode != BXE_RX_MODE_NONE) { 3685 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3686 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3687 } 3688 3689 return (0); 3690} 3691 3692static int 3693bxe_set_q_rx_mode(struct bxe_softc *sc, 3694 uint8_t cl_id, 3695 unsigned long rx_mode_flags, 3696 unsigned long rx_accept_flags, 3697 unsigned long tx_accept_flags, 3698 unsigned long ramrod_flags) 3699{ 3700 struct ecore_rx_mode_ramrod_params ramrod_param; 3701 int rc; 3702 3703 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3704 3705 /* Prepare ramrod parameters */ 3706 ramrod_param.cid = 0; 3707 ramrod_param.cl_id = cl_id; 3708 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3709 ramrod_param.func_id = SC_FUNC(sc); 3710 3711 ramrod_param.pstate = &sc->sp_state; 3712 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3713 3714 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3715 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3716 3717 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3718 3719 ramrod_param.ramrod_flags = ramrod_flags; 3720 ramrod_param.rx_mode_flags = rx_mode_flags; 3721 3722 ramrod_param.rx_accept_flags = rx_accept_flags; 3723 ramrod_param.tx_accept_flags = tx_accept_flags; 3724 3725 rc = ecore_config_rx_mode(sc, &ramrod_param); 3726 if (rc < 0) { 3727 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " 3728 "rx_accept_flags 0x%x tx_accept_flags 0x%x " 3729 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, 3730 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, 3731 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); 3732 return (rc); 3733 } 3734 3735 return (0); 3736} 3737 3738static int 3739bxe_set_storm_rx_mode(struct bxe_softc *sc) 3740{ 3741 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3742 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3743 int rc; 3744 3745 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3746 &tx_accept_flags); 3747 if (rc) { 3748 return (rc); 3749 } 3750 3751 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3752 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3753 3754 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3755 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3756 rx_accept_flags, tx_accept_flags, 3757 ramrod_flags)); 3758} 3759 3760/* returns the "mcp load_code" according to global load_count array */ 3761static int 3762bxe_nic_load_no_mcp(struct bxe_softc *sc) 3763{ 3764 int path = SC_PATH(sc); 3765 int port = SC_PORT(sc); 3766 3767 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3768 path, load_count[path][0], load_count[path][1], 3769 load_count[path][2]); 3770 load_count[path][0]++; 3771 load_count[path][1 + port]++; 3772 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 
path, load_count[path][0], load_count[path][1],
          load_count[path][2]);
    if (load_count[path][0] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_COMMON);
    } else if (load_count[path][1 + port] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_PORT);
    } else {
        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
    }
}

/* returns the "mcp load_code" according to global load_count array */
static int
bxe_nic_unload_no_mcp(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int path = SC_PATH(sc);

    BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);
    load_count[path][0]--;
    load_count[path][1 + port]--;
    BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);
    if (load_count[path][0] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
    } else if (load_count[path][1 + port] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
    } else {
        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
    }
}

/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
static uint32_t
bxe_send_unload_req(struct bxe_softc *sc,
                    int unload_mode)
{
    uint32_t reset_code = 0;

    /*
     * Select the UNLOAD request mode. (Both branches currently request
     * a WOL-disabled unload.)
     */
    if (unload_mode == UNLOAD_NORMAL) {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    } else {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    }

    /* Send the request to the MCP */
    if (!BXE_NOMCP(sc)) {
        reset_code = bxe_fw_command(sc, reset_code, 0);
    } else {
        reset_code = bxe_nic_unload_no_mcp(sc);
    }

    return (reset_code);
}

/* send UNLOAD_DONE command to the MCP */
static void
bxe_send_unload_done(struct bxe_softc *sc,
                     uint8_t keep_link)
{
    uint32_t reset_param =
        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

    /* Report UNLOAD_DONE to MCP */
    if (!BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
    }
}

static int
bxe_func_wait_started(struct bxe_softc *sc)
{
    int tout = 50;

    if (!sc->port.pmf) {
        return (0);
    }

    /*
     * (assumption: No Attention from MCP at this stage)
     * The PMF is probably in the middle of a TX disable/enable transaction.
     * 1. Sync the ISR for the default SB
     * 2. Sync the SP queue - this guarantees us that attention handling
     *    has started
     * 3. Wait until the TX disable/enable transaction completes
     *
     * 1+2 guarantee that if DCBX attention was scheduled it already changed
     * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
     * already received completion for the transaction the state is
     * TX_STOPPED. The state will return to STARTED after completion of the
     * TX_STOPPED-->STARTED transition.
     */

    /* XXX make sure default SB ISR is done */
    /* need a way to synchronize an irq (intr_mtx?) */

    /* XXX flush any work queues */

    while (ecore_func_get_state(sc, &sc->func_obj) !=
           ECORE_F_STATE_STARTED && tout--) {
        DELAY(20000);
    }

    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
        /*
         * Failed to complete the transaction in a "good way".
         * Force both transactions with the CLR bit.
         */
        struct ecore_func_state_params func_params = { NULL };

        BLOGE(sc, "Unexpected function state! "
                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");

        func_params.f_obj = &sc->func_obj;
        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);

        /* STARTED-->TX_STOPPED */
        func_params.cmd = ECORE_F_CMD_TX_STOP;
        ecore_func_state_change(sc, &func_params);

        /* TX_STOPPED-->STARTED */
        func_params.cmd = ECORE_F_CMD_TX_START;
        return (ecore_func_state_change(sc, &func_params));
    }

    return (0);
}

static int
bxe_stop_queue(struct bxe_softc *sc,
               int index)
{
    struct bxe_fastpath *fp = &sc->fp[index];
    struct ecore_queue_state_params q_params = { NULL };
    int rc;

    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);

    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
    /* We want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* Stop the primary connection: */

    /* ...halt the connection */
    q_params.cmd = ECORE_Q_CMD_HALT;
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        return (rc);
    }

    /* ...terminate the connection */
    q_params.cmd = ECORE_Q_CMD_TERMINATE;
    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        return (rc);
    }

    /* ...delete cfc entry */
    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
    return (ecore_queue_state_change(sc, &q_params));
}

/* wait for the outstanding SP commands */
static inline uint8_t
bxe_wait_sp_comp(struct bxe_softc *sc,
                 unsigned long mask)
{
    unsigned long tmp;
    int tout = 5000; /* wait for 5 secs tops */

    while (tout--) {
        mb();
        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
            return (TRUE);
        }

        DELAY(1000);
    }

    mb();

    tmp = atomic_load_acq_long(&sc->sp_state);
    if (tmp & mask) {
        BLOGE(sc, "Filtering completion timed out: "
                  "sp_state 0x%lx, mask 0x%lx\n",
              tmp, mask);
        return (FALSE);
    }

    /* the masked bits cleared between the final poll and the re-read above */
    return (TRUE);
}

static int
bxe_func_stop(struct bxe_softc *sc)
{
    struct ecore_func_state_params func_params = { NULL };
    int rc;

    /* prepare parameters for function state transitions */
    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_STOP;

    /*
     * Try to stop the function the 'good way'. If it fails (in case
     * of a parity error during bxe_chip_cleanup()) and we are
     * not in a debug mode, perform a state transaction in order to
     * enable further HW_RESET transactions.
     */
    rc = ecore_func_state_change(sc, &func_params);
    if (rc) {
        BLOGE(sc, "FUNC_STOP ramrod failed. "
                  "Running a dry transaction (%d)\n", rc);
        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
        return (ecore_func_state_change(sc, &func_params));
    }

    return (0);
}

static int
bxe_reset_hw(struct bxe_softc *sc,
             uint32_t load_code)
{
    struct ecore_func_state_params func_params = { NULL };

    /* Prepare parameters for function state transitions */
    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_HW_RESET;

    func_params.params.hw_init.load_phase = load_code;

    return (ecore_func_state_change(sc, &func_params));
}

static void
bxe_int_disable_sync(struct bxe_softc *sc,
                     int disable_hw)
{
    if (disable_hw) {
        /* prevent the HW from sending interrupts */
        bxe_int_disable(sc);
    }

    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
    /* make sure all ISRs are done */

    /* XXX make sure sp_task is not running */
    /* cancel and flush work queues */
}

static void
bxe_chip_cleanup(struct bxe_softc *sc,
                 uint32_t unload_mode,
                 uint8_t keep_link)
{
    int port = SC_PORT(sc);
    struct ecore_mcast_ramrod_params rparam = { NULL };
    uint32_t reset_code;
    int i, rc = 0;

    bxe_drain_tx_queues(sc);

    /* give HW time to discard old tx messages */
    DELAY(1000);

    /* Clean all ETH MACs */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
    }

    /* Clean up UC list */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
    }

    /* Disable LLH */
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
    }

    /* Set "drop all" to stop Rx */

    /*
     * We need to take the BXE_MCAST_LOCK() here in order to prevent
     * a race between the completion code and this code.
     */
    BXE_MCAST_LOCK(sc);

    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
    } else {
        bxe_set_storm_rx_mode(sc);
    }

    /* Clean up multicast configuration */
    rparam.mcast_obj = &sc->mcast_obj;
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    BXE_MCAST_UNLOCK(sc);

    // XXX bxe_iov_chip_cleanup(sc);

    /*
     * Send the UNLOAD_REQUEST to the MCP. This will return whether
     * this function should perform a FUNCTION, PORT, or COMMON HW
     * reset.
     */
    reset_code = bxe_send_unload_req(sc, unload_mode);

    /*
     * (assumption: No Attention from MCP at this stage)
     * The PMF is probably in the middle of a TX disable/enable transaction.
     */
    rc = bxe_func_wait_started(sc);
    if (rc) {
        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
    }

    /*
     * Close multi and leading connections.
     * Completions for ramrods are collected in a synchronous way.
     */
    for (i = 0; i < sc->num_queues; i++) {
        if (bxe_stop_queue(sc, i)) {
            goto unload_error;
        }
    }

    /*
     * If the SP settings didn't get completed so far - something
     * very wrong has happened.
4120     */
4121    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4122        BLOGE(sc, "Common slow path ramrods got stuck! (%d)\n", rc);
4123    }
4124
4125unload_error:
4126
4127    rc = bxe_func_stop(sc);
4128    if (rc) {
4129        BLOGE(sc, "Function stop failed! (%d)\n", rc);
4130    }
4131
4132    /* disable HW interrupts */
4133    bxe_int_disable_sync(sc, TRUE);
4134
4135    /* detach interrupts */
4136    bxe_interrupt_detach(sc);
4137
4138    /* Reset the chip */
4139    rc = bxe_reset_hw(sc, reset_code);
4140    if (rc) {
4141        BLOGE(sc, "Hardware reset failed (%d)\n", rc);
4142    }
4143
4144    /* Report UNLOAD_DONE to MCP */
4145    bxe_send_unload_done(sc, keep_link);
4146}
4147
4148static void
4149bxe_disable_close_the_gate(struct bxe_softc *sc)
4150{
4151    uint32_t val;
4152    int port = SC_PORT(sc);
4153
4154    BLOGD(sc, DBG_LOAD,
4155          "Disabling 'close the gates'\n");
4156
4157    if (CHIP_IS_E1(sc)) {
4158        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4159                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4160        val = REG_RD(sc, addr);
4161        val &= ~(0x300);
4162        REG_WR(sc, addr, val);
4163    } else {
4164        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4165        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4166                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4167        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4168    }
4169}
4170
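/*
 * Note on the E1 branch above (an assumption, hedged because the bit names
 * are only spelled out for the E2+ register): the literal mask 0x300 clears
 * bits 8 and 9 of the per-function AEU attention mask, which appear to be
 * the PXP and NIG "close the gate" mask bits. An equivalent spelling of the
 * read-modify-write, kept out of the build:
 */
#if 0
static void
close_the_gate_mask_model(struct bxe_softc *sc, uint32_t addr)
{
    uint32_t val = REG_RD(sc, addr);
    val &= ~(1 << 8); /* assumed PXP close-the-gate mask bit */
    val &= ~(1 << 9); /* assumed NIG close-the-gate mask bit */
    REG_WR(sc, addr, val);
}
#endif
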
4171/*
4172 * Cleans the objects that have internal lists, without sending ramrods.
4173 * Should be run while interrupts are disabled.
4174 */
4175static void
4176bxe_squeeze_objects(struct bxe_softc *sc)
4177{
4178    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4179    struct ecore_mcast_ramrod_params rparam = { NULL };
4180    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4181    int rc;
4182
4183    /* Cleanup MACs' object first... */
4184
4185    /* Wait for completion of the requested commands */
4186    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4187    /* Perform a dry cleanup */
4188    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4189
4190    /* Clean ETH primary MAC */
4191    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4192    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4193                             &ramrod_flags);
4194    if (rc != 0) {
4195        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4196    }
4197
4198    /* Cleanup UC list */
4199    vlan_mac_flags = 0;
4200    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4201    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4202                             &ramrod_flags);
4203    if (rc != 0) {
4204        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4205    }
4206
4207    /* Now clean mcast object... */
4208
4209    rparam.mcast_obj = &sc->mcast_obj;
4210    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4211
4212    /* Add a DEL command... */
4213    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4214    if (rc < 0) {
4215        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4216    }
4217
4218    /* now wait until all pending commands are cleared */
4219
4220    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221    while (rc != 0) {
4222        if (rc < 0) {
4223            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4224            return;
4225        }
4226
4227        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4228    }
4229}
4230
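/*
 * Sketch only (kept out of the build): the drain loop above relies on the
 * return contract implied by its structure -- ecore_config_mcast() with
 * ECORE_MCAST_CMD_CONT returns a positive value while commands remain
 * pending, zero once the object is clean, and a negative value on error.
 * A compressed equivalent:
 */
#if 0
static void
mcast_drain_model(struct bxe_softc *sc,
                  struct ecore_mcast_ramrod_params *rparam)
{
    int rc;

    while ((rc = ecore_config_mcast(sc, rparam, ECORE_MCAST_CMD_CONT)) > 0) {
        continue; /* keep issuing CONT until nothing is pending */
    }

    if (rc < 0) {
        BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
    }
}
#endif
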
4231/* stop the controller */
4232static __noinline int
4233bxe_nic_unload(struct bxe_softc *sc,
4234               uint32_t         unload_mode,
4235               uint8_t          keep_link)
4236{
4237    uint8_t global = FALSE;
4238    uint32_t val;
4239    int i;
4240
4241    BXE_CORE_LOCK_ASSERT(sc);
4242
4243    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4244
4245    for (i = 0; i < sc->num_queues; i++) {
4246        struct bxe_fastpath *fp;
4247
4248        fp = &sc->fp[i];
4249        fp->watchdog_timer = 0;
4250        BXE_FP_TX_LOCK(fp);
4251        BXE_FP_TX_UNLOCK(fp);
4252    }
4253
4254    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4255
4256    /* mark driver as unloaded in shmem2 */
4257    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4258        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4259        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4260                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4261    }
4262
4263    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4264        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4265
4266        if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4267            /*
4268             * We can get here if the driver has been unloaded
4269             * during parity error recovery and is either waiting for a
4270             * leader to complete or for other functions to unload and
4271             * then ifconfig down has been issued. In this case we want to
4272             * unload and let the other functions complete the
4273             * recovery process.
4274             */
4275            sc->recovery_state = BXE_RECOVERY_DONE;
4276            sc->is_leader = 0;
4277            bxe_release_leader_lock(sc);
4278            mb();
4279            BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4280        }
4281        BLOGE(sc, "Can't unload in closed or error state recovery_state 0x%x"
4282              " state = 0x%x\n", sc->recovery_state, sc->state);
4283        return (-1);
4284    }
4285
4286    /*
4287     * Nothing to do during unload if the previous bxe_nic_load()
4288     * did not complete successfully; all resources have already been released.
4289     */
4290    if ((sc->state == BXE_STATE_CLOSED) ||
4291        (sc->state == BXE_STATE_ERROR)) {
4292        return (0);
4293    }
4294
4295    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4296    mb();
4297
4298    /* stop tx */
4299    bxe_tx_disable(sc);
4300
4301    sc->rx_mode = BXE_RX_MODE_NONE;
4302    /* XXX set rx mode ??? */
4303
4304    if (IS_PF(sc) && !sc->grcdump_done) {
4305        /* set ALWAYS_ALIVE bit in shmem */
4306        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4307
4308        bxe_drv_pulse(sc);
4309
4310        bxe_stats_handle(sc, STATS_EVENT_STOP);
4311        bxe_save_statistics(sc);
4312    }
4313
4314    /* wait till consumers catch up with producers in all queues */
4315    bxe_drain_tx_queues(sc);
4316
4317    /* If this is a VF, indicate to the PF that this function is going down
4318     * (the PF will then delete the sp elements and clear the initializations).
4319     */
4320    if (IS_VF(sc)) {
4321        ; /* bxe_vfpf_close_vf(sc); */
4322    } else if (unload_mode != UNLOAD_RECOVERY) {
4323        /* if this is a normal/close unload need to clean up chip */
4324        if (!sc->grcdump_done)
4325            bxe_chip_cleanup(sc, unload_mode, keep_link);
4326    } else {
4327        /* Send the UNLOAD_REQUEST to the MCP */
4328        bxe_send_unload_req(sc, unload_mode);
4329
4330        /*
4331         * In case of a global attention, prevent host transactions from
4332         * the functions on the engine that does not reset the global
4333         * blocks, once the global blocks have been reset and the gates
4334         * are opened (that is, on the engine whose leader will perform
4335         * the recovery last).
4336         */
4337        if (!CHIP_IS_E1x(sc)) {
4338            bxe_pf_disable(sc);
4339        }
4340
4341        /* disable HW interrupts */
4342        bxe_int_disable_sync(sc, TRUE);
4343
4344        /* detach interrupts */
4345        bxe_interrupt_detach(sc);
4346
4347        /* Report UNLOAD_DONE to MCP */
4348        bxe_send_unload_done(sc, FALSE);
4349    }
4350
4351    /*
4352     * At this stage no more interrupts will arrive, so we may safely clean
4353     * the queueable objects here in case they failed to get cleaned so far.
4354     */
4355    if (IS_PF(sc)) {
4356        bxe_squeeze_objects(sc);
4357    }
4358
4359    /* There should be no more pending SP commands at this stage */
4360    sc->sp_state = 0;
4361
4362    sc->port.pmf = 0;
4363
4364    bxe_free_fp_buffers(sc);
4365
4366    if (IS_PF(sc)) {
4367        bxe_free_mem(sc);
4368    }
4369
4370    bxe_free_fw_stats_mem(sc);
4371
4372    sc->state = BXE_STATE_CLOSED;
4373
4374    /*
4375     * Check if there are pending parity attentions. If there are - set
4376     * RECOVERY_IN_PROGRESS.
4377     */
4378    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4379        bxe_set_reset_in_progress(sc);
4380
4381        /* Set RESET_IS_GLOBAL if needed */
4382        if (global) {
4383            bxe_set_reset_global(sc);
4384        }
4385    }
4386
4387    /*
4388     * The last driver to unload must disable the "close the gate"
4389     * functionality if there is no parity attention or "process kill" pending.
4390     */
4391    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4392        bxe_reset_is_done(sc, SC_PATH(sc))) {
4393        bxe_disable_close_the_gate(sc);
4394    }
4395
4396    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4397
4398    bxe_link_report(sc);
4399
4400    return (0);
4401}
4402
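/*
 * How the two ifmedia handlers below are reached from userland
 * (illustrative commands; the exact media keywords depend on the ifmedia
 * entries registered at attach time):
 *
 *   # ifconfig bxe0 media autoselect   -> bxe_ifmedia_update(), IFM_AUTO, OK
 *   # ifconfig bxe0 media 10Gbase-SR   -> bxe_ifmedia_update(), EINVAL
 *                                         (fixed media, cannot be changed)
 *   # ifconfig bxe0                    -> bxe_ifmedia_status() fills in the
 *                                         "status:"/"media:" lines
 */
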
4403/*
4404 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4405 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4406 */
4407static int
4408bxe_ifmedia_update(struct ifnet *ifp)
4409{
4410    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4411    struct ifmedia *ifm;
4412
4413    ifm = &sc->ifmedia;
4414
4415    /* We only support Ethernet media type. */
4416    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4417        return (EINVAL);
4418    }
4419
4420    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4421    case IFM_AUTO:
4422         break;
4423    case IFM_10G_CX4:
4424    case IFM_10G_SR:
4425    case IFM_10G_T:
4426    case IFM_10G_TWINAX:
4427    default:
4428        /* We don't support changing the media type. */
4429        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4430              IFM_SUBTYPE(ifm->ifm_media));
4431        return (EINVAL);
4432    }
4433
4434    return (0);
4435}
4436
4437/*
4438 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4439 */
4440static void
4441bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4442{
4443    struct bxe_softc *sc = if_getsoftc(ifp);
4444
4445    /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4446       line if the IFM_AVALID flag is *NOT* set. So we need to set this
4447       flag unconditionally (irrespective of the administrative
4448       'up/down' state of the interface) to ensure that the line is always
4449       displayed.
4450    */
4451    ifmr->ifm_status = IFM_AVALID;
4452
4453    /* Setup the default interface info. */
4454    ifmr->ifm_active = IFM_ETHER;
4455
4456    /* Report link down if the driver isn't running. */
4457    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4458        ifmr->ifm_active |= IFM_NONE;
4459        BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4460        BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4461              __func__, sc->link_vars.link_up);
4462        return;
4463    }
4464
4465
4466    if (sc->link_vars.link_up) {
4467        ifmr->ifm_status |= IFM_ACTIVE;
4468        ifmr->ifm_active |= IFM_FDX;
4469    } else {
4470        ifmr->ifm_active |= IFM_NONE;
4471        BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4472              __func__);
4473        return;
4474    }
4475
4476    ifmr->ifm_active |= sc->media;
4477    return;
4478}
4479
4480static void
4481bxe_handle_chip_tq(void *context,
4482                   int  pending)
4483{
4484    struct bxe_softc *sc = (struct bxe_softc *)context;
4485    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4486
4487    switch (work)
4488    {
4489
4490    case CHIP_TQ_REINIT:
4491        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4492            /* restart the interface */
4493            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4494            bxe_periodic_stop(sc);
4495            BXE_CORE_LOCK(sc);
4496            bxe_stop_locked(sc);
4497            bxe_init_locked(sc);
4498            BXE_CORE_UNLOCK(sc);
4499        }
4500        break;

4502    default:
4503        break;
4504    }
4505}
4506
4507/*
4508 * Handles any IOCTL calls from the operating system.
4509 * 4510 * Returns: 4511 * 0 = Success, >0 Failure 4512 */ 4513static int 4514bxe_ioctl(if_t ifp, 4515 u_long command, 4516 caddr_t data) 4517{ 4518 struct bxe_softc *sc = if_getsoftc(ifp); 4519 struct ifreq *ifr = (struct ifreq *)data; 4520 int mask = 0; 4521 int reinit = 0; 4522 int error = 0; 4523 4524 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4525 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4526 4527 switch (command) 4528 { 4529 case SIOCSIFMTU: 4530 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4531 ifr->ifr_mtu); 4532 4533 if (sc->mtu == ifr->ifr_mtu) { 4534 /* nothing to change */ 4535 break; 4536 } 4537 4538 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4539 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4540 ifr->ifr_mtu, mtu_min, mtu_max); 4541 error = EINVAL; 4542 break; 4543 } 4544 4545 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4546 (unsigned long)ifr->ifr_mtu); 4547 /* 4548 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4549 (unsigned long)ifr->ifr_mtu); 4550 XXX - Not sure why it needs to be atomic 4551 */ 4552 if_setmtu(ifp, ifr->ifr_mtu); 4553 reinit = 1; 4554 break; 4555 4556 case SIOCSIFFLAGS: 4557 /* toggle the interface state up or down */ 4558 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4559 4560 BXE_CORE_LOCK(sc); 4561 /* check if the interface is up */ 4562 if (if_getflags(ifp) & IFF_UP) { 4563 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4564 /* set the receive mode flags */ 4565 bxe_set_rx_mode(sc); 4566 } else if(sc->state != BXE_STATE_DISABLED) { 4567 bxe_init_locked(sc); 4568 } 4569 } else { 4570 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4571 bxe_periodic_stop(sc); 4572 bxe_stop_locked(sc); 4573 } 4574 } 4575 BXE_CORE_UNLOCK(sc); 4576 4577 break; 4578 4579 case SIOCADDMULTI: 4580 case SIOCDELMULTI: 4581 /* add/delete multicast addresses */ 4582 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4583 4584 /* check if the interface is up */ 4585 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4586 /* set the receive mode flags */ 4587 BXE_CORE_LOCK(sc); 4588 bxe_set_rx_mode(sc); 4589 BXE_CORE_UNLOCK(sc); 4590 } 4591 4592 break; 4593 4594 case SIOCSIFCAP: 4595 /* find out which capabilities have changed */ 4596 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); 4597 4598 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4599 mask); 4600 4601 /* toggle the LRO capabilites enable flag */ 4602 if (mask & IFCAP_LRO) { 4603 if_togglecapenable(ifp, IFCAP_LRO); 4604 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4605 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); 4606 reinit = 1; 4607 } 4608 4609 /* toggle the TXCSUM checksum capabilites enable flag */ 4610 if (mask & IFCAP_TXCSUM) { 4611 if_togglecapenable(ifp, IFCAP_TXCSUM); 4612 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4613 (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF"); 4614 if (if_getcapenable(ifp) & IFCAP_TXCSUM) { 4615 if_sethwassistbits(ifp, (CSUM_IP | 4616 CSUM_TCP | 4617 CSUM_UDP | 4618 CSUM_TSO | 4619 CSUM_TCP_IPV6 | 4620 CSUM_UDP_IPV6), 0); 4621 } else { 4622 if_clearhwassist(ifp); /* XXX */ 4623 } 4624 } 4625 4626 /* toggle the RXCSUM checksum capabilities enable flag */ 4627 if (mask & IFCAP_RXCSUM) { 4628 if_togglecapenable(ifp, IFCAP_RXCSUM); 4629 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4630 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? 
"ON" : "OFF"); 4631 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 4632 if_sethwassistbits(ifp, (CSUM_IP | 4633 CSUM_TCP | 4634 CSUM_UDP | 4635 CSUM_TSO | 4636 CSUM_TCP_IPV6 | 4637 CSUM_UDP_IPV6), 0); 4638 } else { 4639 if_clearhwassist(ifp); /* XXX */ 4640 } 4641 } 4642 4643 /* toggle TSO4 capabilities enabled flag */ 4644 if (mask & IFCAP_TSO4) { 4645 if_togglecapenable(ifp, IFCAP_TSO4); 4646 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4647 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); 4648 } 4649 4650 /* toggle TSO6 capabilities enabled flag */ 4651 if (mask & IFCAP_TSO6) { 4652 if_togglecapenable(ifp, IFCAP_TSO6); 4653 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4654 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); 4655 } 4656 4657 /* toggle VLAN_HWTSO capabilities enabled flag */ 4658 if (mask & IFCAP_VLAN_HWTSO) { 4659 4660 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 4661 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4662 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4663 } 4664 4665 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4666 if (mask & IFCAP_VLAN_HWCSUM) { 4667 /* XXX investigate this... */ 4668 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4669 error = EINVAL; 4670 } 4671 4672 /* toggle VLAN_MTU capabilities enable flag */ 4673 if (mask & IFCAP_VLAN_MTU) { 4674 /* XXX investigate this... */ 4675 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4676 error = EINVAL; 4677 } 4678 4679 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4680 if (mask & IFCAP_VLAN_HWTAGGING) { 4681 /* XXX investigate this... */ 4682 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4683 error = EINVAL; 4684 } 4685 4686 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4687 if (mask & IFCAP_VLAN_HWFILTER) { 4688 /* XXX investigate this... */ 4689 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4690 error = EINVAL; 4691 } 4692 4693 /* XXX not yet... 
4694     * IFCAP_WOL_MAGIC
4695     */
4696
4697        break;
4698
4699    case SIOCSIFMEDIA:
4700    case SIOCGIFMEDIA:
4701        /* set/get interface media */
4702        BLOGD(sc, DBG_IOCTL,
4703              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4704              (command & 0xff));
4705        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4706        break;
4707
4708    default:
4709        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4710              (command & 0xff));
4711        error = ether_ioctl(ifp, command, data);
4712        break;
4713    }
4714
4715    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4716        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4717              "Re-initializing hardware from IOCTL change\n");
4718        bxe_periodic_stop(sc);
4719        BXE_CORE_LOCK(sc);
4720        bxe_stop_locked(sc);
4721        bxe_init_locked(sc);
4722        BXE_CORE_UNLOCK(sc);
4723    }
4724
4725    return (error);
4726}
4727
4728static __noinline void
4729bxe_dump_mbuf(struct bxe_softc *sc,
4730              struct mbuf      *m,
4731              uint8_t          contents)
4732{
4733    char *type;
4734    int i = 0;
4735
4736    if (!(sc->debug & DBG_MBUF)) {
4737        return;
4738    }
4739
4740    if (m == NULL) {
4741        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4742        return;
4743    }
4744
4745    while (m) {

4747#if __FreeBSD_version >= 1000000
4748        BLOGD(sc, DBG_MBUF,
4749              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4750              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4751
4752        if (m->m_flags & M_PKTHDR) {
4753             BLOGD(sc, DBG_MBUF,
4754                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4755                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4756                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4757        }
4758#else
4759        BLOGD(sc, DBG_MBUF,
4760              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4761              i, m, m->m_len, m->m_flags,
4762              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4763
4764        if (m->m_flags & M_PKTHDR) {
4765             BLOGD(sc, DBG_MBUF,
4766                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4767                   i, m->m_pkthdr.len, m->m_flags,
4768                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4769                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4770                   "\22M_PROMISC\23M_NOFREE",
4771                   (int)m->m_pkthdr.csum_flags,
4772                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4773                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4774                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4775                   "\14CSUM_PSEUDO_HDR");
4776        }
4777#endif /* #if __FreeBSD_version >= 1000000 */
4778
4779        if (m->m_flags & M_EXT) {
4780            switch (m->m_ext.ext_type) {
4781            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4782            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4783            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4784            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4785            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4786            case EXT_PACKET:     type = "EXT_PACKET";     break;
4787            case EXT_MBUF:       type = "EXT_MBUF";       break;
4788            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4789            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4790            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4791            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4792            default:             type = "UNKNOWN";        break;
4793            }
4794
4795            BLOGD(sc, DBG_MBUF,
4796                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4797                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4798        }
4799
4800        if (contents) {
4801            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4802        }
4803
4804        m = m->m_next;
4805        i++;
4806    }
4807}
4808
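/*
 * Worked example for bxe_chktso_window() below (illustrative numbers only):
 * with 13 DMA segments, segment 0 holding the headers, and a 1460-byte MSS,
 * the check slides a 10-segment window over segments 1..12. If every data
 * segment were, say, 128 bytes, any 10-segment window would cover only
 * 1280 bytes < MSS, so the chip could not build a full segment out of
 * 13 BDs and the mbuf must be defragmented first.
 */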
4809/*
4810 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4811 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4812 * The 3 reserved bds are: 1 for the headers BD plus 2 for the parse BD and the last BD.
4813 * The headers come in a separate bd in FreeBSD, so 13-3=10.
4814 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4815 */
4816static int
4817bxe_chktso_window(struct bxe_softc  *sc,
4818                  int               nsegs,
4819                  bus_dma_segment_t *segs,
4820                  struct mbuf       *m)
4821{
4822    uint32_t num_wnds, wnd_size, wnd_sum;
4823    int32_t frag_idx, wnd_idx;
4824    unsigned short lso_mss;
4825    int defrag;
4826
4827    defrag = 0;
4828    wnd_sum = 0;
4829    wnd_size = 10;
4830    num_wnds = nsegs - wnd_size;
4831    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4832
4833    /*
4834     * The total Eth+IP+TCP header length lives in the first FreeBSD mbuf,
4835     * so compute the data sum of the first window while skipping the first
4836     * segment (assumed to be the header).
4837     */
4838    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4839        wnd_sum += htole16(segs[frag_idx].ds_len);
4840    }
4841
4842    /* check the first 10 bd window size */
4843    if (wnd_sum < lso_mss) {
4844        return (1);
4845    }
4846
4847    /* run through the windows */
4848    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4849        /* subtract the first mbuf->m_len of the last wndw(-header) */
4850        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4851        /* add the next mbuf len to the len of our new window */
4852        wnd_sum += htole16(segs[frag_idx].ds_len);
4853        if (wnd_sum < lso_mss) {
4854            return (1);
4855        }
4856    }
4857
4858    return (0);
4859}
4860
4861static uint8_t
4862bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4863                    struct mbuf         *m,
4864                    uint32_t            *parsing_data)
4865{
4866    struct ether_vlan_header *eh = NULL;
4867    struct ip *ip4 = NULL;
4868    struct ip6_hdr *ip6 = NULL;
4869    caddr_t ip = NULL;
4870    struct tcphdr *th = NULL;
4871    int e_hlen, ip_hlen, l4_off;
4872    uint16_t proto;
4873
4874    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4875        /* no L4 checksum offload needed */
4876        return (0);
4877    }
4878
4879    /* get the Ethernet header */
4880    eh = mtod(m, struct ether_vlan_header *);
4881
4882    /* handle VLAN encapsulation if present */
4883    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4884        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4885        proto  = ntohs(eh->evl_proto);
4886    } else {
4887        e_hlen = ETHER_HDR_LEN;
4888        proto  = ntohs(eh->evl_encap_proto);
4889    }
4890
4891    switch (proto) {
4892    case ETHERTYPE_IP:
4893        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4894        ip4 = (m->m_len < sizeof(struct ip)) ?
4895                  (struct ip *)m->m_next->m_data :
4896                  (struct ip *)(m->m_data + e_hlen);
4897        /* ip_hl is the number of 32-bit words; << 2 converts to bytes */
4898        ip_hlen = (ip4->ip_hl << 2);
4899        ip = (caddr_t)ip4;
4900        break;
4901    case ETHERTYPE_IPV6:
4902        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4903        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4904                  (struct ip6_hdr *)m->m_next->m_data :
4905                  (struct ip6_hdr *)(m->m_data + e_hlen);
4906        /* XXX cannot support offload with IPv6 extensions */
4907        ip_hlen = sizeof(struct ip6_hdr);
4908        ip = (caddr_t)ip6;
4909        break;
4910    default:
4911        /* We can't offload in this case... */
4912        /* XXX error stat ???
*/ 4913 return (0); 4914 } 4915 4916 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 4917 l4_off = (e_hlen + ip_hlen); 4918 4919 *parsing_data |= 4920 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 4921 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 4922 4923 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 4924 CSUM_TSO | 4925 CSUM_TCP_IPV6)) { 4926 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 4927 th = (struct tcphdr *)(ip + ip_hlen); 4928 /* th_off is number of 32-bit words */ 4929 *parsing_data |= ((th->th_off << 4930 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 4931 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 4932 return (l4_off + (th->th_off << 2)); /* entire header length */ 4933 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 4934 CSUM_UDP_IPV6)) { 4935 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 4936 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 4937 } else { 4938 /* XXX error stat ??? */ 4939 return (0); 4940 } 4941} 4942 4943static uint8_t 4944bxe_set_pbd_csum(struct bxe_fastpath *fp, 4945 struct mbuf *m, 4946 struct eth_tx_parse_bd_e1x *pbd) 4947{ 4948 struct ether_vlan_header *eh = NULL; 4949 struct ip *ip4 = NULL; 4950 struct ip6_hdr *ip6 = NULL; 4951 caddr_t ip = NULL; 4952 struct tcphdr *th = NULL; 4953 struct udphdr *uh = NULL; 4954 int e_hlen, ip_hlen; 4955 uint16_t proto; 4956 uint8_t hlen; 4957 uint16_t tmp_csum; 4958 uint32_t *tmp_uh; 4959 4960 /* get the Ethernet header */ 4961 eh = mtod(m, struct ether_vlan_header *); 4962 4963 /* handle VLAN encapsulation if present */ 4964 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4965 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4966 proto = ntohs(eh->evl_proto); 4967 } else { 4968 e_hlen = ETHER_HDR_LEN; 4969 proto = ntohs(eh->evl_encap_proto); 4970 } 4971 4972 switch (proto) { 4973 case ETHERTYPE_IP: 4974 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 4975 ip4 = (m->m_len < sizeof(struct ip)) ? 4976 (struct ip *)m->m_next->m_data : 4977 (struct ip *)(m->m_data + e_hlen); 4978 /* ip_hl is number of 32-bit words */ 4979 ip_hlen = (ip4->ip_hl << 1); 4980 ip = (caddr_t)ip4; 4981 break; 4982 case ETHERTYPE_IPV6: 4983 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 4984 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 4985 (struct ip6_hdr *)m->m_next->m_data : 4986 (struct ip6_hdr *)(m->m_data + e_hlen); 4987 /* XXX cannot support offload with IPv6 extensions */ 4988 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 4989 ip = (caddr_t)ip6; 4990 break; 4991 default: 4992 /* We can't offload in this case... */ 4993 /* XXX error stat ??? 
*/ 4994 return (0); 4995 } 4996 4997 hlen = (e_hlen >> 1); 4998 4999 /* note that rest of global_data is indirectly zeroed here */ 5000 if (m->m_flags & M_VLANTAG) { 5001 pbd->global_data = 5002 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5003 } else { 5004 pbd->global_data = htole16(hlen); 5005 } 5006 5007 pbd->ip_hlen_w = ip_hlen; 5008 5009 hlen += pbd->ip_hlen_w; 5010 5011 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5012 5013 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5014 CSUM_TSO | 5015 CSUM_TCP_IPV6)) { 5016 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5017 /* th_off is number of 32-bit words */ 5018 hlen += (uint16_t)(th->th_off << 1); 5019 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5020 CSUM_UDP_IPV6)) { 5021 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5022 hlen += (sizeof(struct udphdr) / 2); 5023 } else { 5024 /* valid case as only CSUM_IP was set */ 5025 return (0); 5026 } 5027 5028 pbd->total_hlen_w = htole16(hlen); 5029 5030 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5031 CSUM_TSO | 5032 CSUM_TCP_IPV6)) { 5033 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5034 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5035 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5036 CSUM_UDP_IPV6)) { 5037 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5038 5039 /* 5040 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5041 * checksums and does not know anything about the UDP header and where 5042 * the checksum field is located. It only knows about TCP. Therefore 5043 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5044 * offload. Since the checksum field offset for TCP is 16 bytes and 5045 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5046 * bytes less than the start of the UDP header. This allows the 5047 * hardware to write the checksum in the correct spot. But the 5048 * hardware will compute a checksum which includes the last 10 bytes 5049 * of the IP header. To correct this we tweak the stack computed 5050 * pseudo checksum by folding in the calculation of the inverse 5051 * checksum for those final 10 bytes of the IP header. This allows 5052 * the correct checksum to be computed by the hardware. 5053 */ 5054 5055 /* set pointer 10 bytes before UDP header */ 5056 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5057 5058 /* calculate a pseudo header checksum over the first 10 bytes */ 5059 tmp_csum = in_pseudo(*tmp_uh, 5060 *(tmp_uh + 1), 5061 *(uint16_t *)(tmp_uh + 2)); 5062 5063 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5064 } 5065 5066 return (hlen * 2); /* entire header length, number of bytes */ 5067} 5068 5069static void 5070bxe_set_pbd_lso_e2(struct mbuf *m, 5071 uint32_t *parsing_data) 5072{ 5073 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5074 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5075 ETH_TX_PARSE_BD_E2_LSO_MSS); 5076 5077 /* XXX test for IPv6 with extension header... */ 5078} 5079 5080static void 5081bxe_set_pbd_lso(struct mbuf *m, 5082 struct eth_tx_parse_bd_e1x *pbd) 5083{ 5084 struct ether_vlan_header *eh = NULL; 5085 struct ip *ip = NULL; 5086 struct tcphdr *th = NULL; 5087 int e_hlen; 5088 5089 /* get the Ethernet header */ 5090 eh = mtod(m, struct ether_vlan_header *); 5091 5092 /* handle VLAN encapsulation if present */ 5093 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5094                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5095
5096    /* get the IP and TCP header, with LSO entire header in first mbuf */
5097    /* XXX assuming IPv4 */
5098    ip = (struct ip *)(m->m_data + e_hlen);
5099    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5100
5101    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5102    pbd->tcp_send_seq = ntohl(th->th_seq);
5103    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); /* byte 13 of the TCP header: the flags field */

5105#if 1
5106    /* XXX IPv4 */
5107    pbd->ip_id = ntohs(ip->ip_id);
5108    pbd->tcp_pseudo_csum =
5109        ntohs(in_pseudo(ip->ip_src.s_addr,
5110                        ip->ip_dst.s_addr,
5111                        htons(IPPROTO_TCP)));
5112#else
5113    /* XXX IPv6 */
5114    pbd->tcp_pseudo_csum =
5115        ntohs(in_pseudo(&ip6->ip6_src,
5116                        &ip6->ip6_dst,
5117                        htons(IPPROTO_TCP)));
5118#endif

5120    pbd->global_data |=
5121        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5122}
5123
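/*
 * Illustrative sketch (kept out of the build): the TSO pseudo checksum
 * handed to the chip above is the standard TCP pseudo-header checksum with
 * the length term left out, matching the
 * ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN flag set on global_data;
 * presumably the firmware folds in the per-segment length as it carves
 * MSS-sized segments.
 */
#if 0
static uint16_t
tso_pseudo_csum_model(const struct ip *iph)
{
    /* src + dst + protocol, no length term */
    return (ntohs(in_pseudo(iph->ip_src.s_addr, iph->ip_dst.s_addr,
                            htons(IPPROTO_TCP))));
}
#endif
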
5124/*
5125 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5126 * visible to the controller.
5127 *
5128 * If an mbuf is submitted to this routine and cannot be given to the
5129 * controller (e.g. it has too many fragments) then the function may free
5130 * the mbuf and return to the caller.
5131 *
5132 * Returns:
5133 *   0 = Success, !0 = Failure
5134 *   Note the side effect that an mbuf may be freed if it causes a problem.
5135 */
5136static int
5137bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5138{
5139    bus_dma_segment_t segs[32];
5140    struct mbuf *m0;
5141    struct bxe_sw_tx_bd *tx_buf;
5142    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5143    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5144    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5145    struct eth_tx_bd *tx_data_bd;
5146    struct eth_tx_bd *tx_total_pkt_size_bd;
5147    struct eth_tx_start_bd *tx_start_bd;
5148    uint16_t bd_prod, pkt_prod, total_pkt_size;
5149    uint8_t mac_type;
5150    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5151    struct bxe_softc *sc;
5152    uint16_t tx_bd_avail;
5153    struct ether_vlan_header *eh;
5154    uint32_t pbd_e2_parsing_data = 0;
5155    uint8_t hlen = 0;
5156    int tmp_bd;
5157    int i;
5158
5159    sc = fp->sc;
5160
5161#if __FreeBSD_version >= 800000
5162    M_ASSERTPKTHDR(*m_head);
5163#endif /* #if __FreeBSD_version >= 800000 */
5164
5165    m0 = *m_head;
5166    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5167    tx_start_bd = NULL;
5168    tx_data_bd = NULL;
5169    tx_total_pkt_size_bd = NULL;
5170
5171    /* get the H/W pointer for packets and BDs */
5172    pkt_prod = fp->tx_pkt_prod;
5173    bd_prod = fp->tx_bd_prod;
5174
5175    mac_type = UNICAST_ADDRESS;
5176
5177    /* map the mbuf into the next open DMAable memory */
5178    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5179    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5180                                    tx_buf->m_map, m0,
5181                                    segs, &nsegs, BUS_DMA_NOWAIT);

5183    /* mapping errors */
5184    if(__predict_false(error != 0)) {
5185        fp->eth_q_stats.tx_dma_mapping_failure++;
5186        if (error == ENOMEM) {
5187            /* resource issue, try again later */
5188            rc = ENOMEM;
5189        } else if (error == EFBIG) {
5190            /* possibly recoverable with defragmentation */
5191            fp->eth_q_stats.mbuf_defrag_attempts++;
5192            m0 = m_defrag(*m_head, M_NOWAIT);
5193            if (m0 == NULL) {
5194                fp->eth_q_stats.mbuf_defrag_failures++;
5195                rc = ENOBUFS;
5196            } else {
5197                /* defrag successful, try mapping again */
5198                *m_head = m0;
5199                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5200                                                tx_buf->m_map, m0,
5201                                                segs, &nsegs, BUS_DMA_NOWAIT);
5202                if (error) {
5203                    fp->eth_q_stats.tx_dma_mapping_failure++;
5204                    rc = error;
5205                }
5206            }
5207        } else {
5208            /* unknown, unrecoverable mapping error */
5209            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5210            bxe_dump_mbuf(sc, m0, FALSE);
5211            rc = error;
5212        }

5214        goto bxe_tx_encap_continue;
5215    }

5217    tx_bd_avail = bxe_tx_avail(sc, fp);

5219    /* make sure there is enough room in the send queue */
5220    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5221        /* Recoverable, try again later. */
5222        fp->eth_q_stats.tx_hw_queue_full++;
5223        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5224        rc = ENOMEM;
5225        goto bxe_tx_encap_continue;
5226    }

5228    /* capture the current H/W TX chain high watermark */
5229    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5230                        (TX_BD_USABLE - tx_bd_avail))) {
5231        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5232    }

5234    /* make sure it fits in the packet window */
5235    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5236        /*
5237         * The mbuf may be too big for the controller to handle. If the frame
5238         * is a TSO frame we'll need to do an additional check.
5239         */
5240        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5241            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5242                goto bxe_tx_encap_continue; /* OK to send */
5243            } else {
5244                fp->eth_q_stats.tx_window_violation_tso++;
5245            }
5246        } else {
5247            fp->eth_q_stats.tx_window_violation_std++;
5248        }

5250        /* let's try to defragment this mbuf and remap it */
5251        fp->eth_q_stats.mbuf_defrag_attempts++;
5252        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

5254        m0 = m_defrag(*m_head, M_NOWAIT);
5255        if (m0 == NULL) {
5256            fp->eth_q_stats.mbuf_defrag_failures++;
5257            /* Ugh, just drop the frame... :( */
5258            rc = ENOBUFS;
5259        } else {
5260            /* defrag successful, try mapping again */
5261            *m_head = m0;
5262            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5263                                            tx_buf->m_map, m0,
5264                                            segs, &nsegs, BUS_DMA_NOWAIT);
5265            if (error) {
5266                fp->eth_q_stats.tx_dma_mapping_failure++;
5267                /* No sense in trying to defrag/copy chain, drop it.
:( */ 5268 rc = error; 5269 } else { 5270 /* if the chain is still too long then drop it */ 5271 if(m0->m_pkthdr.csum_flags & CSUM_TSO) { 5272 /* 5273 * in case TSO is enabled nsegs should be checked against 5274 * BXE_TSO_MAX_SEGMENTS 5275 */ 5276 if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) { 5277 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5278 fp->eth_q_stats.nsegs_path1_errors++; 5279 rc = ENODEV; 5280 } 5281 } else { 5282 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5283 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5284 fp->eth_q_stats.nsegs_path2_errors++; 5285 rc = ENODEV; 5286 } 5287 } 5288 } 5289 } 5290 } 5291 5292bxe_tx_encap_continue: 5293 5294 /* Check for errors */ 5295 if (rc) { 5296 if (rc == ENOMEM) { 5297 /* recoverable try again later */ 5298 } else { 5299 fp->eth_q_stats.tx_soft_errors++; 5300 fp->eth_q_stats.mbuf_alloc_tx--; 5301 m_freem(*m_head); 5302 *m_head = NULL; 5303 } 5304 5305 return (rc); 5306 } 5307 5308 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5309 if (m0->m_flags & M_BCAST) { 5310 mac_type = BROADCAST_ADDRESS; 5311 } else if (m0->m_flags & M_MCAST) { 5312 mac_type = MULTICAST_ADDRESS; 5313 } 5314 5315 /* store the mbuf into the mbuf ring */ 5316 tx_buf->m = m0; 5317 tx_buf->first_bd = fp->tx_bd_prod; 5318 tx_buf->flags = 0; 5319 5320 /* prepare the first transmit (start) BD for the mbuf */ 5321 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5322 5323 BLOGD(sc, DBG_TX, 5324 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5325 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5326 5327 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5328 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5329 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5330 total_pkt_size += tx_start_bd->nbytes; 5331 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5332 5333 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5334 5335 /* all frames have at least Start BD + Parsing BD */ 5336 nbds = nsegs + 1; 5337 tx_start_bd->nbd = htole16(nbds); 5338 5339 if (m0->m_flags & M_VLANTAG) { 5340 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5341 tx_start_bd->bd_flags.as_bitfield |= 5342 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5343 } else { 5344 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5345 if (IS_VF(sc)) { 5346 /* map ethernet header to find type and header length */ 5347 eh = mtod(m0, struct ether_vlan_header *); 5348 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5349 } else { 5350 /* used by FW for packet accounting */ 5351 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5352 } 5353 } 5354 5355 /* 5356 * add a parsing BD from the chain. 
The parsing BD is always added 5357 * though it is only used for TSO and chksum 5358 */ 5359 bd_prod = TX_BD_NEXT(bd_prod); 5360 5361 if (m0->m_pkthdr.csum_flags) { 5362 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5363 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5364 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5365 } 5366 5367 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5368 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5369 ETH_TX_BD_FLAGS_L4_CSUM); 5370 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5371 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5372 ETH_TX_BD_FLAGS_IS_UDP | 5373 ETH_TX_BD_FLAGS_L4_CSUM); 5374 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5375 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5376 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5377 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5378 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5379 ETH_TX_BD_FLAGS_IS_UDP); 5380 } 5381 } 5382 5383 if (!CHIP_IS_E1x(sc)) { 5384 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5385 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5386 5387 if (m0->m_pkthdr.csum_flags) { 5388 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5389 } 5390 5391 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5392 mac_type); 5393 } else { 5394 uint16_t global_data = 0; 5395 5396 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5397 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5398 5399 if (m0->m_pkthdr.csum_flags) { 5400 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5401 } 5402 5403 SET_FLAG(global_data, 5404 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5405 pbd_e1x->global_data |= htole16(global_data); 5406 } 5407 5408 /* setup the parsing BD with TSO specific info */ 5409 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5410 fp->eth_q_stats.tx_ofld_frames_lso++; 5411 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5412 5413 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5414 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5415 5416 /* split the first BD into header/data making the fw job easy */ 5417 nbds++; 5418 tx_start_bd->nbd = htole16(nbds); 5419 tx_start_bd->nbytes = htole16(hlen); 5420 5421 bd_prod = TX_BD_NEXT(bd_prod); 5422 5423 /* new transmit BD after the tx_parse_bd */ 5424 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5425 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5426 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5427 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5428 if (tx_total_pkt_size_bd == NULL) { 5429 tx_total_pkt_size_bd = tx_data_bd; 5430 } 5431 5432 BLOGD(sc, DBG_TX, 5433 "TSO split header size is %d (%x:%x) nbds %d\n", 5434 le16toh(tx_start_bd->nbytes), 5435 le32toh(tx_start_bd->addr_hi), 5436 le32toh(tx_start_bd->addr_lo), 5437 nbds); 5438 } 5439 5440 if (!CHIP_IS_E1x(sc)) { 5441 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5442 } else { 5443 bxe_set_pbd_lso(m0, pbd_e1x); 5444 } 5445 } 5446 5447 if (pbd_e2_parsing_data) { 5448 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5449 } 5450 5451 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5452 for (i = 1; i < nsegs ; i++) { 5453 bd_prod = TX_BD_NEXT(bd_prod); 5454 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5455 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5456 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5457 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5458 if 
(tx_total_pkt_size_bd == NULL) { 5459 tx_total_pkt_size_bd = tx_data_bd; 5460 } 5461 total_pkt_size += tx_data_bd->nbytes; 5462 } 5463 5464 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5465 5466 if (tx_total_pkt_size_bd != NULL) { 5467 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5468 } 5469 5470 if (__predict_false(sc->debug & DBG_TX)) { 5471 tmp_bd = tx_buf->first_bd; 5472 for (i = 0; i < nbds; i++) 5473 { 5474 if (i == 0) { 5475 BLOGD(sc, DBG_TX, 5476 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5477 "bd_flags=0x%x hdr_nbds=%d\n", 5478 tx_start_bd, 5479 tmp_bd, 5480 le16toh(tx_start_bd->nbd), 5481 le16toh(tx_start_bd->vlan_or_ethertype), 5482 tx_start_bd->bd_flags.as_bitfield, 5483 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5484 } else if (i == 1) { 5485 if (pbd_e1x) { 5486 BLOGD(sc, DBG_TX, 5487 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5488 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5489 "tcp_seq=%u total_hlen_w=%u\n", 5490 pbd_e1x, 5491 tmp_bd, 5492 pbd_e1x->global_data, 5493 pbd_e1x->ip_hlen_w, 5494 pbd_e1x->ip_id, 5495 pbd_e1x->lso_mss, 5496 pbd_e1x->tcp_flags, 5497 pbd_e1x->tcp_pseudo_csum, 5498 pbd_e1x->tcp_send_seq, 5499 le16toh(pbd_e1x->total_hlen_w)); 5500 } else { /* if (pbd_e2) */ 5501 BLOGD(sc, DBG_TX, 5502 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5503 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5504 pbd_e2, 5505 tmp_bd, 5506 pbd_e2->data.mac_addr.dst_hi, 5507 pbd_e2->data.mac_addr.dst_mid, 5508 pbd_e2->data.mac_addr.dst_lo, 5509 pbd_e2->data.mac_addr.src_hi, 5510 pbd_e2->data.mac_addr.src_mid, 5511 pbd_e2->data.mac_addr.src_lo, 5512 pbd_e2->parsing_data); 5513 } 5514 } 5515 5516 if (i != 1) { /* skip parse db as it doesn't hold data */ 5517 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5518 BLOGD(sc, DBG_TX, 5519 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5520 tx_data_bd, 5521 tmp_bd, 5522 le16toh(tx_data_bd->nbytes), 5523 le32toh(tx_data_bd->addr_hi), 5524 le32toh(tx_data_bd->addr_lo)); 5525 } 5526 5527 tmp_bd = TX_BD_NEXT(tmp_bd); 5528 } 5529 } 5530 5531 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5532 5533 /* update TX BD producer index value for next TX */ 5534 bd_prod = TX_BD_NEXT(bd_prod); 5535 5536 /* 5537 * If the chain of tx_bd's describing this frame is adjacent to or spans 5538 * an eth_tx_next_bd element then we need to increment the nbds value. 5539 */ 5540 if (TX_BD_IDX(bd_prod) < nbds) { 5541 nbds++; 5542 } 5543 5544 /* don't allow reordering of writes for nbd and packets */ 5545 mb(); 5546 5547 fp->tx_db.data.prod += nbds; 5548 5549 /* producer points to the next free tx_bd at this point */ 5550 fp->tx_pkt_prod++; 5551 fp->tx_bd_prod = bd_prod; 5552 5553 DOORBELL(sc, fp->index, fp->tx_db.raw); 5554 5555 fp->eth_q_stats.tx_pkts++; 5556 5557 /* Prevent speculative reads from getting ahead of the status block. */ 5558 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5559 0, 0, BUS_SPACE_BARRIER_READ); 5560 5561 /* Prevent speculative reads from getting ahead of the doorbell. 
*/ 5562 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5563 0, 0, BUS_SPACE_BARRIER_READ); 5564 5565 return (0); 5566} 5567 5568static void 5569bxe_tx_start_locked(struct bxe_softc *sc, 5570 if_t ifp, 5571 struct bxe_fastpath *fp) 5572{ 5573 struct mbuf *m = NULL; 5574 int tx_count = 0; 5575 uint16_t tx_bd_avail; 5576 5577 BXE_FP_TX_LOCK_ASSERT(fp); 5578 5579 /* keep adding entries while there are frames to send */ 5580 while (!if_sendq_empty(ifp)) { 5581 5582 /* 5583 * check for any frames to send 5584 * dequeue can still be NULL even if queue is not empty 5585 */ 5586 m = if_dequeue(ifp); 5587 if (__predict_false(m == NULL)) { 5588 break; 5589 } 5590 5591 /* the mbuf now belongs to us */ 5592 fp->eth_q_stats.mbuf_alloc_tx++; 5593 5594 /* 5595 * Put the frame into the transmit ring. If we don't have room, 5596 * place the mbuf back at the head of the TX queue, set the 5597 * OACTIVE flag, and wait for the NIC to drain the chain. 5598 */ 5599 if (__predict_false(bxe_tx_encap(fp, &m))) { 5600 fp->eth_q_stats.tx_encap_failures++; 5601 if (m != NULL) { 5602 /* mark the TX queue as full and return the frame */ 5603 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5604 if_sendq_prepend(ifp, m); 5605 fp->eth_q_stats.mbuf_alloc_tx--; 5606 fp->eth_q_stats.tx_queue_xoff++; 5607 } 5608 5609 /* stop looking for more work */ 5610 break; 5611 } 5612 5613 /* the frame was enqueued successfully */ 5614 tx_count++; 5615 5616 /* send a copy of the frame to any BPF listeners. */ 5617 if_etherbpfmtap(ifp, m); 5618 5619 tx_bd_avail = bxe_tx_avail(sc, fp); 5620 5621 /* handle any completions if we're running low */ 5622 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5623 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5624 bxe_txeof(sc, fp); 5625 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5626 break; 5627 } 5628 } 5629 } 5630 5631 /* all TX packets were dequeued and/or the tx ring is full */ 5632 if (tx_count > 0) { 5633 /* reset the TX watchdog timeout timer */ 5634 fp->watchdog_timer = BXE_TX_TIMEOUT; 5635 } 5636} 5637 5638/* Legacy (non-RSS) dispatch routine */ 5639static void 5640bxe_tx_start(if_t ifp) 5641{ 5642 struct bxe_softc *sc; 5643 struct bxe_fastpath *fp; 5644 5645 sc = if_getsoftc(ifp); 5646 5647 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5648 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5649 return; 5650 } 5651 5652 if (!sc->link_vars.link_up) { 5653 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5654 return; 5655 } 5656 5657 fp = &sc->fp[0]; 5658 5659 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5660 fp->eth_q_stats.tx_queue_full_return++; 5661 return; 5662 } 5663 5664 BXE_FP_TX_LOCK(fp); 5665 bxe_tx_start_locked(sc, ifp, fp); 5666 BXE_FP_TX_UNLOCK(fp); 5667} 5668 5669#if __FreeBSD_version >= 901504 5670 5671static int 5672bxe_tx_mq_start_locked(struct bxe_softc *sc, 5673 if_t ifp, 5674 struct bxe_fastpath *fp, 5675 struct mbuf *m) 5676{ 5677 struct buf_ring *tx_br = fp->tx_br; 5678 struct mbuf *next; 5679 int depth, rc, tx_count; 5680 uint16_t tx_bd_avail; 5681 5682 rc = tx_count = 0; 5683 5684 BXE_FP_TX_LOCK_ASSERT(fp); 5685 5686 if (sc->state != BXE_STATE_OPEN) { 5687 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; 5688 return ENETDOWN; 5689 } 5690 5691 if (!tx_br) { 5692 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 5693 return (EINVAL); 5694 } 5695 5696 if (m != NULL) { 5697 rc = drbr_enqueue(ifp, tx_br, m); 5698 if (rc != 0) { 5699 fp->eth_q_stats.tx_soft_errors++; 5700 goto bxe_tx_mq_start_locked_exit; 5701 } 5702 } 5703 5704 if 
(!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5705 fp->eth_q_stats.tx_request_link_down_failures++; 5706 goto bxe_tx_mq_start_locked_exit; 5707 } 5708 5709 /* fetch the depth of the driver queue */ 5710 depth = drbr_inuse_drv(ifp, tx_br); 5711 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 5712 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 5713 } 5714 5715 /* keep adding entries while there are frames to send */ 5716 while ((next = drbr_peek(ifp, tx_br)) != NULL) { 5717 /* handle any completions if we're running low */ 5718 tx_bd_avail = bxe_tx_avail(sc, fp); 5719 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5720 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5721 bxe_txeof(sc, fp); 5722 tx_bd_avail = bxe_tx_avail(sc, fp); 5723 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { 5724 fp->eth_q_stats.bd_avail_too_less_failures++; 5725 m_freem(next); 5726 drbr_advance(ifp, tx_br); 5727 rc = ENOBUFS; 5728 break; 5729 } 5730 } 5731 5732 /* the mbuf now belongs to us */ 5733 fp->eth_q_stats.mbuf_alloc_tx++; 5734 5735 /* 5736 * Put the frame into the transmit ring. If we don't have room, 5737 * place the mbuf back at the head of the TX queue, set the 5738 * OACTIVE flag, and wait for the NIC to drain the chain. 5739 */ 5740 rc = bxe_tx_encap(fp, &next); 5741 if (__predict_false(rc != 0)) { 5742 fp->eth_q_stats.tx_encap_failures++; 5743 if (next != NULL) { 5744 /* mark the TX queue as full and save the frame */ 5745 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5746 drbr_putback(ifp, tx_br, next); 5747 fp->eth_q_stats.mbuf_alloc_tx--; 5748 fp->eth_q_stats.tx_frames_deferred++; 5749 } else 5750 drbr_advance(ifp, tx_br); 5751 5752 /* stop looking for more work */ 5753 break; 5754 } 5755 5756 /* the transmit frame was enqueued successfully */ 5757 tx_count++; 5758 5759 /* send a copy of the frame to any BPF listeners */ 5760 if_etherbpfmtap(ifp, next); 5761 5762 drbr_advance(ifp, tx_br); 5763 } 5764 5765 /* all TX packets were dequeued and/or the tx ring is full */ 5766 if (tx_count > 0) { 5767 /* reset the TX watchdog timeout timer */ 5768 fp->watchdog_timer = BXE_TX_TIMEOUT; 5769 } 5770 5771bxe_tx_mq_start_locked_exit: 5772 /* If we didn't drain the drbr, enqueue a task in the future to do it. */ 5773 if (!drbr_empty(ifp, tx_br)) { 5774 fp->eth_q_stats.tx_mq_not_empty++; 5775 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); 5776 } 5777 5778 return (rc); 5779} 5780 5781static void 5782bxe_tx_mq_start_deferred(void *arg, 5783 int pending) 5784{ 5785 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; 5786 struct bxe_softc *sc = fp->sc; 5787 if_t ifp = sc->ifp; 5788 5789 BXE_FP_TX_LOCK(fp); 5790 bxe_tx_mq_start_locked(sc, ifp, fp, NULL); 5791 BXE_FP_TX_UNLOCK(fp); 5792} 5793 5794/* Multiqueue (TSS) dispatch routine. 
*/ 5795static int 5796bxe_tx_mq_start(struct ifnet *ifp, 5797 struct mbuf *m) 5798{ 5799 struct bxe_softc *sc = if_getsoftc(ifp); 5800 struct bxe_fastpath *fp; 5801 int fp_index, rc; 5802 5803 fp_index = 0; /* default is the first queue */ 5804 5805 /* check if flowid is set */ 5806 5807 if (BXE_VALID_FLOWID(m)) 5808 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 5809 5810 fp = &sc->fp[fp_index]; 5811 5812 if (sc->state != BXE_STATE_OPEN) { 5813 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; 5814 return ENETDOWN; 5815 } 5816 5817 if (BXE_FP_TX_TRYLOCK(fp)) { 5818 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 5819 BXE_FP_TX_UNLOCK(fp); 5820 } else { 5821 rc = drbr_enqueue(ifp, fp->tx_br, m); 5822 taskqueue_enqueue(fp->tq, &fp->tx_task); 5823 } 5824 5825 return (rc); 5826} 5827 5828static void 5829bxe_mq_flush(struct ifnet *ifp) 5830{ 5831 struct bxe_softc *sc = if_getsoftc(ifp); 5832 struct bxe_fastpath *fp; 5833 struct mbuf *m; 5834 int i; 5835 5836 for (i = 0; i < sc->num_queues; i++) { 5837 fp = &sc->fp[i]; 5838 5839 if (fp->state != BXE_FP_STATE_IRQ) { 5840 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 5841 fp->index, fp->state); 5842 continue; 5843 } 5844 5845 if (fp->tx_br != NULL) { 5846 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 5847 BXE_FP_TX_LOCK(fp); 5848 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 5849 m_freem(m); 5850 } 5851 BXE_FP_TX_UNLOCK(fp); 5852 } 5853 } 5854 5855 if_qflush(ifp); 5856} 5857 5858#endif /* FreeBSD_version >= 901504 */ 5859 5860static uint16_t 5861bxe_cid_ilt_lines(struct bxe_softc *sc) 5862{ 5863 if (IS_SRIOV(sc)) { 5864 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 5865 } 5866 return (L2_ILT_LINES(sc)); 5867} 5868 5869static void 5870bxe_ilt_set_info(struct bxe_softc *sc) 5871{ 5872 struct ilt_client_info *ilt_client; 5873 struct ecore_ilt *ilt = sc->ilt; 5874 uint16_t line = 0; 5875 5876 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 5877 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 5878 5879 /* CDU */ 5880 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 5881 ilt_client->client_num = ILT_CLIENT_CDU; 5882 ilt_client->page_size = CDU_ILT_PAGE_SZ; 5883 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 5884 ilt_client->start = line; 5885 line += bxe_cid_ilt_lines(sc); 5886 5887 if (CNIC_SUPPORT(sc)) { 5888 line += CNIC_ILT_LINES; 5889 } 5890 5891 ilt_client->end = (line - 1); 5892 5893 BLOGD(sc, DBG_LOAD, 5894 "ilt client[CDU]: start %d, end %d, " 5895 "psz 0x%x, flags 0x%x, hw psz %d\n", 5896 ilt_client->start, ilt_client->end, 5897 ilt_client->page_size, 5898 ilt_client->flags, 5899 ilog2(ilt_client->page_size >> 12)); 5900 5901 /* QM */ 5902 if (QM_INIT(sc->qm_cid_count)) { 5903 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 5904 ilt_client->client_num = ILT_CLIENT_QM; 5905 ilt_client->page_size = QM_ILT_PAGE_SZ; 5906 ilt_client->flags = 0; 5907 ilt_client->start = line; 5908 5909 /* 4 bytes for each cid */ 5910 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 5911 QM_ILT_PAGE_SZ); 5912 5913 ilt_client->end = (line - 1); 5914 5915 BLOGD(sc, DBG_LOAD, 5916 "ilt client[QM]: start %d, end %d, " 5917 "psz 0x%x, flags 0x%x, hw psz %d\n", 5918 ilt_client->start, ilt_client->end, 5919 ilt_client->page_size, ilt_client->flags, 5920 ilog2(ilt_client->page_size >> 12)); 5921 } 5922 5923 if (CNIC_SUPPORT(sc)) { 5924 /* SRC */ 5925 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 5926 ilt_client->client_num = ILT_CLIENT_SRC; 5927 ilt_client->page_size = SRC_ILT_PAGE_SZ; 5928 
ilt_client->flags = 0; 5929 ilt_client->start = line; 5930 line += SRC_ILT_LINES; 5931 ilt_client->end = (line - 1); 5932 5933 BLOGD(sc, DBG_LOAD, 5934 "ilt client[SRC]: start %d, end %d, " 5935 "psz 0x%x, flags 0x%x, hw psz %d\n", 5936 ilt_client->start, ilt_client->end, 5937 ilt_client->page_size, ilt_client->flags, 5938 ilog2(ilt_client->page_size >> 12)); 5939 5940 /* TM */ 5941 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 5942 ilt_client->client_num = ILT_CLIENT_TM; 5943 ilt_client->page_size = TM_ILT_PAGE_SZ; 5944 ilt_client->flags = 0; 5945 ilt_client->start = line; 5946 line += TM_ILT_LINES; 5947 ilt_client->end = (line - 1); 5948 5949 BLOGD(sc, DBG_LOAD, 5950 "ilt client[TM]: start %d, end %d, " 5951 "psz 0x%x, flags 0x%x, hw psz %d\n", 5952 ilt_client->start, ilt_client->end, 5953 ilt_client->page_size, ilt_client->flags, 5954 ilog2(ilt_client->page_size >> 12)); 5955 } 5956 5957 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 5958} 5959 5960static void 5961bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 5962{ 5963 int i; 5964 uint32_t rx_buf_size; 5965 5966 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 5967 5968 for (i = 0; i < sc->num_queues; i++) { 5969 if(rx_buf_size <= MCLBYTES){ 5970 sc->fp[i].rx_buf_size = rx_buf_size; 5971 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5972 }else if (rx_buf_size <= MJUMPAGESIZE){ 5973 sc->fp[i].rx_buf_size = rx_buf_size; 5974 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5975 }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ 5976 sc->fp[i].rx_buf_size = MCLBYTES; 5977 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5978 }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ 5979 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 5980 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5981 }else { 5982 sc->fp[i].rx_buf_size = MCLBYTES; 5983 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5984 } 5985 } 5986} 5987 5988static int 5989bxe_alloc_ilt_mem(struct bxe_softc *sc) 5990{ 5991 int rc = 0; 5992 5993 if ((sc->ilt = 5994 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 5995 M_BXE_ILT, 5996 (M_NOWAIT | M_ZERO))) == NULL) { 5997 rc = 1; 5998 } 5999 6000 return (rc); 6001} 6002 6003static int 6004bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6005{ 6006 int rc = 0; 6007 6008 if ((sc->ilt->lines = 6009 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6010 M_BXE_ILT, 6011 (M_NOWAIT | M_ZERO))) == NULL) { 6012 rc = 1; 6013 } 6014 6015 return (rc); 6016} 6017 6018static void 6019bxe_free_ilt_mem(struct bxe_softc *sc) 6020{ 6021 if (sc->ilt != NULL) { 6022 free(sc->ilt, M_BXE_ILT); 6023 sc->ilt = NULL; 6024 } 6025} 6026 6027static void 6028bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6029{ 6030 if (sc->ilt->lines != NULL) { 6031 free(sc->ilt->lines, M_BXE_ILT); 6032 sc->ilt->lines = NULL; 6033 } 6034} 6035 6036static void 6037bxe_free_mem(struct bxe_softc *sc) 6038{ 6039 int i; 6040 6041 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6042 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6043 sc->context[i].vcxt = NULL; 6044 sc->context[i].size = 0; 6045 } 6046 6047 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6048 6049 bxe_free_ilt_lines_mem(sc); 6050 6051} 6052 6053static int 6054bxe_alloc_mem(struct bxe_softc *sc) 6055{ 6056 6057 int context_size; 6058 int allocated; 6059 int i; 6060 6061 /* 6062 * Allocate memory for CDU context: 6063 * This memory is allocated separately and not in the generic ILT 6064 * functions because CDU differs in few aspects: 6065 * 1. 
There can be multiple entities allocating memory for context - 6066 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6067 * its own ILT lines. 6068 * 2. Since CDU page-size is not a single 4KB page (which is the case 6069 * for the other ILT clients), to be efficient we want to support 6070 * allocation of sub-page-size in the last entry. 6071 * 3. Context pointers are used by the driver to pass to FW / update 6072 * the context (for the other ILT clients the pointers are used just to 6073 * free the memory during unload). 6074 */ 6075 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6076 for (i = 0, allocated = 0; allocated < context_size; i++) { 6077 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6078 (context_size - allocated)); 6079 6080 if (bxe_dma_alloc(sc, sc->context[i].size, 6081 &sc->context[i].vcxt_dma, 6082 "cdu context") != 0) { 6083 bxe_free_mem(sc); 6084 return (-1); 6085 } 6086 6087 sc->context[i].vcxt = 6088 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6089 6090 allocated += sc->context[i].size; 6091 } 6092 6093 bxe_alloc_ilt_lines_mem(sc); 6094 6095 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6096 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6097 { 6098 for (i = 0; i < 4; i++) { 6099 BLOGD(sc, DBG_LOAD, 6100 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6101 i, 6102 sc->ilt->clients[i].page_size, 6103 sc->ilt->clients[i].start, 6104 sc->ilt->clients[i].end, 6105 sc->ilt->clients[i].client_num, 6106 sc->ilt->clients[i].flags); 6107 } 6108 } 6109 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6110 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6111 bxe_free_mem(sc); 6112 return (-1); 6113 } 6114 6115 return (0); 6116} 6117 6118static void 6119bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6120{ 6121 struct bxe_softc *sc; 6122 int i; 6123 6124 sc = fp->sc; 6125 6126 if (fp->rx_mbuf_tag == NULL) { 6127 return; 6128 } 6129 6130 /* free all mbufs and unload all maps */ 6131 for (i = 0; i < RX_BD_TOTAL; i++) { 6132 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6133 bus_dmamap_sync(fp->rx_mbuf_tag, 6134 fp->rx_mbuf_chain[i].m_map, 6135 BUS_DMASYNC_POSTREAD); 6136 bus_dmamap_unload(fp->rx_mbuf_tag, 6137 fp->rx_mbuf_chain[i].m_map); 6138 } 6139 6140 if (fp->rx_mbuf_chain[i].m != NULL) { 6141 m_freem(fp->rx_mbuf_chain[i].m); 6142 fp->rx_mbuf_chain[i].m = NULL; 6143 fp->eth_q_stats.mbuf_alloc_rx--; 6144 } 6145 } 6146} 6147 6148static void 6149bxe_free_tpa_pool(struct bxe_fastpath *fp) 6150{ 6151 struct bxe_softc *sc; 6152 int i, max_agg_queues; 6153 6154 sc = fp->sc; 6155 6156 if (fp->rx_mbuf_tag == NULL) { 6157 return; 6158 } 6159 6160 max_agg_queues = MAX_AGG_QS(sc); 6161 6162 /* release all mbufs and unload all DMA maps in the TPA pool */ 6163 for (i = 0; i < max_agg_queues; i++) { 6164 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6165 bus_dmamap_sync(fp->rx_mbuf_tag, 6166 fp->rx_tpa_info[i].bd.m_map, 6167 BUS_DMASYNC_POSTREAD); 6168 bus_dmamap_unload(fp->rx_mbuf_tag, 6169 fp->rx_tpa_info[i].bd.m_map); 6170 } 6171 6172 if (fp->rx_tpa_info[i].bd.m != NULL) { 6173 m_freem(fp->rx_tpa_info[i].bd.m); 6174 fp->rx_tpa_info[i].bd.m = NULL; 6175 fp->eth_q_stats.mbuf_alloc_tpa--; 6176 } 6177 } 6178} 6179 6180static void 6181bxe_free_sge_chain(struct bxe_fastpath *fp) 6182{ 6183 struct bxe_softc *sc; 6184 int i; 6185 6186 sc = fp->sc; 6187 6188 if (fp->rx_sge_mbuf_tag == NULL) { 6189 return; 6190 } 6191 6192 /* free all mbufs and unload all maps */ 6193 for (i = 0; i < RX_SGE_TOTAL; i++) { 6194 if (fp->rx_sge_mbuf_chain[i].m_map != 
NULL) { 6195 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6196 fp->rx_sge_mbuf_chain[i].m_map, 6197 BUS_DMASYNC_POSTREAD); 6198 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6199 fp->rx_sge_mbuf_chain[i].m_map); 6200 } 6201 6202 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6203 m_freem(fp->rx_sge_mbuf_chain[i].m); 6204 fp->rx_sge_mbuf_chain[i].m = NULL; 6205 fp->eth_q_stats.mbuf_alloc_sge--; 6206 } 6207 } 6208} 6209 6210static void 6211bxe_free_fp_buffers(struct bxe_softc *sc) 6212{ 6213 struct bxe_fastpath *fp; 6214 int i; 6215 6216 for (i = 0; i < sc->num_queues; i++) { 6217 fp = &sc->fp[i]; 6218 6219#if __FreeBSD_version >= 901504 6220 if (fp->tx_br != NULL) { 6221 /* just in case bxe_mq_flush() wasn't called */ 6222 if (mtx_initialized(&fp->tx_mtx)) { 6223 struct mbuf *m; 6224 6225 BXE_FP_TX_LOCK(fp); 6226 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6227 m_freem(m); 6228 BXE_FP_TX_UNLOCK(fp); 6229 } 6230 } 6231#endif 6232 6233 /* free all RX buffers */ 6234 bxe_free_rx_bd_chain(fp); 6235 bxe_free_tpa_pool(fp); 6236 bxe_free_sge_chain(fp); 6237 6238 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6239 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6240 fp->eth_q_stats.mbuf_alloc_rx); 6241 } 6242 6243 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6244 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6245 fp->eth_q_stats.mbuf_alloc_sge); 6246 } 6247 6248 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6249 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6250 fp->eth_q_stats.mbuf_alloc_tpa); 6251 } 6252 6253 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6254 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6255 fp->eth_q_stats.mbuf_alloc_tx); 6256 } 6257 6258 /* XXX verify all mbufs were reclaimed */ 6259 } 6260} 6261 6262static int 6263bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6264 uint16_t prev_index, 6265 uint16_t index) 6266{ 6267 struct bxe_sw_rx_bd *rx_buf; 6268 struct eth_rx_bd *rx_bd; 6269 bus_dma_segment_t segs[1]; 6270 bus_dmamap_t map; 6271 struct mbuf *m; 6272 int nsegs, rc; 6273 6274 rc = 0; 6275 6276 /* allocate the new RX BD mbuf */ 6277 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6278 if (__predict_false(m == NULL)) { 6279 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6280 return (ENOBUFS); 6281 } 6282 6283 fp->eth_q_stats.mbuf_alloc_rx++; 6284 6285 /* initialize the mbuf buffer length */ 6286 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6287 6288 /* map the mbuf into non-paged pool */ 6289 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6290 fp->rx_mbuf_spare_map, 6291 m, segs, &nsegs, BUS_DMA_NOWAIT); 6292 if (__predict_false(rc != 0)) { 6293 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6294 m_freem(m); 6295 fp->eth_q_stats.mbuf_alloc_rx--; 6296 return (rc); 6297 } 6298 6299 /* all mbufs must map to a single segment */ 6300 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6301 6302 /* release any existing RX BD mbuf mappings */ 6303 6304 if (prev_index != index) { 6305 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6306 6307 if (rx_buf->m_map != NULL) { 6308 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6309 BUS_DMASYNC_POSTREAD); 6310 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6311 } 6312 6313 /* 6314 * We only get here from bxe_rxeof() when the maximum number 6315 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6316 * holds the mbuf in the prev_index so it's OK to NULL it out 6317 * here without concern of a memory leak. 
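* Note also the spare-map rotation used below: the new mbuf is always loaded into rx_mbuf_spare_map first, so a DMA mapping failure leaves the ring entry's existing mapping untouched; only after a successful load are the ring map and the spare map exchanged.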
6318 */ 6319 fp->rx_mbuf_chain[prev_index].m = NULL; 6320 } 6321 6322 rx_buf = &fp->rx_mbuf_chain[index]; 6323 6324 if (rx_buf->m_map != NULL) { 6325 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6326 BUS_DMASYNC_POSTREAD); 6327 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6328 } 6329 6330 /* save the mbuf and mapping info for a future packet */ 6331 map = (prev_index != index) ? 6332 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6333 rx_buf->m_map = fp->rx_mbuf_spare_map; 6334 fp->rx_mbuf_spare_map = map; 6335 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6336 BUS_DMASYNC_PREREAD); 6337 rx_buf->m = m; 6338 6339 rx_bd = &fp->rx_chain[index]; 6340 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6341 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6342 6343 return (rc); 6344} 6345 6346static int 6347bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6348 int queue) 6349{ 6350 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6351 bus_dma_segment_t segs[1]; 6352 bus_dmamap_t map; 6353 struct mbuf *m; 6354 int nsegs; 6355 int rc = 0; 6356 6357 /* allocate the new TPA mbuf */ 6358 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6359 if (__predict_false(m == NULL)) { 6360 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6361 return (ENOBUFS); 6362 } 6363 6364 fp->eth_q_stats.mbuf_alloc_tpa++; 6365 6366 /* initialize the mbuf buffer length */ 6367 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6368 6369 /* map the mbuf into non-paged pool */ 6370 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6371 fp->rx_tpa_info_mbuf_spare_map, 6372 m, segs, &nsegs, BUS_DMA_NOWAIT); 6373 if (__predict_false(rc != 0)) { 6374 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6375 m_free(m); 6376 fp->eth_q_stats.mbuf_alloc_tpa--; 6377 return (rc); 6378 } 6379 6380 /* all mbufs must map to a single segment */ 6381 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6382 6383 /* release any existing TPA mbuf mapping */ 6384 if (tpa_info->bd.m_map != NULL) { 6385 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6386 BUS_DMASYNC_POSTREAD); 6387 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6388 } 6389 6390 /* save the mbuf and mapping info for the TPA mbuf */ 6391 map = tpa_info->bd.m_map; 6392 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6393 fp->rx_tpa_info_mbuf_spare_map = map; 6394 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6395 BUS_DMASYNC_PREREAD); 6396 tpa_info->bd.m = m; 6397 tpa_info->seg = segs[0]; 6398 6399 return (rc); 6400} 6401 6402/* 6403 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6404 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6405 * chain. 
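* Each SGE entry is a full SGE_PAGE_SIZE page used by TPA to land aggregated payload, and the same spare-map scheme as the RX BD path applies: a mapping failure never disturbs the entry already in the chain.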
6406 */ 6407static int 6408bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6409 uint16_t index) 6410{ 6411 struct bxe_sw_rx_bd *sge_buf; 6412 struct eth_rx_sge *sge; 6413 bus_dma_segment_t segs[1]; 6414 bus_dmamap_t map; 6415 struct mbuf *m; 6416 int nsegs; 6417 int rc = 0; 6418 6419 /* allocate a new SGE mbuf */ 6420 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6421 if (__predict_false(m == NULL)) { 6422 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6423 return (ENOMEM); 6424 } 6425 6426 fp->eth_q_stats.mbuf_alloc_sge++; 6427 6428 /* initialize the mbuf buffer length */ 6429 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6430 6431 /* map the SGE mbuf into non-paged pool */ 6432 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6433 fp->rx_sge_mbuf_spare_map, 6434 m, segs, &nsegs, BUS_DMA_NOWAIT); 6435 if (__predict_false(rc != 0)) { 6436 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6437 m_freem(m); 6438 fp->eth_q_stats.mbuf_alloc_sge--; 6439 return (rc); 6440 } 6441 6442 /* all mbufs must map to a single segment */ 6443 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6444 6445 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6446 6447 /* release any existing SGE mbuf mapping */ 6448 if (sge_buf->m_map != NULL) { 6449 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6450 BUS_DMASYNC_POSTREAD); 6451 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6452 } 6453 6454 /* save the mbuf and mapping info for a future packet */ 6455 map = sge_buf->m_map; 6456 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6457 fp->rx_sge_mbuf_spare_map = map; 6458 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6459 BUS_DMASYNC_PREREAD); 6460 sge_buf->m = m; 6461 6462 sge = &fp->rx_sge_chain[index]; 6463 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6464 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6465 6466 return (rc); 6467} 6468 6469static __noinline int 6470bxe_alloc_fp_buffers(struct bxe_softc *sc) 6471{ 6472 struct bxe_fastpath *fp; 6473 int i, j, rc = 0; 6474 int ring_prod, cqe_ring_prod; 6475 int max_agg_queues; 6476 6477 for (i = 0; i < sc->num_queues; i++) { 6478 fp = &sc->fp[i]; 6479 6480 ring_prod = cqe_ring_prod = 0; 6481 fp->rx_bd_cons = 0; 6482 fp->rx_cq_cons = 0; 6483 6484 /* allocate buffers for the RX BDs in RX BD chain */ 6485 for (j = 0; j < sc->max_rx_bufs; j++) { 6486 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6487 if (rc != 0) { 6488 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6489 i, rc); 6490 goto bxe_alloc_fp_buffers_error; 6491 } 6492 6493 ring_prod = RX_BD_NEXT(ring_prod); 6494 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6495 } 6496 6497 fp->rx_bd_prod = ring_prod; 6498 fp->rx_cq_prod = cqe_ring_prod; 6499 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6500 6501 max_agg_queues = MAX_AGG_QS(sc); 6502 6503 fp->tpa_enable = TRUE; 6504 6505 /* fill the TPA pool */ 6506 for (j = 0; j < max_agg_queues; j++) { 6507 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6508 if (rc != 0) { 6509 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6510 i, j); 6511 fp->tpa_enable = FALSE; 6512 goto bxe_alloc_fp_buffers_error; 6513 } 6514 6515 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6516 } 6517 6518 if (fp->tpa_enable) { 6519 /* fill the RX SGE chain */ 6520 ring_prod = 0; 6521 for (j = 0; j < RX_SGE_USABLE; j++) { 6522 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6523 if (rc != 0) { 6524 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6525 i, ring_prod); 6526 fp->tpa_enable = FALSE; 6527 ring_prod = 0; 6528 goto 
bxe_alloc_fp_buffers_error; 6529 } 6530 6531 ring_prod = RX_SGE_NEXT(ring_prod); 6532 } 6533 6534 fp->rx_sge_prod = ring_prod; 6535 } 6536 } 6537 6538 return (0); 6539 6540bxe_alloc_fp_buffers_error: 6541 6542 /* unwind what was already allocated */ 6543 bxe_free_rx_bd_chain(fp); 6544 bxe_free_tpa_pool(fp); 6545 bxe_free_sge_chain(fp); 6546 6547 return (ENOBUFS); 6548} 6549 6550static void 6551bxe_free_fw_stats_mem(struct bxe_softc *sc) 6552{ 6553 bxe_dma_free(sc, &sc->fw_stats_dma); 6554 6555 sc->fw_stats_num = 0; 6556 6557 sc->fw_stats_req_size = 0; 6558 sc->fw_stats_req = NULL; 6559 sc->fw_stats_req_mapping = 0; 6560 6561 sc->fw_stats_data_size = 0; 6562 sc->fw_stats_data = NULL; 6563 sc->fw_stats_data_mapping = 0; 6564} 6565 6566static int 6567bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6568{ 6569 uint8_t num_queue_stats; 6570 int num_groups; 6571 6572 /* number of queues for statistics is number of eth queues */ 6573 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6574 6575 /* 6576 * Total number of FW statistics requests = 6577 * 1 for port stats + 1 for PF stats + num of queues 6578 */ 6579 sc->fw_stats_num = (2 + num_queue_stats); 6580 6581 /* 6582 * Request is built from stats_query_header and an array of 6583 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6584 * rules. The real number of requests is configured in the 6585 * stats_query_header. 6586 */ 6587 num_groups = 6588 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6589 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6590 6591 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6592 sc->fw_stats_num, num_groups); 6593 6594 sc->fw_stats_req_size = 6595 (sizeof(struct stats_query_header) + 6596 (num_groups * sizeof(struct stats_query_cmd_group))); 6597 6598 /* 6599 * Data for statistics requests + stats_counter. 6600 * stats_counter holds per-STORM counters that are incremented when 6601 * STORM has finished with the current request. Memory for FCoE 6602 * offloaded statistics is counted anyway, even if it will not be sent. 6603 * VF stats are not accounted for here as the data of VF stats is stored 6604 * in memory allocated by the VF, not here. 6605 */ 6606 sc->fw_stats_data_size = 6607 (sizeof(struct stats_counter) + 6608 sizeof(struct per_port_stats) + 6609 sizeof(struct per_pf_stats) + 6610 /* sizeof(struct fcoe_statistics_params) + */ 6611 (sizeof(struct per_queue_stats) * num_queue_stats)); 6612 6613 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6614 &sc->fw_stats_dma, "fw stats") != 0) { 6615 bxe_free_fw_stats_mem(sc); 6616 return (-1); 6617 } 6618 6619 /* set up the shortcuts */ 6620 6621 sc->fw_stats_req = 6622 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6623 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6624 6625 sc->fw_stats_data = 6626 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6627 sc->fw_stats_req_size); 6628 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6629 sc->fw_stats_req_size); 6630 6631 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6632 (uintmax_t)sc->fw_stats_req_mapping); 6633 6634 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6635 (uintmax_t)sc->fw_stats_data_mapping); 6636 6637 return (0); 6638} 6639 6640/* 6641 * Bits map: 6642 * 0-7 - Engine0 load counter. 6643 * 8-15 - Engine1 load counter. 6644 * 16 - Engine0 RESET_IN_PROGRESS bit. 6645 * 17 - Engine1 RESET_IN_PROGRESS bit. 6646 * 18 - Engine0 ONE_IS_LOADED. 
Set when there is at least one active 6647 * function on the engine 6648 * 19 - Engine1 ONE_IS_LOADED. 6649 * 20 - Chip reset flow bit. When set, the non-leader must wait for both engines' 6650 * leaders to complete (check for both RESET_IN_PROGRESS bits and not 6651 * for just the one belonging to its engine). 6652 */ 6653#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 6654#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 6655#define BXE_PATH0_LOAD_CNT_SHIFT 0 6656#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 6657#define BXE_PATH1_LOAD_CNT_SHIFT 8 6658#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 6659#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 6660#define BXE_GLOBAL_RESET_BIT 0x00040000 6661 6662/* set the GLOBAL_RESET bit, should be run under rtnl lock */ 6663static void 6664bxe_set_reset_global(struct bxe_softc *sc) 6665{ 6666 uint32_t val; 6667 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6668 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6669 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 6670 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6671} 6672 6673/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 6674static void 6675bxe_clear_reset_global(struct bxe_softc *sc) 6676{ 6677 uint32_t val; 6678 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6679 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6680 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 6681 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6682} 6683 6684/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 6685static uint8_t 6686bxe_reset_is_global(struct bxe_softc *sc) 6687{ 6688 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6689 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 6690 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 6691} 6692 6693/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 6694static void 6695bxe_set_reset_done(struct bxe_softc *sc) 6696{ 6697 uint32_t val; 6698 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6699 BXE_PATH0_RST_IN_PROG_BIT; 6700 6701 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6702 6703 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6704 /* Clear the bit */ 6705 val &= ~bit; 6706 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6707 6708 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6709} 6710 6711/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 6712static void 6713bxe_set_reset_in_progress(struct bxe_softc *sc) 6714{ 6715 uint32_t val; 6716 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6717 BXE_PATH0_RST_IN_PROG_BIT; 6718 6719 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6720 6721 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6722 /* Set the bit */ 6723 val |= bit; 6724 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6725 6726 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6727} 6728 6729/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 6730static uint8_t 6731bxe_reset_is_done(struct bxe_softc *sc, 6732 int engine) 6733{ 6734 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6735 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 6736 BXE_PATH0_RST_IN_PROG_BIT; 6737 6738 /* return false if bit is set */ 6739 return (val & bit) ? FALSE : TRUE; 6740} 6741 6742/* get the load status for an engine, should be run under rtnl lock */ 6743static uint8_t 6744bxe_get_load_status(struct bxe_softc *sc, 6745 int engine) 6746{ 6747 uint32_t mask = engine ? 
BXE_PATH1_LOAD_CNT_MASK : 6748 BXE_PATH0_LOAD_CNT_MASK; 6749 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 6750 BXE_PATH0_LOAD_CNT_SHIFT; 6751 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6752 6753 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6754 6755 val = ((val & mask) >> shift); 6756 6757 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 6758 6759 return (val != 0); 6760} 6761 6762/* set pf load mark */ 6763/* XXX needs to be under rtnl lock */ 6764static void 6765bxe_set_pf_load(struct bxe_softc *sc) 6766{ 6767 uint32_t val; 6768 uint32_t val1; 6769 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6770 BXE_PATH0_LOAD_CNT_MASK; 6771 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6772 BXE_PATH0_LOAD_CNT_SHIFT; 6773 6774 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6775 6776 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6777 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6778 6779 /* get the current counter value */ 6780 val1 = ((val & mask) >> shift); 6781 6782 /* set bit of this PF */ 6783 val1 |= (1 << SC_ABS_FUNC(sc)); 6784 6785 /* clear the old value */ 6786 val &= ~mask; 6787 6788 /* set the new one */ 6789 val |= ((val1 << shift) & mask); 6790 6791 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6792 6793 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6794} 6795 6796/* clear pf load mark */ 6797/* XXX needs to be under rtnl lock */ 6798static uint8_t 6799bxe_clear_pf_load(struct bxe_softc *sc) 6800{ 6801 uint32_t val1, val; 6802 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6803 BXE_PATH0_LOAD_CNT_MASK; 6804 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6805 BXE_PATH0_LOAD_CNT_SHIFT; 6806 6807 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6808 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6809 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 6810 6811 /* get the current counter value */ 6812 val1 = (val & mask) >> shift; 6813 6814 /* clear bit of that PF */ 6815 val1 &= ~(1 << SC_ABS_FUNC(sc)); 6816 6817 /* clear the old value */ 6818 val &= ~mask; 6819 6820 /* set the new one */ 6821 val |= ((val1 << shift) & mask); 6822 6823 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6824 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6825 return (val1 != 0); 6826} 6827 6828/* send load request to mcp and analyze response */ 6829static int 6830bxe_nic_load_request(struct bxe_softc *sc, 6831 uint32_t *load_code) 6832{ 6833 /* init fw_seq */ 6834 sc->fw_seq = 6835 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 6836 DRV_MSG_SEQ_NUMBER_MASK); 6837 6838 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 6839 6840 /* get the current FW pulse sequence */ 6841 sc->fw_drv_pulse_wr_seq = 6842 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 6843 DRV_PULSE_SEQ_MASK); 6844 6845 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 6846 sc->fw_drv_pulse_wr_seq); 6847 6848 /* load request */ 6849 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6850 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6851 6852 /* if the MCP fails to respond we must abort */ 6853 if (!(*load_code)) { 6854 BLOGE(sc, "MCP response failure!\n"); 6855 return (-1); 6856 } 6857 6858 /* if the MCP refused then we must abort */ 6859 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 6860 BLOGE(sc, "MCP refused load request\n"); 6861 return (-1); 6862 } 6863 6864 return (0); 6865} 6866 6867/* 6868 * Check whether another PF has already loaded FW to chip. 
In virtualized 6869 * environments a pf from another VM may have already initialized the device 6870 * including loading FW. 6871 */ 6872static int 6873bxe_nic_load_analyze_req(struct bxe_softc *sc, 6874 uint32_t load_code) 6875{ 6876 uint32_t my_fw, loaded_fw; 6877 6878 /* is another pf loaded on this engine? */ 6879 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6880 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6881 /* build my FW version dword */ 6882 my_fw = (BCM_5710_FW_MAJOR_VERSION + 6883 (BCM_5710_FW_MINOR_VERSION << 8 ) + 6884 (BCM_5710_FW_REVISION_VERSION << 16) + 6885 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 6886 6887 /* read loaded FW from chip */ 6888 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 6889 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 6890 loaded_fw, my_fw); 6891 6892 /* abort nic load if version mismatch */ 6893 if (my_fw != loaded_fw) { 6894 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)", 6895 loaded_fw, my_fw); 6896 return (-1); 6897 } 6898 } 6899 6900 return (0); 6901} 6902 6903/* mark PMF if applicable */ 6904static void 6905bxe_nic_load_pmf(struct bxe_softc *sc, 6906 uint32_t load_code) 6907{ 6908 uint32_t ncsi_oem_data_addr; 6909 6910 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 6911 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 6912 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 6913 /* 6914 * Barrier here for ordering between the writing to sc->port.pmf here 6915 * and reading it from the periodic task. 6916 */ 6917 sc->port.pmf = 1; 6918 mb(); 6919 } else { 6920 sc->port.pmf = 0; 6921 } 6922 6923 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 6924 6925 /* XXX needed? */ 6926 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 6927 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 6928 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 6929 if (ncsi_oem_data_addr) { 6930 REG_WR(sc, 6931 (ncsi_oem_data_addr + 6932 offsetof(struct glob_ncsi_oem_data, driver_version)), 6933 0); 6934 } 6935 } 6936 } 6937} 6938 6939static void 6940bxe_read_mf_cfg(struct bxe_softc *sc) 6941{ 6942 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 6943 int abs_func; 6944 int vn; 6945 6946 if (BXE_NOMCP(sc)) { 6947 return; /* what should be the default value in this case */ 6948 } 6949 6950 /* 6951 * The formula for computing the absolute function number is... 
 6952 * For 2 port configuration (4 functions per port): 6953 * abs_func = 2 * vn + SC_PORT + SC_PATH 6954 * For 4 port configuration (2 functions per port): 6955 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 6956 */ 6957 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 6958 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 6959 if (abs_func >= E1H_FUNC_MAX) { 6960 break; 6961 } 6962 sc->devinfo.mf_info.mf_config[vn] = 6963 MFCFG_RD(sc, func_mf_config[abs_func].config); 6964 } 6965 6966 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 6967 FUNC_MF_CFG_FUNC_DISABLED) { 6968 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 6969 sc->flags |= BXE_MF_FUNC_DIS; 6970 } else { 6971 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 6972 sc->flags &= ~BXE_MF_FUNC_DIS; 6973 } 6974} 6975 6976/* acquire split MCP access lock register */ 6977static int bxe_acquire_alr(struct bxe_softc *sc) 6978{ 6979 uint32_t j, val; 6980 6981 for (j = 0; j < 1000; j++) { 6982 val = (1UL << 31); 6983 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 6984 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 6985 if (val & (1L << 31)) 6986 break; 6987 6988 DELAY(5000); 6989 } 6990 6991 if (!(val & (1L << 31))) { 6992 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 6993 return (-1); 6994 } 6995 6996 return (0); 6997} 6998 6999/* release split MCP access lock register */ 7000static void bxe_release_alr(struct bxe_softc *sc) 7001{ 7002 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7003} 7004 7005static void 7006bxe_fan_failure(struct bxe_softc *sc) 7007{ 7008 int port = SC_PORT(sc); 7009 uint32_t ext_phy_config; 7010 7011 /* mark the failure */ 7012 ext_phy_config = 7013 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7014 7015 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7016 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7017 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7018 ext_phy_config); 7019 7020 /* log the failure */ 7021 BLOGW(sc, "Fan Failure has caused the driver to shut down " 7022 "the card to prevent permanent damage. " 7023 "Please contact OEM Support for assistance\n"); 7024 7025 /* XXX */ 7026#if 1 7027 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7028#else 7029 /* 7030 * Schedule device reset (unload). 7031 * Some boards draw enough power while the driver is up to 7032 * overheat if the fan fails. 
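* (This unload path is compiled out and still uses a non-native scheduling call (schedule_delayed_work); the FreeBSD port currently takes the #if 1 branch above and panics instead.)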
7033 */ 7034 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7035 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7036#endif 7037} 7038 7039/* this function is called upon a link interrupt */ 7040static void 7041bxe_link_attn(struct bxe_softc *sc) 7042{ 7043 uint32_t pause_enabled = 0; 7044 struct host_port_stats *pstats; 7045 int cmng_fns; 7046 struct bxe_fastpath *fp; 7047 int i; 7048 7049 /* Make sure that we are synced with the current statistics */ 7050 bxe_stats_handle(sc, STATS_EVENT_STOP); 7051 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); 7052 elink_link_update(&sc->link_params, &sc->link_vars); 7053 7054 if (sc->link_vars.link_up) { 7055 7056 /* dropless flow control */ 7057 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7058 pause_enabled = 0; 7059 7060 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7061 pause_enabled = 1; 7062 } 7063 7064 REG_WR(sc, 7065 (BAR_USTRORM_INTMEM + 7066 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7067 pause_enabled); 7068 } 7069 7070 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7071 pstats = BXE_SP(sc, port_stats); 7072 /* reset old mac stats */ 7073 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7074 } 7075 7076 if (sc->state == BXE_STATE_OPEN) { 7077 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7078 /* Restart tx when the link comes back. */ 7079 FOR_EACH_ETH_QUEUE(sc, i) { 7080 fp = &sc->fp[i]; 7081 taskqueue_enqueue(fp->tq, &fp->tx_task); 7082 } 7083 } 7084 7085 } 7086 7087 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7088 cmng_fns = bxe_get_cmng_fns_mode(sc); 7089 7090 if (cmng_fns != CMNG_FNS_NONE) { 7091 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7092 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7093 } else { 7094 /* rate shaping and fairness are disabled */ 7095 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7096 } 7097 } 7098 7099 bxe_link_report_locked(sc); 7100 7101 if (IS_MF(sc)) { 7102 ; // XXX bxe_link_sync_notify(sc); 7103 } 7104} 7105 7106static void 7107bxe_attn_int_asserted(struct bxe_softc *sc, 7108 uint32_t asserted) 7109{ 7110 int port = SC_PORT(sc); 7111 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7112 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7113 uint32_t nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 : 7114 NIG_REG_MASK_INTERRUPT_PORT0; 7115 uint32_t aeu_mask; 7116 uint32_t nig_mask = 0; 7117 uint32_t reg_addr; 7118 uint32_t igu_acked; 7119 uint32_t cnt; 7120 7121 if (sc->attn_state & asserted) { 7122 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7123 } 7124 7125 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7126 7127 aeu_mask = REG_RD(sc, aeu_addr); 7128 7129 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7130 aeu_mask, asserted); 7131 7132 aeu_mask &= ~(asserted & 0x3ff); 7133 7134 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7135 7136 REG_WR(sc, aeu_addr, aeu_mask); 7137 7138 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7139 7140 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7141 sc->attn_state |= asserted; 7142 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7143 7144 if (asserted & ATTN_HARD_WIRED_MASK) { 7145 if (asserted & ATTN_NIG_FOR_FUNC) { 7146 7147 bxe_acquire_phy_lock(sc); 7148 /* save nig interrupt mask */ 7149 nig_mask = REG_RD(sc, nig_int_mask_addr); 7150 7151 /* If nig_mask is not set, no need to call the update function */ 7152 if (nig_mask) { 7153 REG_WR(sc, nig_int_mask_addr, 0); 7154 7155 bxe_link_attn(sc); 7156 } 7157 7158 /* handle unicore attn? */ 7159 } 7160 7161 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7162 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7163 } 7164 7165 if (asserted & GPIO_2_FUNC) { 7166 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7167 } 7168 7169 if (asserted & GPIO_3_FUNC) { 7170 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7171 } 7172 7173 if (asserted & GPIO_4_FUNC) { 7174 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7175 } 7176 7177 if (port == 0) { 7178 if (asserted & ATTN_GENERAL_ATTN_1) { 7179 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7180 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7181 } 7182 if (asserted & ATTN_GENERAL_ATTN_2) { 7183 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7184 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7185 } 7186 if (asserted & ATTN_GENERAL_ATTN_3) { 7187 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7188 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7189 } 7190 } else { 7191 if (asserted & ATTN_GENERAL_ATTN_4) { 7192 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7193 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7194 } 7195 if (asserted & ATTN_GENERAL_ATTN_5) { 7196 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7197 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7198 } 7199 if (asserted & ATTN_GENERAL_ATTN_6) { 7200 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7201 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7202 } 7203 } 7204 } /* hardwired */ 7205 7206 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7207 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7208 } else { 7209 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7210 } 7211 7212 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7213 asserted, 7214 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7215 REG_WR(sc, reg_addr, asserted); 7216 7217 /* now set back the mask */ 7218 if (asserted & ATTN_NIG_FOR_FUNC) { 7219 /* 7220 * Verify that IGU ack through BAR was written before restoring 7221 * NIG mask. This loop should exit after 2-3 iterations max. 
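* The poll below is bounded by MAX_IGU_ATTN_ACK_TO reads of IGU_REG_ATTENTION_ACK_BITS; if the ack never shows up we log an error and restore the NIG mask anyway rather than stall the attention handler.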
7222 */ 7223 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7224 cnt = 0; 7225 7226 do { 7227 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7228 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7229 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7230 7231 if (!igu_acked) { 7232 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7233 } 7234 7235 mb(); 7236 } 7237 7238 REG_WR(sc, nig_int_mask_addr, nig_mask); 7239 7240 bxe_release_phy_lock(sc); 7241 } 7242} 7243 7244static void 7245bxe_print_next_block(struct bxe_softc *sc, 7246 int idx, 7247 const char *blk) 7248{ 7249 BLOGI(sc, "%s%s", idx ? ", " : "", blk); 7250} 7251 7252static int 7253bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7254 uint32_t sig, 7255 int par_num, 7256 uint8_t print) 7257{ 7258 uint32_t cur_bit = 0; 7259 int i = 0; 7260 7261 for (i = 0; sig; i++) { 7262 cur_bit = ((uint32_t)0x1 << i); 7263 if (sig & cur_bit) { 7264 switch (cur_bit) { 7265 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7266 if (print) 7267 bxe_print_next_block(sc, par_num++, "BRB"); 7268 break; 7269 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7270 if (print) 7271 bxe_print_next_block(sc, par_num++, "PARSER"); 7272 break; 7273 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7274 if (print) 7275 bxe_print_next_block(sc, par_num++, "TSDM"); 7276 break; 7277 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7278 if (print) 7279 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7280 break; 7281 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7282 if (print) 7283 bxe_print_next_block(sc, par_num++, "TCM"); 7284 break; 7285 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7286 if (print) 7287 bxe_print_next_block(sc, par_num++, "TSEMI"); 7288 break; 7289 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7290 if (print) 7291 bxe_print_next_block(sc, par_num++, "XPB"); 7292 break; 7293 } 7294 7295 /* Clear the bit */ 7296 sig &= ~cur_bit; 7297 } 7298 } 7299 7300 return (par_num); 7301} 7302 7303static int 7304bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7305 uint32_t sig, 7306 int par_num, 7307 uint8_t *global, 7308 uint8_t print) 7309{ 7310 int i = 0; 7311 uint32_t cur_bit = 0; 7312 for (i = 0; sig; i++) { 7313 cur_bit = ((uint32_t)0x1 << i); 7314 if (sig & cur_bit) { 7315 switch (cur_bit) { 7316 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7317 if (print) 7318 bxe_print_next_block(sc, par_num++, "PBF"); 7319 break; 7320 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7321 if (print) 7322 bxe_print_next_block(sc, par_num++, "QM"); 7323 break; 7324 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7325 if (print) 7326 bxe_print_next_block(sc, par_num++, "TM"); 7327 break; 7328 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7329 if (print) 7330 bxe_print_next_block(sc, par_num++, "XSDM"); 7331 break; 7332 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7333 if (print) 7334 bxe_print_next_block(sc, par_num++, "XCM"); 7335 break; 7336 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7337 if (print) 7338 bxe_print_next_block(sc, par_num++, "XSEMI"); 7339 break; 7340 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7341 if (print) 7342 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7343 break; 7344 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7345 if (print) 7346 bxe_print_next_block(sc, par_num++, "NIG"); 7347 break; 7348 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7349 if (print) 7350 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7351 *global = TRUE; 7352 break; 7353 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7354 if (print) 7355 bxe_print_next_block(sc, par_num++, 
"DEBUG"); 7356 break; 7357 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7358 if (print) 7359 bxe_print_next_block(sc, par_num++, "USDM"); 7360 break; 7361 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7362 if (print) 7363 bxe_print_next_block(sc, par_num++, "UCM"); 7364 break; 7365 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7366 if (print) 7367 bxe_print_next_block(sc, par_num++, "USEMI"); 7368 break; 7369 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7370 if (print) 7371 bxe_print_next_block(sc, par_num++, "UPB"); 7372 break; 7373 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7374 if (print) 7375 bxe_print_next_block(sc, par_num++, "CSDM"); 7376 break; 7377 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7378 if (print) 7379 bxe_print_next_block(sc, par_num++, "CCM"); 7380 break; 7381 } 7382 7383 /* Clear the bit */ 7384 sig &= ~cur_bit; 7385 } 7386 } 7387 7388 return (par_num); 7389} 7390 7391static int 7392bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7393 uint32_t sig, 7394 int par_num, 7395 uint8_t print) 7396{ 7397 uint32_t cur_bit = 0; 7398 int i = 0; 7399 7400 for (i = 0; sig; i++) { 7401 cur_bit = ((uint32_t)0x1 << i); 7402 if (sig & cur_bit) { 7403 switch (cur_bit) { 7404 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7405 if (print) 7406 bxe_print_next_block(sc, par_num++, "CSEMI"); 7407 break; 7408 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7409 if (print) 7410 bxe_print_next_block(sc, par_num++, "PXP"); 7411 break; 7412 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7413 if (print) 7414 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7415 break; 7416 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7417 if (print) 7418 bxe_print_next_block(sc, par_num++, "CFC"); 7419 break; 7420 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7421 if (print) 7422 bxe_print_next_block(sc, par_num++, "CDU"); 7423 break; 7424 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7425 if (print) 7426 bxe_print_next_block(sc, par_num++, "DMAE"); 7427 break; 7428 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7429 if (print) 7430 bxe_print_next_block(sc, par_num++, "IGU"); 7431 break; 7432 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7433 if (print) 7434 bxe_print_next_block(sc, par_num++, "MISC"); 7435 break; 7436 } 7437 7438 /* Clear the bit */ 7439 sig &= ~cur_bit; 7440 } 7441 } 7442 7443 return (par_num); 7444} 7445 7446static int 7447bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7448 uint32_t sig, 7449 int par_num, 7450 uint8_t *global, 7451 uint8_t print) 7452{ 7453 uint32_t cur_bit = 0; 7454 int i = 0; 7455 7456 for (i = 0; sig; i++) { 7457 cur_bit = ((uint32_t)0x1 << i); 7458 if (sig & cur_bit) { 7459 switch (cur_bit) { 7460 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7461 if (print) 7462 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7463 *global = TRUE; 7464 break; 7465 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7466 if (print) 7467 bxe_print_next_block(sc, par_num++, 7468 "MCP UMP RX"); 7469 *global = TRUE; 7470 break; 7471 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7472 if (print) 7473 bxe_print_next_block(sc, par_num++, 7474 "MCP UMP TX"); 7475 *global = TRUE; 7476 break; 7477 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7478 if (print) 7479 bxe_print_next_block(sc, par_num++, 7480 "MCP SCPAD"); 7481 *global = TRUE; 7482 break; 7483 } 7484 7485 /* Clear the bit */ 7486 sig &= ~cur_bit; 7487 } 7488 } 7489 7490 return (par_num); 7491} 7492 7493static int 7494bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7495 uint32_t sig, 7496 int par_num, 7497 
uint8_t print) 7498{ 7499 uint32_t cur_bit = 0; 7500 int i = 0; 7501 7502 for (i = 0; sig; i++) { 7503 cur_bit = ((uint32_t)0x1 << i); 7504 if (sig & cur_bit) { 7505 switch (cur_bit) { 7506 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7507 if (print) 7508 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7509 break; 7510 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7511 if (print) 7512 bxe_print_next_block(sc, par_num++, "ATC"); 7513 break; 7514 } 7515 7516 /* Clear the bit */ 7517 sig &= ~cur_bit; 7518 } 7519 } 7520 7521 return (par_num); 7522} 7523 7524static uint8_t 7525bxe_parity_attn(struct bxe_softc *sc, 7526 uint8_t *global, 7527 uint8_t print, 7528 uint32_t *sig) 7529{ 7530 int par_num = 0; 7531 7532 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7533 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7534 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7535 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7536 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7537 BLOGE(sc, "Parity error: HW block parity attention:\n" 7538 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7539 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7540 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7541 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7542 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7543 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7544 7545 if (print) 7546 BLOGI(sc, "Parity errors detected in blocks: "); 7547 7548 par_num = 7549 bxe_check_blocks_with_parity0(sc, sig[0] & 7550 HW_PRTY_ASSERT_SET_0, 7551 par_num, print); 7552 par_num = 7553 bxe_check_blocks_with_parity1(sc, sig[1] & 7554 HW_PRTY_ASSERT_SET_1, 7555 par_num, global, print); 7556 par_num = 7557 bxe_check_blocks_with_parity2(sc, sig[2] & 7558 HW_PRTY_ASSERT_SET_2, 7559 par_num, print); 7560 par_num = 7561 bxe_check_blocks_with_parity3(sc, sig[3] & 7562 HW_PRTY_ASSERT_SET_3, 7563 par_num, global, print); 7564 par_num = 7565 bxe_check_blocks_with_parity4(sc, sig[4] & 7566 HW_PRTY_ASSERT_SET_4, 7567 par_num, print); 7568 7569 if (print) 7570 BLOGI(sc, "\n"); 7571 7572 if( *global == TRUE ) { 7573 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); 7574 } 7575 7576 return (TRUE); 7577 } 7578 7579 return (FALSE); 7580} 7581 7582static uint8_t 7583bxe_chk_parity_attn(struct bxe_softc *sc, 7584 uint8_t *global, 7585 uint8_t print) 7586{ 7587 struct attn_route attn = { {0} }; 7588 int port = SC_PORT(sc); 7589 7590 if(sc->state != BXE_STATE_OPEN) 7591 return FALSE; 7592 7593 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7594 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7595 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7596 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7597 7598 /* 7599 * Since MCP attentions can't be disabled inside the block, we need to 7600 * read AEU registers to see whether they're currently disabled 7601 */ 7602 attn.sig[3] &= ((REG_RD(sc, (!port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 7603 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & 7604 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 7605 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 7606 7607 7608 if (!CHIP_IS_E1x(sc)) 7609 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7610 7611 return (bxe_parity_attn(sc, global, print, attn.sig)); 7612} 7613 7614static void 7615bxe_attn_int_deasserted4(struct bxe_softc *sc, 7616 uint32_t attn) 7617{ 7618 uint32_t val; 7619 boolean_t err_flg = FALSE; 7620 7621 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7622 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7623 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7624 err_flg = TRUE; 7625 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7626 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7627 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7628 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7629 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7630 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7631 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7632 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7633 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7634 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7635 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7636 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7637 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7638 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7639 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7640 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7641 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7642 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7643 } 7644 7645 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7646 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7647 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7648 err_flg = TRUE; 7649 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7650 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7651 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7652 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7653 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7654 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7655 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7656 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7657 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7658 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7659 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7660 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7661 } 7662 7663 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7664 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7665 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7666 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7667 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7668 err_flg = TRUE; 7669 } 7670 if (err_flg) { 7671 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); 7672 taskqueue_enqueue_timeout(taskqueue_thread, 7673 &sc->sp_err_timeout_task, hz/10); 7674 } 7675 7676} 7677 7678static void 7679bxe_e1h_disable(struct bxe_softc *sc) 7680{ 7681 int port = SC_PORT(sc); 7682 7683 bxe_tx_disable(sc); 7684 7685 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7686} 7687 7688static void 7689bxe_e1h_enable(struct bxe_softc *sc) 7690{ 7691 int port = SC_PORT(sc); 
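    /* re-admit RX traffic for this function in the NIG LLH; mirror image of bxe_e1h_disable() above (TX re-enable is still an XXX below) */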
 7692 7693 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7694 7695 // XXX bxe_tx_enable(sc); 7696} 7697 7698/* 7699 * called due to MCP event (on pmf): 7700 * reread new bandwidth configuration 7701 * configure FW 7702 * notify other functions about the change 7703 */ 7704static void 7705bxe_config_mf_bw(struct bxe_softc *sc) 7706{ 7707 if (sc->link_vars.link_up) { 7708 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 7709 // XXX bxe_link_sync_notify(sc); 7710 } 7711 7712 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7713} 7714 7715static void 7716bxe_set_mf_bw(struct bxe_softc *sc) 7717{ 7718 bxe_config_mf_bw(sc); 7719 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 7720} 7721 7722static void 7723bxe_handle_eee_event(struct bxe_softc *sc) 7724{ 7725 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 7726 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 7727} 7728 7729#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 7730 7731static void 7732bxe_drv_info_ether_stat(struct bxe_softc *sc) 7733{ 7734 struct eth_stats_info *ether_stat = 7735 &sc->sp->drv_info_to_mcp.ether_stat; 7736 7737 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 7738 ETH_STAT_INFO_VERSION_LEN); 7739 7740 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 7741 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 7742 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 7743 ether_stat->mac_local + MAC_PAD, 7744 MAC_PAD, ETH_ALEN); 7745 7746 ether_stat->mtu_size = sc->mtu; 7747 7748 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 7749 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 7750 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 7751 } 7752 7753 // XXX ether_stat->feature_flags |= ???; 7754 7755 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; 7756 7757 ether_stat->txq_size = sc->tx_ring_size; 7758 ether_stat->rxq_size = sc->rx_ring_size; 7759} 7760 7761static void 7762bxe_handle_drv_info_req(struct bxe_softc *sc) 7763{ 7764 enum drv_info_opcode op_code; 7765 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 7766 7767 /* if drv_info version supported by MFW doesn't match - send NACK */ 7768 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 7769 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7770 return; 7771 } 7772 7773 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 7774 DRV_INFO_CONTROL_OP_CODE_SHIFT); 7775 7776 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 7777 7778 switch (op_code) { 7779 case ETH_STATS_OPCODE: 7780 bxe_drv_info_ether_stat(sc); 7781 break; 7782 case FCOE_STATS_OPCODE: 7783 case ISCSI_STATS_OPCODE: 7784 default: 7785 /* if op code isn't supported - send NACK */ 7786 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7787 return; 7788 } 7789 7790 /* 7791 * If we got drv_info attn from MFW then these fields are defined in 7792 * shmem2 for sure 7793 */ 7794 SHMEM2_WR(sc, drv_info_host_addr_lo, 7795 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7796 SHMEM2_WR(sc, drv_info_host_addr_hi, 7797 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7798 7799 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 7800} 7801 7802static void 7803bxe_dcc_event(struct bxe_softc *sc, 7804 uint32_t dcc_event) 7805{ 7806 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 7807 7808 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 7809 /* 7810 * This is the only place besides the function initialization 7811 * where the sc->flags can change so it is done without any 7812 * locks 7813 */ 7814 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 7815 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 7816 sc->flags |= BXE_MF_FUNC_DIS; 7817 bxe_e1h_disable(sc); 7818 } else { 7819 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 7820 sc->flags &= ~BXE_MF_FUNC_DIS; 7821 bxe_e1h_enable(sc); 7822 } 7823 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 7824 } 7825 7826 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 7827 bxe_config_mf_bw(sc); 7828 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 7829 } 7830 7831 /* Report results to MCP */ 7832 if (dcc_event) 7833 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 7834 else 7835 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 7836} 7837 7838static void 7839bxe_pmf_update(struct bxe_softc *sc) 7840{ 7841 int port = SC_PORT(sc); 7842 uint32_t val; 7843 7844 sc->port.pmf = 1; 7845 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 7846 7847 /* 7848 * We need the mb() to ensure the ordering between the writing to 7849 * sc->port.pmf here and reading it from the bxe_periodic_task(). 7850 */ 7851 mb(); 7852 7853 /* queue a periodic task */ 7854 // XXX schedule task... 
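    // A function typically becomes PMF when the previous PMF unloads; the new
    // PMF re-enables NIG attention for its vnic below (e.g. for vnic 2 the
    // value is 0xff0f | (1 << (2 + 4)) == 0xff4f) and kicks the statistics
    // state machine with STATS_EVENT_PMF.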
7855 7856 // XXX bxe_dcbx_pmf_update(sc); 7857 7858 /* enable nig attention */ 7859 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 7860 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7861 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 7862 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 7863 } else if (!CHIP_IS_E1x(sc)) { 7864 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 7865 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 7866 } 7867 7868 bxe_stats_handle(sc, STATS_EVENT_PMF); 7869} 7870 7871static int 7872bxe_mc_assert(struct bxe_softc *sc) 7873{ 7874 char last_idx; 7875 int i, rc = 0; 7876 uint32_t row0, row1, row2, row3; 7877 7878 /* XSTORM */ 7879 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 7880 if (last_idx) 7881 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7882 7883 /* print the asserts */ 7884 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7885 7886 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 7887 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 7888 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 7889 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 7890 7891 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7892 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7893 i, row3, row2, row1, row0); 7894 rc++; 7895 } else { 7896 break; 7897 } 7898 } 7899 7900 /* TSTORM */ 7901 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 7902 if (last_idx) { 7903 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7904 } 7905 7906 /* print the asserts */ 7907 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7908 7909 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 7910 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 7911 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 7912 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 7913 7914 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7915 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7916 i, row3, row2, row1, row0); 7917 rc++; 7918 } else { 7919 break; 7920 } 7921 } 7922 7923 /* CSTORM */ 7924 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 7925 if (last_idx) { 7926 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7927 } 7928 7929 /* print the asserts */ 7930 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7931 7932 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 7933 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 7934 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 7935 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 7936 7937 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7938 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7939 i, row3, row2, row1, row0); 7940 rc++; 7941 } else { 7942 break; 7943 } 7944 } 7945 7946 /* USTORM */ 7947 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 7948 if (last_idx) { 7949 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7950 } 7951 7952 /* print the asserts */ 7953 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7954 7955 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 7956 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 7957 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + 
USTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    return (rc);
}

static void
bxe_attn_int_deasserted3(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int func = SC_FUNC(sc);
    uint32_t val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

        if (attn & BXE_PMF_LINK_ASSERT(sc)) {

            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            bxe_read_mf_cfg(sc);
            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);

            if (val & DRV_STATUS_DCC_EVENT_MASK)
                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));

            if (val & DRV_STATUS_SET_MF_BW)
                bxe_set_mf_bw(sc);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bxe_handle_drv_info_req(sc);

            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bxe_pmf_update(sc);

            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bxe_handle_eee_event(sc);

            if (sc->link_vars.periodic_flags &
                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                bxe_acquire_phy_lock(sc);
                sc->link_vars.periodic_flags &=
                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
                bxe_release_phy_lock(sc);
                if (IS_MF(sc))
                    ; // XXX bxe_link_sync_notify(sc);
                bxe_link_report(sc);
            }

            /*
             * Always call it here: bxe_link_report() will
             * prevent the link indication duplication.
             */
            bxe_link_status_update(sc);

        } else if (attn & BXE_MC_ASSERT_BITS) {

            BLOGE(sc, "MC assert!\n");
            bxe_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bxe_int_disable(sc);
            BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
            taskqueue_enqueue_timeout(taskqueue_thread,
                &sc->sp_err_timeout_task, hz/10);

        } else if (attn & BXE_MCP_ASSERT) {

            BLOGE(sc, "MCP assert!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
            taskqueue_enqueue_timeout(taskqueue_thread,
                &sc->sp_err_timeout_task, hz/10);
            bxe_int_disable(sc); /* avoid repetitive assert alerts */

        } else {
            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BXE_GRC_TIMEOUT) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            BLOGE(sc, "GRC time-out 0x%08x\n", val);
        }
        if (attn & BXE_GRC_RSV) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}

static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;
    boolean_t err_flg = FALSE;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
            err_flg = TRUE;
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
            err_flg = TRUE;
        }

        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
            err_flg = TRUE;
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If PXP2_EOP_ERROR_BIT is the only bit set in
             * STS0 and STS1, clear it.
             *
             * We may lose additional attentions between STS0 and
             * STS_CLR0; in that case the user will not be notified
             * about them.
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * is set then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
                err_flg = TRUE;

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear the attention from the PXP2 block
                 * without panicking
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        err_flg = TRUE;
        bxe_panic(sc, ("HW block attention set2\n"));
    }
    if (err_flg) {
        BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }
}

static void
bxe_attn_int_deasserted1(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;
    boolean_t err_flg = FALSE;

    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
        BLOGE(sc, "DB hw attention 0x%08x\n", val);
        /* DORQ discard attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from DORQ\n");
            err_flg = TRUE;
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
        err_flg = TRUE;
        bxe_panic(sc, ("HW block attention set1\n"));
    }
    if (err_flg) {
        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }
}

static void
bxe_attn_int_deasserted0(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
        val = REG_RD(sc, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_offset, val);

        BLOGW(sc, "SPIO5 hw attention\n");

        /* Fan failure attention */
        elink_hw_reset_phy(&sc->link_params);
        bxe_fan_failure(sc);
    }

    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);

        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
    }
}

static void
bxe_attn_int_deasserted(struct bxe_softc *sc,
                        uint32_t         deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bxe_acquire_alr(sc);

    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /* XXX
         * In case of parity errors don't handle attentions so that
         * the other function can "see" the parity errors.
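         * Interrupts are disabled and BXE_ERR_PARITY is latched below;
         * the slowpath error task then carries out the actual recovery
         * handling.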
8274 */ 8275 // XXX schedule a recovery task... 8276 /* disable HW interrupts */ 8277 bxe_int_disable(sc); 8278 BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY); 8279 taskqueue_enqueue_timeout(taskqueue_thread, 8280 &sc->sp_err_timeout_task, hz/10); 8281 bxe_release_alr(sc); 8282 return; 8283 } 8284 8285 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8286 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8287 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8288 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8289 if (!CHIP_IS_E1x(sc)) { 8290 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8291 } else { 8292 attn.sig[4] = 0; 8293 } 8294 8295 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8296 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8297 8298 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8299 if (deasserted & (1 << index)) { 8300 group_mask = &sc->attn_group[index]; 8301 8302 BLOGD(sc, DBG_INTR, 8303 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8304 group_mask->sig[0], group_mask->sig[1], 8305 group_mask->sig[2], group_mask->sig[3], 8306 group_mask->sig[4]); 8307 8308 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8309 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8310 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8311 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8312 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8313 } 8314 } 8315 8316 bxe_release_alr(sc); 8317 8318 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8319 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8320 COMMAND_REG_ATTN_BITS_CLR); 8321 } else { 8322 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8323 } 8324 8325 val = ~deasserted; 8326 BLOGD(sc, DBG_INTR, 8327 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8328 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8329 REG_WR(sc, reg_addr, val); 8330 8331 if (~sc->attn_state & deasserted) { 8332 BLOGE(sc, "IGU error\n"); 8333 } 8334 8335 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8336 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8337 8338 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8339 8340 aeu_mask = REG_RD(sc, reg_addr); 8341 8342 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8343 aeu_mask, deasserted); 8344 aeu_mask |= (deasserted & 0x3ff); 8345 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8346 8347 REG_WR(sc, reg_addr, aeu_mask); 8348 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8349 8350 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8351 sc->attn_state &= ~deasserted; 8352 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8353} 8354 8355static void 8356bxe_attn_int(struct bxe_softc *sc) 8357{ 8358 /* read local copy of bits */ 8359 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8360 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8361 uint32_t attn_state = sc->attn_state; 8362 8363 /* look for changed bits */ 8364 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8365 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8366 8367 BLOGD(sc, DBG_INTR, 8368 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8369 attn_bits, attn_ack, asserted, deasserted); 8370 8371 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8372 BLOGE(sc, "BAD attention state\n"); 8373 } 8374 8375 /* handle bits that were raised */ 8376 if (asserted) { 8377 bxe_attn_int_asserted(sc, asserted); 8378 } 8379 8380 if (deasserted) { 8381 bxe_attn_int_deasserted(sc, deasserted); 8382 } 8383} 8384 8385static uint16_t 8386bxe_update_dsb_idx(struct bxe_softc *sc) 8387{ 8388 struct host_sp_status_block *def_sb = sc->def_sb; 8389 uint16_t rc = 0; 8390 8391 mb(); /* status block is written to by the chip */ 8392 8393 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8394 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8395 rc |= BXE_DEF_SB_ATT_IDX; 8396 } 8397 8398 if (sc->def_idx != def_sb->sp_sb.running_index) { 8399 sc->def_idx = def_sb->sp_sb.running_index; 8400 rc |= BXE_DEF_SB_IDX; 8401 } 8402 8403 mb(); 8404 8405 return (rc); 8406} 8407 8408static inline struct ecore_queue_sp_obj * 8409bxe_cid_to_q_obj(struct bxe_softc *sc, 8410 uint32_t cid) 8411{ 8412 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8413 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8414} 8415 8416static void 8417bxe_handle_mcast_eqe(struct bxe_softc *sc) 8418{ 8419 struct ecore_mcast_ramrod_params rparam; 8420 int rc; 8421 8422 memset(&rparam, 0, sizeof(rparam)); 8423 8424 rparam.mcast_obj = &sc->mcast_obj; 8425 8426 BXE_MCAST_LOCK(sc); 8427 8428 /* clear pending state for the last command */ 8429 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8430 8431 /* if there are pending mcast commands - send them */ 8432 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8433 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8434 if (rc < 0) { 8435 BLOGD(sc, DBG_SP, 8436 "ERROR: Failed to send pending mcast commands (%d)\n", rc); 8437 } 8438 } 8439 8440 BXE_MCAST_UNLOCK(sc); 8441} 8442 8443static void 8444bxe_handle_classification_eqe(struct bxe_softc *sc, 8445 union event_ring_elem *elem) 8446{ 8447 unsigned long ramrod_flags = 0; 8448 int rc = 0; 8449 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8450 struct ecore_vlan_mac_obj *vlan_mac_obj; 8451 8452 /* always push next commands out, don't wait here */ 8453 bit_set(&ramrod_flags, RAMROD_CONT); 8454 8455 
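    /*
     * The echo field carries two values: the low bits (masked by
     * BXE_SWCID_MASK above) hold the connection ID, while the bits at
     * and above BXE_SWCID_SHIFT hold the pending classification command
     * type that is switched on below.
     */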
    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bxe_handle_mcast_eqe(sc);
        return;

    default:
        BLOGE(sc, "Unsupported classification command: %d\n",
              elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
    } else if (rc > 0) {
        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
    }
}

static void
bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
                       union event_ring_elem *elem)
{
    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send the rx_mode command again if it was requested */
    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
                               &sc->sp_state)) {
        bxe_set_storm_rx_mode(sc);
    }
}

static void
bxe_update_eq_prod(struct bxe_softc *sc,
                   uint16_t         prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb(); /* keep prod updates ordered */
}

static void
bxe_eq_int(struct bxe_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0;
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 while the sw_cons range is
     * 0-254, 256. When we get to the next-page we need to adjust so
     * the loop condition below will be met. The next element is the
     * size of a regular element, hence we increment by 1.
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function never runs in parallel with itself for a specific
     * sc, so there is no need for a read memory barrier here.
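     * It is only invoked from the slowpath taskqueue (see
     * bxe_handle_sp_tq() below), which serializes EQ processing.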
8534 */ 8535 sw_cons = sc->eq_cons; 8536 sw_prod = sc->eq_prod; 8537 8538 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8539 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8540 8541 for (; 8542 sw_cons != hw_cons; 8543 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8544 8545 elem = &sc->eq[EQ_DESC(sw_cons)]; 8546 8547 /* elem CID originates from FW, actually LE */ 8548 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8549 opcode = elem->message.opcode; 8550 8551 /* handle eq element */ 8552 switch (opcode) { 8553 8554 case EVENT_RING_OPCODE_STAT_QUERY: 8555 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8556 sc->stats_comp++); 8557 /* nothing to do with stats comp */ 8558 goto next_spqe; 8559 8560 case EVENT_RING_OPCODE_CFC_DEL: 8561 /* handle according to cid range */ 8562 /* we may want to verify here that the sc state is HALTING */ 8563 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8564 q_obj = bxe_cid_to_q_obj(sc, cid); 8565 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8566 break; 8567 } 8568 goto next_spqe; 8569 8570 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8571 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8572 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8573 break; 8574 } 8575 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8576 goto next_spqe; 8577 8578 case EVENT_RING_OPCODE_START_TRAFFIC: 8579 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8580 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8581 break; 8582 } 8583 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8584 goto next_spqe; 8585 8586 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8587 echo = elem->message.data.function_update_event.echo; 8588 if (echo == SWITCH_UPDATE) { 8589 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8590 if (f_obj->complete_cmd(sc, f_obj, 8591 ECORE_F_CMD_SWITCH_UPDATE)) { 8592 break; 8593 } 8594 } 8595 else { 8596 BLOGD(sc, DBG_SP, 8597 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8598 } 8599 goto next_spqe; 8600 8601 case EVENT_RING_OPCODE_FORWARD_SETUP: 8602 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8603 if (q_obj->complete_cmd(sc, q_obj, 8604 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8605 break; 8606 } 8607 goto next_spqe; 8608 8609 case EVENT_RING_OPCODE_FUNCTION_START: 8610 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8611 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8612 break; 8613 } 8614 goto next_spqe; 8615 8616 case EVENT_RING_OPCODE_FUNCTION_STOP: 8617 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8618 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8619 break; 8620 } 8621 goto next_spqe; 8622 } 8623 8624 switch (opcode | sc->state) { 8625 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8626 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8627 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8628 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", cid);
            rss_raw->clear_pending(rss_raw);
            break;

        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
            bxe_handle_classification_eqe(sc, elem);
            break;

        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
            bxe_handle_mcast_eqe(sc);
            break;

        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
            bxe_handle_rx_mode_eqe(sc, elem);
            break;

        default:
            /* unknown event; log an error and continue */
            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
                  elem->message.opcode, sc->state);
        }

next_spqe:
        spqe_cnt++;
    } /* for */

    mb();
    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);

    sc->eq_cons = sw_cons;
    sc->eq_prod = sw_prod;

    /* make sure the above memory writes are issued before the producer update */
    wmb();

    /* update producer */
    bxe_update_eq_prod(sc, sc->eq_prod);
}

static void
bxe_handle_sp_tq(void *context,
                 int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    uint16_t status;

    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");

    /* what work needs to be performed? */
    status = bxe_update_dsb_idx(sc);

    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);

    /* HW attentions */
    if (status & BXE_DEF_SB_ATT_IDX) {
        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
        bxe_attn_int(sc);
        status &= ~BXE_DEF_SB_ATT_IDX;
    }

    /* SP events: STAT_QUERY and others */
    if (status & BXE_DEF_SB_IDX) {
        /* handle EQ completions */
        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
        bxe_eq_int(sc);
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
        status &= ~BXE_DEF_SB_IDX;
    }

    /* if status is non zero then something went wrong */
    if (__predict_false(status)) {
        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
    }

    /* ack status block only if something was actually handled */
    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);

    /*
     * Must be called after the EQ processing (since eq leads to sriov
     * ramrod completion flows).
     * This flow may have been scheduled by the arrival of a ramrod
     * completion, or by the sriov code rescheduling itself.
     */
    // XXX bxe_iov_sp_task(sc);

}

static void
bxe_handle_fp_tq(void *context,
                 int  pending)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
    struct bxe_softc *sc = fp->sc;
    uint8_t more_tx = FALSE;
    uint8_t more_rx = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);

    /* XXX
     * IFF_DRV_RUNNING state can't be checked here since we process
     * slowpath events on a client queue during setup. Instead we need
     * to add a "process/continue" flag that the driver can use to tell
     * this task not to do anything.
     */
#if 0
    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
        return;
    }
#endif

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add a loop here if we ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}

static void
bxe_task_fp(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc = fp->sc;
    uint8_t more_tx = FALSE;
    uint8_t more_rx = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add a loop here if we ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do; bail out of this ISR and process later */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /*
     * Here we write the fastpath index taken before doing any tx or rx work.
     * It is quite possible that other hw events occurred up to this point
     * and were actually processed accordingly above. Since we are about to
     * write an older fastpath index, another interrupt will arrive in which
     * we might do no work.
     */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}

/*
 * Legacy interrupt entry point.
 *
 * Verifies that the controller generated the interrupt and
 * then calls a separate routine to handle the various
 * interrupt causes: link, RX, and TX.
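 *
 * Fastpath (RX/TX) completions are processed directly via bxe_task_fp(),
 * while slowpath work is deferred to the slowpath taskqueue.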
8824 */ 8825static void 8826bxe_intr_legacy(void *xsc) 8827{ 8828 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8829 struct bxe_fastpath *fp; 8830 uint16_t status, mask; 8831 int i; 8832 8833 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 8834 8835 /* 8836 * 0 for ustorm, 1 for cstorm 8837 * the bits returned from ack_int() are 0-15 8838 * bit 0 = attention status block 8839 * bit 1 = fast path status block 8840 * a mask of 0x2 or more = tx/rx event 8841 * a mask of 1 = slow path event 8842 */ 8843 8844 status = bxe_ack_int(sc); 8845 8846 /* the interrupt is not for us */ 8847 if (__predict_false(status == 0)) { 8848 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 8849 return; 8850 } 8851 8852 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 8853 8854 FOR_EACH_ETH_QUEUE(sc, i) { 8855 fp = &sc->fp[i]; 8856 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 8857 if (status & mask) { 8858 /* acknowledge and disable further fastpath interrupts */ 8859 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8860 bxe_task_fp(fp); 8861 status &= ~mask; 8862 } 8863 } 8864 8865 if (__predict_false(status & 0x1)) { 8866 /* acknowledge and disable further slowpath interrupts */ 8867 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8868 8869 /* schedule slowpath handler */ 8870 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8871 8872 status &= ~0x1; 8873 } 8874 8875 if (__predict_false(status)) { 8876 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 8877 } 8878} 8879 8880/* slowpath interrupt entry point */ 8881static void 8882bxe_intr_sp(void *xsc) 8883{ 8884 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8885 8886 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 8887 8888 /* acknowledge and disable further slowpath interrupts */ 8889 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8890 8891 /* schedule slowpath handler */ 8892 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8893} 8894 8895/* fastpath interrupt entry point */ 8896static void 8897bxe_intr_fp(void *xfp) 8898{ 8899 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 8900 struct bxe_softc *sc = fp->sc; 8901 8902 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 8903 8904 BLOGD(sc, DBG_INTR, 8905 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 8906 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 8907 8908 /* acknowledge and disable further fastpath interrupts */ 8909 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8910 8911 bxe_task_fp(fp); 8912} 8913 8914/* Release all interrupts allocated by the driver. 
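 * Any MSI/MSI-X messages that were allocated are also released back to
 * the system via pci_release_msi().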
 */
static void
bxe_interrupt_free(struct bxe_softc *sc)
{
    int i;

    switch (sc->interrupt_mode) {
    case INTR_MODE_INTX:
        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
        if (sc->intr[0].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_IRQ,
                                 sc->intr[0].rid,
                                 sc->intr[0].resource);
        }
        break;
    case INTR_MODE_MSI:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    case INTR_MODE_MSIX:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    default:
        /* nothing to do as initial allocation failed */
        break;
    }
}

/*
 * This function determines and allocates the appropriate
 * interrupt based on system capabilities and user request.
 *
 * The user may force a particular interrupt mode, specify
 * the number of receive queues, specify the method for
 * distributing received frames to receive queues, or use
 * the default settings which will automatically select the
 * best supported combination. In addition, the OS may or
 * may not support certain combinations of these settings.
 * This routine attempts to reconcile the settings requested
 * by the user with the capabilities available from the system
 * to select the optimal combination of features.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
static int
bxe_interrupt_alloc(struct bxe_softc *sc)
{
    int msix_count = 0;
    int msi_count = 0;
    int num_requested = 0;
    int num_allocated = 0;
    int rid, i, j;
    int rc;

    /* get the number of available MSI/MSI-X interrupts from the OS */
    if (sc->interrupt_mode > 0) {
        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
            msix_count = pci_msix_count(sc->dev);
        }

        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
            msi_count = pci_msi_count(sc->dev);
        }

        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
              msi_count, msix_count);
    }

    do { /* try allocating MSI-X interrupt resources (at least 2) */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
            (msix_count < 2)) {
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        /* ask for the necessary number of MSI-X vectors */
        num_requested = min((sc->num_queues + 1), msix_count);

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        if (num_allocated < 2) { /* possible? */
            BLOGE(sc, "MSI-X allocation less than 2!\n");
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated - 1;

        rid = 1; /* initial resource identifier */

        /* allocate the MSI-X vectors */
        for (i = 0; i < num_allocated; i++) {
            sc->intr[i].rid = (rid + i);

            if ((sc->intr[i].resource =
                 bus_alloc_resource_any(sc->dev,
                                        SYS_RES_IRQ,
                                        &sc->intr[i].rid,
                                        RF_ACTIVE)) == NULL) {
                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
                      i, (rid + i));

                for (j = (i - 1); j >= 0; j--) {
                    bus_release_resource(sc->dev,
                                         SYS_RES_IRQ,
                                         sc->intr[j].rid,
                                         sc->intr[j].resource);
                }

                sc->intr_count = 0;
                sc->num_queues = 0;
                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
                pci_release_msi(sc->dev);
                break;
            }

            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
        }
    } while (0);

    do { /* try allocating MSI vector resources (a single vector) */
        if (sc->interrupt_mode != INTR_MODE_MSI) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
            (msi_count < 1)) {
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        /* ask for a single MSI vector */
        num_requested = 1;

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        if (num_allocated != 1) { /* possible? */
            BLOGE(sc, "MSI allocation is not 1!\n");
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated;

        rid = 1; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    RF_ACTIVE)) == NULL) {
            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
    } while (0);

    do { /* try allocating INTx vector resources */
        if (sc->interrupt_mode != INTR_MODE_INTX) {
            break;
        }

        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");

        /* only one vector for INTx */
        sc->intr_count = 1;
        sc->num_queues = 1;

        rid = 0; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = -1; /* Failed! */
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
    } while (0);

    if (sc->interrupt_mode == -1) {
        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
        rc = 1;
    } else {
        BLOGD(sc, DBG_LOAD,
              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
              sc->interrupt_mode, sc->num_queues);
        rc = 0;
    }

    return (rc);
}

static void
bxe_interrupt_detach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    /* release interrupt resources */
    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intr[i].resource && sc->intr[i].tag) {
            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
        }
    }

    /* drain every fastpath taskqueue first */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                                            NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
        }
    }

    /* free the taskqueues only after all queues have been drained */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq != NULL) {
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }

    if (sc->sp_tq) {
        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
        taskqueue_free(sc->sp_tq);
        sc->sp_tq = NULL;
    }
}

/*
 * Enables interrupts and attaches to the ISR.
 *
 * When using multiple MSI/MSI-X vectors the first vector
 * is used for slowpath operations while all remaining
 * vectors are used for fastpath operations. If only a
 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
 * ISR must look for both slowpath and fastpath completions.
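 * The slowpath and fastpath taskqueues are created first so that the
 * interrupt handlers installed here always have a valid queue to
 * enqueue work into.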
9218 */ 9219static int 9220bxe_interrupt_attach(struct bxe_softc *sc) 9221{ 9222 struct bxe_fastpath *fp; 9223 int rc = 0; 9224 int i; 9225 9226 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9227 "bxe%d_sp_tq", sc->unit); 9228 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9229 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, 9230 taskqueue_thread_enqueue, 9231 &sc->sp_tq); 9232 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9233 "%s", sc->sp_tq_name); 9234 9235 9236 for (i = 0; i < sc->num_queues; i++) { 9237 fp = &sc->fp[i]; 9238 snprintf(fp->tq_name, sizeof(fp->tq_name), 9239 "bxe%d_fp%d_tq", sc->unit, i); 9240 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9241 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); 9242 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, 9243 taskqueue_thread_enqueue, 9244 &fp->tq); 9245 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, 9246 bxe_tx_mq_start_deferred, fp); 9247 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9248 "%s", fp->tq_name); 9249 } 9250 9251 /* setup interrupt handlers */ 9252 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9253 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9254 9255 /* 9256 * Setup the interrupt handler. Note that we pass the driver instance 9257 * to the interrupt handler for the slowpath. 9258 */ 9259 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9260 (INTR_TYPE_NET | INTR_MPSAFE), 9261 NULL, bxe_intr_sp, sc, 9262 &sc->intr[0].tag)) != 0) { 9263 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9264 goto bxe_interrupt_attach_exit; 9265 } 9266 9267 bus_describe_intr(sc->dev, sc->intr[0].resource, 9268 sc->intr[0].tag, "sp"); 9269 9270 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9271 9272 /* initialize the fastpath vectors (note the first was used for sp) */ 9273 for (i = 0; i < sc->num_queues; i++) { 9274 fp = &sc->fp[i]; 9275 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9276 9277 /* 9278 * Setup the interrupt handler. Note that we pass the 9279 * fastpath context to the interrupt handler in this 9280 * case. 9281 */ 9282 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9283 (INTR_TYPE_NET | INTR_MPSAFE), 9284 NULL, bxe_intr_fp, fp, 9285 &sc->intr[i + 1].tag)) != 0) { 9286 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9287 (i + 1), rc); 9288 goto bxe_interrupt_attach_exit; 9289 } 9290 9291 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9292 sc->intr[i + 1].tag, "fp%02d", i); 9293 9294 /* bind the fastpath instance to a cpu */ 9295 if (sc->num_queues > 1) { 9296 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9297 } 9298 9299 fp->state = BXE_FP_STATE_IRQ; 9300 } 9301 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9302 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9303 9304 /* 9305 * Setup the interrupt handler. Note that we pass the 9306 * driver instance to the interrupt handler which 9307 * will handle both the slowpath and fastpath. 9308 */ 9309 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9310 (INTR_TYPE_NET | INTR_MPSAFE), 9311 NULL, bxe_intr_legacy, sc, 9312 &sc->intr[0].tag)) != 0) { 9313 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9314 goto bxe_interrupt_attach_exit; 9315 } 9316 9317 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9318 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9319 9320 /* 9321 * Setup the interrupt handler. 
Note that we pass the 9322 * driver instance to the interrupt handler which 9323 * will handle both the slowpath and fastpath. 9324 */ 9325 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9326 (INTR_TYPE_NET | INTR_MPSAFE), 9327 NULL, bxe_intr_legacy, sc, 9328 &sc->intr[0].tag)) != 0) { 9329 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9330 goto bxe_interrupt_attach_exit; 9331 } 9332 } 9333 9334bxe_interrupt_attach_exit: 9335 9336 return (rc); 9337} 9338 9339static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9340static int bxe_init_hw_common(struct bxe_softc *sc); 9341static int bxe_init_hw_port(struct bxe_softc *sc); 9342static int bxe_init_hw_func(struct bxe_softc *sc); 9343static void bxe_reset_common(struct bxe_softc *sc); 9344static void bxe_reset_port(struct bxe_softc *sc); 9345static void bxe_reset_func(struct bxe_softc *sc); 9346static int bxe_gunzip_init(struct bxe_softc *sc); 9347static void bxe_gunzip_end(struct bxe_softc *sc); 9348static int bxe_init_firmware(struct bxe_softc *sc); 9349static void bxe_release_firmware(struct bxe_softc *sc); 9350 9351static struct 9352ecore_func_sp_drv_ops bxe_func_sp_drv = { 9353 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9354 .init_hw_cmn = bxe_init_hw_common, 9355 .init_hw_port = bxe_init_hw_port, 9356 .init_hw_func = bxe_init_hw_func, 9357 9358 .reset_hw_cmn = bxe_reset_common, 9359 .reset_hw_port = bxe_reset_port, 9360 .reset_hw_func = bxe_reset_func, 9361 9362 .gunzip_init = bxe_gunzip_init, 9363 .gunzip_end = bxe_gunzip_end, 9364 9365 .init_fw = bxe_init_firmware, 9366 .release_fw = bxe_release_firmware, 9367}; 9368 9369static void 9370bxe_init_func_obj(struct bxe_softc *sc) 9371{ 9372 sc->dmae_ready = 0; 9373 9374 ecore_init_func_obj(sc, 9375 &sc->func_obj, 9376 BXE_SP(sc, func_rdata), 9377 BXE_SP_MAPPING(sc, func_rdata), 9378 BXE_SP(sc, func_afex_rdata), 9379 BXE_SP_MAPPING(sc, func_afex_rdata), 9380 &bxe_func_sp_drv); 9381} 9382 9383static int 9384bxe_init_hw(struct bxe_softc *sc, 9385 uint32_t load_code) 9386{ 9387 struct ecore_func_state_params func_params = { NULL }; 9388 int rc; 9389 9390 /* prepare the parameters for function state transitions */ 9391 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9392 9393 func_params.f_obj = &sc->func_obj; 9394 func_params.cmd = ECORE_F_CMD_HW_INIT; 9395 9396 func_params.params.hw_init.load_phase = load_code; 9397 9398 /* 9399 * Via a plethora of function pointers, we will eventually reach 9400 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
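     * The route taken is selected by the load_phase (derived from the
     * MCP load_code) through the bxe_func_sp_drv callbacks registered
     * in bxe_init_func_obj() above.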
9401 */ 9402 rc = ecore_func_state_change(sc, &func_params); 9403 9404 return (rc); 9405} 9406 9407static void 9408bxe_fill(struct bxe_softc *sc, 9409 uint32_t addr, 9410 int fill, 9411 uint32_t len) 9412{ 9413 uint32_t i; 9414 9415 if (!(len % 4) && !(addr % 4)) { 9416 for (i = 0; i < len; i += 4) { 9417 REG_WR(sc, (addr + i), fill); 9418 } 9419 } else { 9420 for (i = 0; i < len; i++) { 9421 REG_WR8(sc, (addr + i), fill); 9422 } 9423 } 9424} 9425 9426/* writes FP SP data to FW - data_size in dwords */ 9427static void 9428bxe_wr_fp_sb_data(struct bxe_softc *sc, 9429 int fw_sb_id, 9430 uint32_t *sb_data_p, 9431 uint32_t data_size) 9432{ 9433 int index; 9434 9435 for (index = 0; index < data_size; index++) { 9436 REG_WR(sc, 9437 (BAR_CSTRORM_INTMEM + 9438 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9439 (sizeof(uint32_t) * index)), 9440 *(sb_data_p + index)); 9441 } 9442} 9443 9444static void 9445bxe_zero_fp_sb(struct bxe_softc *sc, 9446 int fw_sb_id) 9447{ 9448 struct hc_status_block_data_e2 sb_data_e2; 9449 struct hc_status_block_data_e1x sb_data_e1x; 9450 uint32_t *sb_data_p; 9451 uint32_t data_size = 0; 9452 9453 if (!CHIP_IS_E1x(sc)) { 9454 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9455 sb_data_e2.common.state = SB_DISABLED; 9456 sb_data_e2.common.p_func.vf_valid = FALSE; 9457 sb_data_p = (uint32_t *)&sb_data_e2; 9458 data_size = (sizeof(struct hc_status_block_data_e2) / 9459 sizeof(uint32_t)); 9460 } else { 9461 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9462 sb_data_e1x.common.state = SB_DISABLED; 9463 sb_data_e1x.common.p_func.vf_valid = FALSE; 9464 sb_data_p = (uint32_t *)&sb_data_e1x; 9465 data_size = (sizeof(struct hc_status_block_data_e1x) / 9466 sizeof(uint32_t)); 9467 } 9468 9469 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9470 9471 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9472 0, CSTORM_STATUS_BLOCK_SIZE); 9473 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9474 0, CSTORM_SYNC_BLOCK_SIZE); 9475} 9476 9477static void 9478bxe_wr_sp_sb_data(struct bxe_softc *sc, 9479 struct hc_sp_status_block_data *sp_sb_data) 9480{ 9481 int i; 9482 9483 for (i = 0; 9484 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9485 i++) { 9486 REG_WR(sc, 9487 (BAR_CSTRORM_INTMEM + 9488 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9489 (i * sizeof(uint32_t))), 9490 *((uint32_t *)sp_sb_data + i)); 9491 } 9492} 9493 9494static void 9495bxe_zero_sp_sb(struct bxe_softc *sc) 9496{ 9497 struct hc_sp_status_block_data sp_sb_data; 9498 9499 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9500 9501 sp_sb_data.state = SB_DISABLED; 9502 sp_sb_data.p_func.vf_valid = FALSE; 9503 9504 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9505 9506 bxe_fill(sc, 9507 (BAR_CSTRORM_INTMEM + 9508 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9509 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9510 bxe_fill(sc, 9511 (BAR_CSTRORM_INTMEM + 9512 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9513 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9514} 9515 9516static void 9517bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9518 int igu_sb_id, 9519 int igu_seg_id) 9520{ 9521 hc_sm->igu_sb_id = igu_sb_id; 9522 hc_sm->igu_seg_id = igu_seg_id; 9523 hc_sm->timer_value = 0xFF; 9524 hc_sm->time_to_expire = 0xFFFFFFFF; 9525} 9526 9527static void 9528bxe_map_sb_state_machines(struct hc_index_data *index_data) 9529{ 9530 /* zero out state machine indices */ 9531 9532 /* rx indices */ 9533 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
&= ~HC_INDEX_DATA_SM_ID; 9534 9535 /* tx indices */ 9536 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9537 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9538 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9539 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9540 9541 /* map indices */ 9542 9543 /* rx indices */ 9544 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9545 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9546 9547 /* tx indices */ 9548 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9549 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9550 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9551 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9552 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9553 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9554 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9555 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9556} 9557 9558static void 9559bxe_init_sb(struct bxe_softc *sc, 9560 bus_addr_t busaddr, 9561 int vfid, 9562 uint8_t vf_valid, 9563 int fw_sb_id, 9564 int igu_sb_id) 9565{ 9566 struct hc_status_block_data_e2 sb_data_e2; 9567 struct hc_status_block_data_e1x sb_data_e1x; 9568 struct hc_status_block_sm *hc_sm_p; 9569 uint32_t *sb_data_p; 9570 int igu_seg_id; 9571 int data_size; 9572 9573 if (CHIP_INT_MODE_IS_BC(sc)) { 9574 igu_seg_id = HC_SEG_ACCESS_NORM; 9575 } else { 9576 igu_seg_id = IGU_SEG_ACCESS_NORM; 9577 } 9578 9579 bxe_zero_fp_sb(sc, fw_sb_id); 9580 9581 if (!CHIP_IS_E1x(sc)) { 9582 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9583 sb_data_e2.common.state = SB_ENABLED; 9584 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9585 sb_data_e2.common.p_func.vf_id = vfid; 9586 sb_data_e2.common.p_func.vf_valid = vf_valid; 9587 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9588 sb_data_e2.common.same_igu_sb_1b = TRUE; 9589 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9590 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9591 hc_sm_p = sb_data_e2.common.state_machine; 9592 sb_data_p = (uint32_t *)&sb_data_e2; 9593 data_size = (sizeof(struct hc_status_block_data_e2) / 9594 sizeof(uint32_t)); 9595 bxe_map_sb_state_machines(sb_data_e2.index_data); 9596 } else { 9597 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9598 sb_data_e1x.common.state = SB_ENABLED; 9599 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9600 sb_data_e1x.common.p_func.vf_id = 0xff; 9601 sb_data_e1x.common.p_func.vf_valid = FALSE; 9602 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9603 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9604 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9605 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9606 hc_sm_p = sb_data_e1x.common.state_machine; 9607 sb_data_p = (uint32_t *)&sb_data_e1x; 9608 data_size = (sizeof(struct hc_status_block_data_e1x) / 9609 sizeof(uint32_t)); 9610 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9611 } 9612 9613 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9614 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9615 9616 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9617 9618 /* write indices to HW - PCI guarantees endianity of regpairs */ 9619 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9620} 9621 9622static inline uint8_t 9623bxe_fp_qzone_id(struct bxe_fastpath *fp) 9624{ 9625 if (CHIP_IS_E1x(fp->sc)) { 9626 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9627 } else { 9628 return (fp->cl_id); 9629 } 9630} 9631 
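/*
 * Returns the BAR offset of the ustorm RX producers for the given fastpath.
 * E2 and newer chips index by queue zone ID while E1x chips index by port
 * and client ID; the result is cached in fp->ustorm_rx_prods_offset as a
 * shortcut by bxe_init_eth_fp() below.
 */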
9632static inline uint32_t 9633bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9634 struct bxe_fastpath *fp) 9635{ 9636 uint32_t offset = BAR_USTRORM_INTMEM; 9637 9638 if (!CHIP_IS_E1x(sc)) { 9639 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 9640 } else { 9641 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 9642 } 9643 9644 return (offset); 9645} 9646 9647static void 9648bxe_init_eth_fp(struct bxe_softc *sc, 9649 int idx) 9650{ 9651 struct bxe_fastpath *fp = &sc->fp[idx]; 9652 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 9653 unsigned long q_type = 0; 9654 int cos; 9655 9656 fp->sc = sc; 9657 fp->index = idx; 9658 9659 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 9660 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 9661 9662 fp->cl_id = (CHIP_IS_E1x(sc)) ? 9663 (SC_L_ID(sc) + idx) : 9664 /* want client ID same as IGU SB ID for non-E1 */ 9665 fp->igu_sb_id; 9666 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 9667 9668 /* setup sb indices */ 9669 if (!CHIP_IS_E1x(sc)) { 9670 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 9671 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 9672 } else { 9673 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 9674 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 9675 } 9676 9677 /* init shortcut */ 9678 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 9679 9680 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 9681 9682 /* 9683 * XXX If multiple CoS is ever supported then each fastpath structure 9684 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 9685 */ 9686 for (cos = 0; cos < sc->max_cos; cos++) { 9687 cids[cos] = idx; 9688 } 9689 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 9690 9691 /* nothing more for a VF to do */ 9692 if (IS_VF(sc)) { 9693 return; 9694 } 9695 9696 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, 9697 fp->fw_sb_id, fp->igu_sb_id); 9698 9699 bxe_update_fp_sb_idx(fp); 9700 9701 /* Configure Queue State object */ 9702 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); 9703 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); 9704 9705 ecore_init_queue_obj(sc, 9706 &sc->sp_objs[idx].q_obj, 9707 fp->cl_id, 9708 cids, 9709 sc->max_cos, 9710 SC_FUNC(sc), 9711 BXE_SP(sc, q_rdata), 9712 BXE_SP_MAPPING(sc, q_rdata), 9713 q_type); 9714 9715 /* configure classification DBs */ 9716 ecore_init_mac_obj(sc, 9717 &sc->sp_objs[idx].mac_obj, 9718 fp->cl_id, 9719 idx, 9720 SC_FUNC(sc), 9721 BXE_SP(sc, mac_rdata), 9722 BXE_SP_MAPPING(sc, mac_rdata), 9723 ECORE_FILTER_MAC_PENDING, 9724 &sc->sp_state, 9725 ECORE_OBJ_TYPE_RX_TX, 9726 &sc->macs_pool); 9727 9728 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", 9729 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); 9730} 9731 9732static inline void 9733bxe_update_rx_prod(struct bxe_softc *sc, 9734 struct bxe_fastpath *fp, 9735 uint16_t rx_bd_prod, 9736 uint16_t rx_cq_prod, 9737 uint16_t rx_sge_prod) 9738{ 9739 struct ustorm_eth_rx_producers rx_prods = { 0 }; 9740 uint32_t i; 9741 9742 /* update producers */ 9743 rx_prods.bd_prod = rx_bd_prod; 9744 rx_prods.cqe_prod = rx_cq_prod; 9745 rx_prods.sge_prod = rx_sge_prod; 9746 9747 /* 9748 * Make sure that the BD and SGE data is updated before updating the 9749 * producers since FW might read the BD/SGE right after the producer 9750 * is updated. 9751 * This is only applicable for weak-ordered memory model archs such 9752 * as IA-64. 
The following barrier is also mandatory since FW will
     * assume BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning: this will generate an interrupt (to the TSTORM),
         * so this can only be done after the chip is initialized.
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        if (i != 0) {
            continue;
        }

        if (CHIP_IS_E1(sc)) {
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}

static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
    fp->tx_db.data.zero_fill1 = 0;
    fp->tx_db.data.prod = 0;

    fp->tx_pkt_prod = 0;
    fp->tx_pkt_cons = 0;
    fp->tx_bd_prod = 0;
    fp->tx_bd_cons = 0;
    fp->eth_q_stats.tx_pkts = 0;
}

static inline void
bxe_init_tx_rings(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_tx_ring_one(&sc->fp[i]);
    }
}

static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
9866 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 9867 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 9868 9869 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 9870 /* take care of sig[0]..sig[4] */ 9871 for (sindex = 0; sindex < 4; sindex++) { 9872 sc->attn_group[index].sig[sindex] = 9873 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 9874 } 9875 9876 if (!CHIP_IS_E1x(sc)) { 9877 /* 9878 * enable5 is separate from the rest of the registers, 9879 * and the address skip is 4 and not 16 between the 9880 * different groups 9881 */ 9882 sc->attn_group[index].sig[4] = 9883 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 9884 } else { 9885 sc->attn_group[index].sig[4] = 0; 9886 } 9887 } 9888 9889 if (sc->devinfo.int_block == INT_BLOCK_HC) { 9890 reg_offset = (port) ? 9891 HC_REG_ATTN_MSG1_ADDR_L : 9892 HC_REG_ATTN_MSG0_ADDR_L; 9893 REG_WR(sc, reg_offset, U64_LO(section)); 9894 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 9895 } else if (!CHIP_IS_E1x(sc)) { 9896 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 9897 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 9898 } 9899 9900 section = ((uint64_t)mapping + 9901 offsetof(struct host_sp_status_block, sp_sb)); 9902 9903 bxe_zero_sp_sb(sc); 9904 9905 /* PCI guarantees endianity of regpair */ 9906 sp_sb_data.state = SB_ENABLED; 9907 sp_sb_data.host_sb_addr.lo = U64_LO(section); 9908 sp_sb_data.host_sb_addr.hi = U64_HI(section); 9909 sp_sb_data.igu_sb_id = igu_sp_sb_index; 9910 sp_sb_data.igu_seg_id = igu_seg_id; 9911 sp_sb_data.p_func.pf_id = func; 9912 sp_sb_data.p_func.vnic_id = SC_VN(sc); 9913 sp_sb_data.p_func.vf_id = 0xff; 9914 9915 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9916 9917 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 9918} 9919 9920static void 9921bxe_init_sp_ring(struct bxe_softc *sc) 9922{ 9923 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 9924 sc->spq_prod_idx = 0; 9925 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 9926 sc->spq_prod_bd = sc->spq; 9927 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 9928} 9929 9930static void 9931bxe_init_eq_ring(struct bxe_softc *sc) 9932{ 9933 union event_ring_elem *elem; 9934 int i; 9935 9936 for (i = 1; i <= NUM_EQ_PAGES; i++) { 9937 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 9938 9939 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 9940 BCM_PAGE_SIZE * 9941 (i % NUM_EQ_PAGES))); 9942 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 9943 BCM_PAGE_SIZE * 9944 (i % NUM_EQ_PAGES))); 9945 } 9946 9947 sc->eq_cons = 0; 9948 sc->eq_prod = NUM_EQ_DESC; 9949 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 9950 9951 atomic_store_rel_long(&sc->eq_spq_left, 9952 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 9953 NUM_EQ_DESC) - 1)); 9954} 9955 9956static void 9957bxe_init_internal_common(struct bxe_softc *sc) 9958{ 9959 int i; 9960 9961 /* 9962 * Zero this manually as its initialization is currently missing 9963 * in the initTool. 9964 */ 9965 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 9966 REG_WR(sc, 9967 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 9968 0); 9969 } 9970 9971 if (!CHIP_IS_E1x(sc)) { 9972 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 9973 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 9974 } 9975} 9976 9977static void 9978bxe_init_internal(struct bxe_softc *sc, 9979 uint32_t load_code) 9980{ 9981 switch (load_code) { 9982 case FW_MSG_CODE_DRV_LOAD_COMMON: 9983 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 9984 bxe_init_internal_common(sc); 9985 /* no break */ 9986 9987 case FW_MSG_CODE_DRV_LOAD_PORT: 9988 /* nothing to do */ 9989 /* no break */ 9990 9991 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 9992 /* internal memory per function is initialized inside bxe_pf_init */ 9993 break; 9994 9995 default: 9996 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); 9997 break; 9998 } 9999} 10000 10001static void 10002storm_memset_func_cfg(struct bxe_softc *sc, 10003 struct tstorm_eth_function_common_config *tcfg, 10004 uint16_t abs_fid) 10005{ 10006 uint32_t addr; 10007 size_t size; 10008 10009 addr = (BAR_TSTRORM_INTMEM + 10010 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 10011 size = sizeof(struct tstorm_eth_function_common_config); 10012 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); 10013} 10014 10015static void 10016bxe_func_init(struct bxe_softc *sc, 10017 struct bxe_func_init_params *p) 10018{ 10019 struct tstorm_eth_function_common_config tcfg = { 0 }; 10020 10021 if (CHIP_IS_E1x(sc)) { 10022 storm_memset_func_cfg(sc, &tcfg, p->func_id); 10023 } 10024 10025 /* Enable the function in the FW */ 10026 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 10027 storm_memset_func_en(sc, p->func_id, 1); 10028 10029 /* spq */ 10030 if (p->func_flgs & FUNC_FLG_SPQ) { 10031 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 10032 REG_WR(sc, 10033 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), 10034 p->spq_prod); 10035 } 10036} 10037 10038/* 10039 * Calculates the sum of vn_min_rates. 10040 * It's needed for further normalizing of the min_rates. 10041 * Returns: 10042 * sum of vn_min_rates. 10043 * or 10044 * 0 - if all the min_rates are 0. 10045 * In the latter case the fairness algorithm should be deactivated. 10046 * If all min rates are not zero then those that are zeroes will be set to 1.
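 *
 * A worked example (assuming DEF_MIN_RATE == 100, i.e. 1% in these
 * units): if three visible vnics decode min BW fields of 0, 25 and 75,
 * the loop below yields vnic_min_rate[] = { 100, 2500, 7500 } and
 * all_zero == 0, so fairness stays enabled. If every field were 0, each
 * entry would become DEF_MIN_RATE and all_zero == 1, disabling fairness.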
10047 */ 10048static void 10049bxe_calc_vn_min(struct bxe_softc *sc, 10050 struct cmng_init_input *input) 10051{ 10052 uint32_t vn_cfg; 10053 uint32_t vn_min_rate; 10054 int all_zero = 1; 10055 int vn; 10056 10057 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10058 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10059 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 10060 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 10061 10062 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10063 /* skip hidden VNs */ 10064 vn_min_rate = 0; 10065 } else if (!vn_min_rate) { 10066 /* If min rate is zero - set it to 100 */ 10067 vn_min_rate = DEF_MIN_RATE; 10068 } else { 10069 all_zero = 0; 10070 } 10071 10072 input->vnic_min_rate[vn] = vn_min_rate; 10073 } 10074 10075 /* if ETS or all min rates are zeros - disable fairness */ 10076 if (BXE_IS_ETS_ENABLED(sc)) { 10077 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10078 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); 10079 } else if (all_zero) { 10080 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10081 BLOGD(sc, DBG_LOAD, 10082 "Fairness disabled (all MIN values are zeroes)\n"); 10083 } else { 10084 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10085 } 10086} 10087 10088static inline uint16_t 10089bxe_extract_max_cfg(struct bxe_softc *sc, 10090 uint32_t mf_cfg) 10091{ 10092 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 10093 FUNC_MF_CFG_MAX_BW_SHIFT); 10094 10095 if (!max_cfg) { 10096 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); 10097 max_cfg = 100; 10098 } 10099 10100 return (max_cfg); 10101} 10102 10103static void 10104bxe_calc_vn_max(struct bxe_softc *sc, 10105 int vn, 10106 struct cmng_init_input *input) 10107{ 10108 uint16_t vn_max_rate; 10109 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10110 uint32_t max_cfg; 10111 10112 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10113 vn_max_rate = 0; 10114 } else { 10115 max_cfg = bxe_extract_max_cfg(sc, vn_cfg); 10116 10117 if (IS_MF_SI(sc)) { 10118 /* max_cfg in percents of linkspeed */ 10119 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); 10120 } else { /* SD modes */ 10121 /* max_cfg is absolute in 100Mb units */ 10122 vn_max_rate = (max_cfg * 100); 10123 } 10124 } 10125 10126 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 10127 10128 input->vnic_max_rate[vn] = vn_max_rate; 10129} 10130 10131static void 10132bxe_cmng_fns_init(struct bxe_softc *sc, 10133 uint8_t read_cfg, 10134 uint8_t cmng_type) 10135{ 10136 struct cmng_init_input input; 10137 int vn; 10138 10139 memset(&input, 0, sizeof(struct cmng_init_input)); 10140 10141 input.port_rate = sc->link_vars.line_speed; 10142 10143 if (cmng_type == CMNG_FNS_MINMAX) { 10144 /* read mf conf from shmem */ 10145 if (read_cfg) { 10146 bxe_read_mf_cfg(sc); 10147 } 10148 10149 /* get VN min rate and enable fairness if not 0 */ 10150 bxe_calc_vn_min(sc, &input); 10151 10152 /* get VN max rate */ 10153 if (sc->port.pmf) { 10154 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10155 bxe_calc_vn_max(sc, vn, &input); 10156 } 10157 } 10158 10159 /* always enable rate shaping and fairness */ 10160 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 10161 10162 ecore_init_cmng(&input, &sc->cmng); 10163 return; 10164 } 10165 10166 /* rate shaping and fairness are disabled */ 10167 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); 10168} 10169 10170static int 10171bxe_get_cmng_fns_mode(struct bxe_softc *sc) 10172{ 10173 if (CHIP_REV_IS_SLOW(sc)) {
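 /* no congestion management mode on emulation/FPGA ('slow') chips */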
10174 return (CMNG_FNS_NONE); 10175 } 10176 10177 if (IS_MF(sc)) { 10178 return (CMNG_FNS_MINMAX); 10179 } 10180 10181 return (CMNG_FNS_NONE); 10182} 10183 10184static void 10185storm_memset_cmng(struct bxe_softc *sc, 10186 struct cmng_init *cmng, 10187 uint8_t port) 10188{ 10189 int vn; 10190 int func; 10191 uint32_t addr; 10192 size_t size; 10193 10194 addr = (BAR_XSTRORM_INTMEM + 10195 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 10196 size = sizeof(struct cmng_struct_per_port); 10197 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); 10198 10199 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10200 func = func_by_vn(sc, vn); 10201 10202 addr = (BAR_XSTRORM_INTMEM + 10203 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 10204 size = sizeof(struct rate_shaping_vars_per_vn); 10205 ecore_storm_memset_struct(sc, addr, size, 10206 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); 10207 10208 addr = (BAR_XSTRORM_INTMEM + 10209 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 10210 size = sizeof(struct fairness_vars_per_vn); 10211 ecore_storm_memset_struct(sc, addr, size, 10212 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); 10213 } 10214} 10215 10216static void 10217bxe_pf_init(struct bxe_softc *sc) 10218{ 10219 struct bxe_func_init_params func_init = { 0 }; 10220 struct event_ring_data eq_data = { { 0 } }; 10221 uint16_t flags; 10222 10223 if (!CHIP_IS_E1x(sc)) { 10224 /* reset IGU PF statistics: MSIX + ATTN */ 10225 /* PF */ 10226 REG_WR(sc, 10227 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10228 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10229 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10230 0); 10231 /* ATTN */ 10232 REG_WR(sc, 10233 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10234 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10235 (BXE_IGU_STAS_MSG_PF_CNT * 4) + 10236 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10237 0); 10238 } 10239 10240 /* function setup flags */ 10241 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 10242 10243 /* 10244 * This flag is relevant for E1x only. 10245 * E2 doesn't have a TPA configuration at the function level. 10246 */ 10247 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; 10248 10249 func_init.func_flgs = flags; 10250 func_init.pf_id = SC_FUNC(sc); 10251 func_init.func_id = SC_FUNC(sc); 10252 func_init.spq_map = sc->spq_dma.paddr; 10253 func_init.spq_prod = sc->spq_prod_idx; 10254 10255 bxe_func_init(sc, &func_init); 10256 10257 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 10258 10259 /* 10260 * Congestion management values depend on the link rate. 10261 * There is no active link so initial link rate is set to 10Gbps. 10262 * When the link comes up the congestion management values are 10263 * re-calculated according to the actual link rate. 10264 */ 10265 sc->link_vars.line_speed = SPEED_10000; 10266 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); 10267 10268 /* Only the PMF sets the HW */ 10269 if (sc->port.pmf) { 10270 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 10271 } 10272 10273 /* init Event Queue - PCI bus guarantees correct endianity */ 10274 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 10275 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 10276 eq_data.producer = sc->eq_prod; 10277 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 10278 eq_data.sb_id = DEF_SB_ID; 10279 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 10280} 10281 10282static void 10283bxe_hc_int_enable(struct bxe_softc *sc) 10284{ 10285 int port = SC_PORT(sc); 10286 uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10287 uint32_t val = REG_RD(sc, addr); 10288 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10289 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10290 (sc->intr_count == 1)) ? TRUE : FALSE; 10291 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10292 10293 if (msix) { 10294 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10295 HC_CONFIG_0_REG_INT_LINE_EN_0); 10296 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10297 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10298 if (single_msix) { 10299 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10300 } 10301 } else if (msi) { 10302 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10303 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10304 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10305 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10306 } else { 10307 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10308 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10309 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10310 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10311 10312 if (!CHIP_IS_E1(sc)) { 10313 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10314 val, port, addr); 10315 10316 REG_WR(sc, addr, val); 10317 10318 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10319 } 10320 } 10321 10322 if (CHIP_IS_E1(sc)) { 10323 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10324 } 10325 10326 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10327 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10328 10329 REG_WR(sc, addr, val); 10330 10331 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10332 mb(); 10333 10334 if (!CHIP_IS_E1(sc)) { 10335 /* init leading/trailing edge */ 10336 if (IS_MF(sc)) { 10337 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10338 if (sc->port.pmf) { 10339 /* enable nig and gpio3 attention */ 10340 val |= 0x1100; 10341 } 10342 } else { 10343 val = 0xffff; 10344 } 10345 10346 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10347 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10348 } 10349 10350 /* make sure that interrupts are indeed enabled from here on */ 10351 mb(); 10352} 10353 10354static void 10355bxe_igu_int_enable(struct bxe_softc *sc) 10356{ 10357 uint32_t val; 10358 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10359 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10360 (sc->intr_count == 1)) ? TRUE : FALSE; 10361 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10362 10363 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10364 10365 if (msix) { 10366 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10367 IGU_PF_CONF_SINGLE_ISR_EN); 10368 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10369 IGU_PF_CONF_ATTN_BIT_EN); 10370 if (single_msix) { 10371 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10372 } 10373 } else if (msi) { 10374 val &= ~IGU_PF_CONF_INT_LINE_EN; 10375 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10376 IGU_PF_CONF_ATTN_BIT_EN | 10377 IGU_PF_CONF_SINGLE_ISR_EN); 10378 } else { 10379 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10380 val |= (IGU_PF_CONF_INT_LINE_EN | 10381 IGU_PF_CONF_ATTN_BIT_EN | 10382 IGU_PF_CONF_SINGLE_ISR_EN); 10383 } 10384 10385 /* clean previous status - need to configure igu prior to ack */ 10386 if ((!msix) || single_msix) { 10387 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10388 bxe_ack_int(sc); 10389 } 10390 10391 val |= IGU_PF_CONF_FUNC_EN; 10392 10393 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10394 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10395 10396 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10397 10398 mb(); 10399 10400 /* init leading/trailing edge */ 10401 if (IS_MF(sc)) { 10402 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10403 if (sc->port.pmf) { 10404 /* enable nig and gpio3 attention */ 10405 val |= 0x1100; 10406 } 10407 } else { 10408 val = 0xffff; 10409 } 10410 10411 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10412 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10413 10414 /* make sure that interrupts are indeed enabled from here on */ 10415 mb(); 10416} 10417 10418static void 10419bxe_int_enable(struct bxe_softc *sc) 10420{ 10421 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10422 bxe_hc_int_enable(sc); 10423 } else { 10424 bxe_igu_int_enable(sc); 10425 } 10426} 10427 10428static void 10429bxe_hc_int_disable(struct bxe_softc *sc) 10430{ 10431 int port = SC_PORT(sc); 10432 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10433 uint32_t val = REG_RD(sc, addr); 10434 10435 /* 10436 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10437 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC 10438 * block. 10439 */ 10440 if (CHIP_IS_E1(sc)) { 10441 /* 10442 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register 10443 * to prevent the HC from sending interrupts after we exit the function. 10444 */ 10445 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10446 10447 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10448 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10449 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10450 } else { 10451 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10452 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10453 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10454 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10455 } 10456 10457 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10458 10459 /* flush all outstanding writes */ 10460 mb(); 10461 10462 REG_WR(sc, addr, val); 10463 if (REG_RD(sc, addr) != val) { 10464 BLOGE(sc, "proper val not read from HC IGU!\n"); 10465 } 10466} 10467 10468static void 10469bxe_igu_int_disable(struct bxe_softc *sc) 10470{ 10471 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10472 10473 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10474 IGU_PF_CONF_INT_LINE_EN | 10475 IGU_PF_CONF_ATTN_BIT_EN); 10476 10477 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10478 10479 /* flush all outstanding writes */ 10480 mb(); 10481 10482 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10483 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10484 BLOGE(sc, "proper val not read from IGU!\n"); 10485 } 10486} 10487 10488static void 10489bxe_int_disable(struct bxe_softc *sc) 10490{ 10491 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10492 bxe_hc_int_disable(sc); 10493 } else { 10494 bxe_igu_int_disable(sc); 10495 } 10496} 10497 10498static void 10499bxe_nic_init(struct bxe_softc *sc, 10500 int load_code) 10501{ 10502 int i; 10503 10504 for (i = 0; i < sc->num_queues; i++) { 10505 bxe_init_eth_fp(sc, i); 10506 } 10507 10508 rmb(); /* ensure status block indices were read */ 10509 10510 bxe_init_rx_rings(sc); 10511 bxe_init_tx_rings(sc); 10512 10513 if (IS_VF(sc)) { 10514 return; 10515 } 10516 10517 /* initialize MOD_ABS interrupts */ 10518 elink_init_mod_abs_int(sc, &sc->link_vars, 10519 sc->devinfo.chip_id, 10520 sc->devinfo.shmem_base, 10521 sc->devinfo.shmem2_base, 10522 SC_PORT(sc)); 10523 10524 bxe_init_def_sb(sc); 10525 bxe_update_dsb_idx(sc); 10526 bxe_init_sp_ring(sc); 10527 bxe_init_eq_ring(sc); 10528 bxe_init_internal(sc, load_code); 10529 bxe_pf_init(sc); 10530 bxe_stats_init(sc);
10531 10532 /* flush all before enabling interrupts */ 10533 mb(); 10534 10535 bxe_int_enable(sc); 10536 10537 /* check for SPIO5 */ 10538 bxe_attn_int_deasserted0(sc, 10539 REG_RD(sc, 10540 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10541 SC_PORT(sc)*4)) & 10542 AEU_INPUTS_ATTN_BITS_SPIO5); 10543} 10544 10545static inline void 10546bxe_init_objs(struct bxe_softc *sc) 10547{ 10548 /* mcast rules must be added to tx if tx switching is enabled */ 10549 ecore_obj_type o_type = 10550 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10551 ECORE_OBJ_TYPE_RX; 10552 10553 /* RX_MODE controlling object */ 10554 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10555 10556 /* multicast configuration controlling object */ 10557 ecore_init_mcast_obj(sc, 10558 &sc->mcast_obj, 10559 sc->fp[0].cl_id, 10560 sc->fp[0].index, 10561 SC_FUNC(sc), 10562 SC_FUNC(sc), 10563 BXE_SP(sc, mcast_rdata), 10564 BXE_SP_MAPPING(sc, mcast_rdata), 10565 ECORE_FILTER_MCAST_PENDING, 10566 &sc->sp_state, 10567 o_type); 10568 10569 /* Setup CAM credit pools */ 10570 ecore_init_mac_credit_pool(sc, 10571 &sc->macs_pool, 10572 SC_FUNC(sc), 10573 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10574 VNICS_PER_PATH(sc)); 10575 10576 ecore_init_vlan_credit_pool(sc, 10577 &sc->vlans_pool, 10578 SC_ABS_FUNC(sc) >> 1, 10579 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10580 VNICS_PER_PATH(sc)); 10581 10582 /* RSS configuration object */ 10583 ecore_init_rss_config_obj(sc, 10584 &sc->rss_conf_obj, 10585 sc->fp[0].cl_id, 10586 sc->fp[0].index, 10587 SC_FUNC(sc), 10588 SC_FUNC(sc), 10589 BXE_SP(sc, rss_rdata), 10590 BXE_SP_MAPPING(sc, rss_rdata), 10591 ECORE_FILTER_RSS_CONF_PENDING, 10592 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10593} 10594 10595/* 10596 * Initialize the function. This must be called before sending CLIENT_SETUP 10597 * for the first client. 
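 *
 * Sketch of the expected load-time ordering (an assumption drawn from the
 * surrounding code, not a contract stated here): bxe_func_start() runs
 * first, then bxe_setup_leading() -> bxe_setup_queue() issues the
 * RESET->INIT and INIT->SETUP (CLIENT_SETUP) ramrods for fp[0], followed
 * by the remaining queues.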
10598 */ 10599static inline int 10600bxe_func_start(struct bxe_softc *sc) 10601{ 10602 struct ecore_func_state_params func_params = { NULL }; 10603 struct ecore_func_start_params *start_params = &func_params.params.start; 10604 10605 /* Prepare parameters for function state transitions */ 10606 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 10607 10608 func_params.f_obj = &sc->func_obj; 10609 func_params.cmd = ECORE_F_CMD_START; 10610 10611 /* Function parameters */ 10612 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 10613 start_params->sd_vlan_tag = OVLAN(sc); 10614 10615 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 10616 start_params->network_cos_mode = STATIC_COS; 10617 } else { /* CHIP_IS_E1X */ 10618 start_params->network_cos_mode = FW_WRR; 10619 } 10620 10621 //start_params->gre_tunnel_mode = 0; 10622 //start_params->gre_tunnel_rss = 0; 10623 10624 return (ecore_func_state_change(sc, &func_params)); 10625} 10626 10627static int 10628bxe_set_power_state(struct bxe_softc *sc, 10629 uint8_t state) 10630{ 10631 uint16_t pmcsr; 10632 10633 /* If there is no power capability, silently succeed */ 10634 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 10635 BLOGW(sc, "No power capability\n"); 10636 return (0); 10637 } 10638 10639 pmcsr = pci_read_config(sc->dev, 10640 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10641 2); 10642 10643 switch (state) { 10644 case PCI_PM_D0: 10645 pci_write_config(sc->dev, 10646 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10647 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 10648 10649 if (pmcsr & PCIM_PSTAT_DMASK) { 10650 /* delay required during transition out of D3hot */ 10651 DELAY(20000); 10652 } 10653 10654 break; 10655 10656 case PCI_PM_D3hot: 10657 /* XXX if there are other clients above don't shut down the power */ 10658 10659 /* don't shut down the power for emulation and FPGA */ 10660 if (CHIP_REV_IS_SLOW(sc)) { 10661 return (0); 10662 } 10663 10664 pmcsr &= ~PCIM_PSTAT_DMASK; 10665 pmcsr |= PCIM_PSTAT_D3; 10666 10667 if (sc->wol) { 10668 pmcsr |= PCIM_PSTAT_PMEENABLE; 10669 } 10670 10671 pci_write_config(sc->dev, 10672 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10673 pmcsr, 4); 10674 10675 /* 10676 * No more memory access after this point until device is brought back 10677 * to D0 state. 
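 * (While in D3hot only PCI config space accesses are valid; a later
 * bxe_set_power_state(sc, PCI_PM_D0) call restores memory-mapped access
 * and applies the D3hot->D0 settle delay seen in the PCI_PM_D0 case
 * above.)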
10678 */ 10679 break; 10680 10681 default: 10682 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", 10683 state, pmcsr); 10684 return (-1); 10685 } 10686 10687 return (0); 10688} 10689 10690 10691/* return TRUE if the resource lock was successfully acquired */ 10692static uint8_t 10693bxe_trylock_hw_lock(struct bxe_softc *sc, 10694 uint32_t resource) 10695{ 10696 uint32_t lock_status; 10697 uint32_t resource_bit = (1 << resource); 10698 int func = SC_FUNC(sc); 10699 uint32_t hw_lock_control_reg; 10700 10701 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); 10702 10703 /* Validating that the resource is within range */ 10704 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 10705 BLOGD(sc, DBG_LOAD, 10706 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 10707 resource, HW_LOCK_MAX_RESOURCE_VALUE); 10708 return (FALSE); 10709 } 10710 10711 if (func <= 5) { 10712 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 10713 } else { 10714 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 10715 } 10716 10717 /* try to acquire the lock */ 10718 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 10719 lock_status = REG_RD(sc, hw_lock_control_reg); 10720 if (lock_status & resource_bit) { 10721 return (TRUE); 10722 } 10723 10724 BLOGE(sc, "Failed to get a resource lock 0x%x func %d " 10725 "lock_status 0x%x resource_bit 0x%x\n", resource, func, 10726 lock_status, resource_bit); 10727 10728 return (FALSE); 10729} 10730 10731/* 10732 * Get the recovery leader resource id according to the engine this function 10733 * belongs to. Currently only 2 engines are supported. 10734 */ 10735static int 10736bxe_get_leader_lock_resource(struct bxe_softc *sc) 10737{ 10738 if (SC_PATH(sc)) { 10739 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); 10740 } else { 10741 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); 10742 } 10743} 10744 10745/* try to acquire a leader lock for current engine */ 10746static uint8_t 10747bxe_trylock_leader_lock(struct bxe_softc *sc) 10748{ 10749 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 10750} 10751 10752static int 10753bxe_release_leader_lock(struct bxe_softc *sc) 10754{ 10755 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 10756} 10757 10758/* close gates #2, #3 and #4 */ 10759static void 10760bxe_set_234_gates(struct bxe_softc *sc, 10761 uint8_t close) 10762{ 10763 uint32_t val; 10764 10765 /* gates #2 and #4a are closed/opened for "not E1" only */ 10766 if (!CHIP_IS_E1(sc)) { 10767 /* #4 */ 10768 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 10769 /* #2 */ 10770 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 10771 } 10772 10773 /* #3 */ 10774 if (CHIP_IS_E1x(sc)) { 10775 /* prevent interrupts from HC on both ports */ 10776 val = REG_RD(sc, HC_REG_CONFIG_1); 10777 REG_WR(sc, HC_REG_CONFIG_1, 10778 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 10779 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 10780 10781 val = REG_RD(sc, HC_REG_CONFIG_0); 10782 REG_WR(sc, HC_REG_CONFIG_0, 10783 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 10784 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 10785 } else { 10786 /* Prevent incoming interrupts in IGU */ 10787 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 10788 10789 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 10790 (!close) ?
10791 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 10792 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 10793 } 10794 10795 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", 10796 close ? "closing" : "opening"); 10797 10798 wmb(); 10799} 10800 10801/* poll for the pending writes bit; it should get cleared in no more than 1s */ 10802static int 10803bxe_er_poll_igu_vq(struct bxe_softc *sc) 10804{ 10805 uint32_t cnt = 1000; 10806 uint32_t pend_bits = 0; 10807 10808 do { 10809 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 10810 10811 if (pend_bits == 0) { 10812 break; 10813 } 10814 10815 DELAY(1000); 10816 } while (--cnt > 0); 10817 10818 if (cnt == 0) { 10819 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 10820 return (-1); 10821 } 10822 10823 return (0); 10824} 10825 10826#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 10827 10828static void 10829bxe_clp_reset_prep(struct bxe_softc *sc, 10830 uint32_t *magic_val) 10831{ 10832 /* Do some magic... */ 10833 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10834 *magic_val = val & SHARED_MF_CLP_MAGIC; 10835 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 10836} 10837 10838/* restore the value of the 'magic' bit */ 10839static void 10840bxe_clp_reset_done(struct bxe_softc *sc, 10841 uint32_t magic_val) 10842{ 10843 /* Restore the 'magic' bit value... */ 10844 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10845 MFCFG_WR(sc, shared_mf_config.clp_mb, 10846 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 10847} 10848 10849/* prepare for MCP reset, takes care of CLP configurations */ 10850static void 10851bxe_reset_mcp_prep(struct bxe_softc *sc, 10852 uint32_t *magic_val) 10853{ 10854 uint32_t shmem; 10855 uint32_t validity_offset; 10856 10857 /* set `magic' bit in order to save MF config */ 10858 if (!CHIP_IS_E1(sc)) { 10859 bxe_clp_reset_prep(sc, magic_val); 10860 } 10861 10862 /* get shmem offset */ 10863 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10864 validity_offset = 10865 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 10866 10867 /* Clear validity map flags */ 10868 if (shmem > 0) { 10869 REG_WR(sc, shmem + validity_offset, 0); 10870 } 10871} 10872 10873#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 10874#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 10875 10876static void 10877bxe_mcp_wait_one(struct bxe_softc *sc) 10878{ 10879 /* special handling for emulation and FPGA (10 times longer) */ 10880 if (CHIP_REV_IS_SLOW(sc)) { 10881 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 10882 } else { 10883 DELAY((MCP_ONE_TIMEOUT) * 1000); 10884 } 10885} 10886 10887/* initialize shmem_base and wait for the validity signature to appear */ 10888static int 10889bxe_init_shmem(struct bxe_softc *sc) 10890{ 10891 int cnt = 0; 10892 uint32_t val = 0; 10893 10894 do { 10895 sc->devinfo.shmem_base = 10896 sc->link_params.shmem_base = 10897 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10898 10899 if (sc->devinfo.shmem_base) { 10900 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 10901 if (val & SHR_MEM_VALIDITY_MB) 10902 return (0); 10903 } 10904 10905 bxe_mcp_wait_one(sc); 10906 10907 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 10908 10909 BLOGE(sc, "BAD MCP validity signature\n"); 10910 10911 return (-1); 10912} 10913 10914static int 10915bxe_reset_mcp_comp(struct bxe_softc *sc, 10916 uint32_t magic_val) 10917{ 10918 int rc = bxe_init_shmem(sc); 10919 10920 /* Restore the `magic' bit value */ 10921 if (!CHIP_IS_E1(sc)) { 10922 bxe_clp_reset_done(sc, magic_val); 10923 } 10924 10925 return
(rc); 10926} 10927 10928static void 10929bxe_pxp_prep(struct bxe_softc *sc) 10930{ 10931 if (!CHIP_IS_E1(sc)) { 10932 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 10933 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 10934 wmb(); 10935 } 10936} 10937 10938/* 10939 * Reset the whole chip except for: 10940 * - PCIE core 10941 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 10942 * - IGU 10943 * - MISC (including AEU) 10944 * - GRC 10945 * - RBCN, RBCP 10946 */ 10947static void 10948bxe_process_kill_chip_reset(struct bxe_softc *sc, 10949 uint8_t global) 10950{ 10951 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 10952 uint32_t global_bits2, stay_reset2; 10953 10954 /* 10955 * Bits that have to be set in reset_mask2 if we want to reset 'global' 10956 * (per chip) blocks. 10957 */ 10958 global_bits2 = 10959 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 10960 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 10961 10962 /* 10963 * Don't reset the following blocks. 10964 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 10965 * reset, as in a 4-port device they might still be owned 10966 * by the MCP (there is only one leader per path). 10967 */ 10968 not_reset_mask1 = 10969 MISC_REGISTERS_RESET_REG_1_RST_HC | 10970 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 10971 MISC_REGISTERS_RESET_REG_1_RST_PXP; 10972 10973 not_reset_mask2 = 10974 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 10975 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 10976 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 10977 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 10978 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 10979 MISC_REGISTERS_RESET_REG_2_RST_GRC | 10980 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 10981 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 10982 MISC_REGISTERS_RESET_REG_2_RST_ATC | 10983 MISC_REGISTERS_RESET_REG_2_PGLC | 10984 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 10985 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 10986 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 10987 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 10988 MISC_REGISTERS_RESET_REG_2_UMAC0 | 10989 MISC_REGISTERS_RESET_REG_2_UMAC1; 10990 10991 /* 10992 * Keep the following blocks in reset: 10993 * - all xxMACs are handled by the elink code. 10994 */ 10995 stay_reset2 = 10996 MISC_REGISTERS_RESET_REG_2_XMAC | 10997 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 10998 10999 /* Full reset masks according to the chip */ 11000 reset_mask1 = 0xffffffff; 11001 11002 if (CHIP_IS_E1(sc)) 11003 reset_mask2 = 0xffff; 11004 else if (CHIP_IS_E1H(sc)) 11005 reset_mask2 = 0x1ffff; 11006 else if (CHIP_IS_E2(sc)) 11007 reset_mask2 = 0xfffff; 11008 else /* CHIP_IS_E3 */ 11009 reset_mask2 = 0x3ffffff; 11010 11011 /* Don't reset global blocks unless we need to */ 11012 if (!global) 11013 reset_mask2 &= ~global_bits2; 11014 11015 /* 11016 * In case of attention in the QM, we need to reset PXP 11017 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 11018 * because otherwise QM reset would release 'close the gates' shortly 11019 * before resetting the PXP, then the PSWRQ would send a write 11020 * request to PGLUE. Then when PXP is reset, PGLUE would try to 11021 * read the payload data from PSWWR, but PSWWR would not 11022 * respond. The write queue in PGLUE would get stuck and DMAE commands 11023 * would not return.
Therefore it's important to reset the second 11024 * reset register (containing the 11025 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 11026 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 11027 * bit). 11028 */ 11029 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11030 reset_mask2 & (~not_reset_mask2)); 11031 11032 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11033 reset_mask1 & (~not_reset_mask1)); 11034 11035 mb(); 11036 wmb(); 11037 11038 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11039 reset_mask2 & (~stay_reset2)); 11040 11041 mb(); 11042 wmb(); 11043 11044 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11045 wmb(); 11046} 11047 11048static int 11049bxe_process_kill(struct bxe_softc *sc, 11050 uint8_t global) 11051{ 11052 int cnt = 1000; 11053 uint32_t val = 0; 11054 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11055 uint32_t tags_63_32 = 0; 11056 11057 /* Empty the Tetris buffer, wait for 1s */ 11058 do { 11059 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11060 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11061 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11062 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11063 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11064 if (CHIP_IS_E3(sc)) { 11065 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11066 } 11067 11068 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11069 ((port_is_idle_0 & 0x1) == 0x1) && 11070 ((port_is_idle_1 & 0x1) == 0x1) && 11071 (pgl_exp_rom2 == 0xffffffff) && 11072 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11073 break; 11074 DELAY(1000); 11075 } while (cnt-- > 0); 11076 11077 if (cnt <= 0) { 11078 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11079 "are still outstanding read requests after 1s! " 11080 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11081 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11082 sr_cnt, blk_cnt, port_is_idle_0, 11083 port_is_idle_1, pgl_exp_rom2); 11084 return (-1); 11085 } 11086 11087 mb(); 11088 11089 /* Close gates #2, #3 and #4 */ 11090 bxe_set_234_gates(sc, TRUE); 11091 11092 /* Poll for IGU VQs for 57712 and newer chips */ 11093 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11094 return (-1); 11095 } 11096 11097 /* XXX indicate that "process kill" is in progress to MCP */ 11098 11099 /* clear "unprepared" bit */ 11100 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11101 mb(); 11102 11103 /* Make sure all is written to the chip before the reset */ 11104 wmb(); 11105 11106 /* 11107 * Wait for 1ms to empty GLUE and PCI-E core queues, 11108 * PSWHST, GRC and PSWRD Tetris buffer. 
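 * (This fixed settle delay is in addition to the bounded ~1s polling of
 * the PXP2 read-client counters done at the top of this function, before
 * the gates were closed.)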
11109 */ 11110 DELAY(1000); 11111 11112 /* Prepare for chip reset: */ 11113 /* MCP */ 11114 if (global) { 11115 bxe_reset_mcp_prep(sc, &val); 11116 } 11117 11118 /* PXP */ 11119 bxe_pxp_prep(sc); 11120 mb(); 11121 11122 /* reset the chip */ 11123 bxe_process_kill_chip_reset(sc, global); 11124 mb(); 11125 11126 /* clear errors in PGB */ 11127 if (!CHIP_IS_E1(sc)) 11128 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11129 11130 /* Recover after reset: */ 11131 /* MCP */ 11132 if (global && bxe_reset_mcp_comp(sc, val)) { 11133 return (-1); 11134 } 11135 11136 /* XXX add resetting the NO_MCP mode DB here */ 11137 11138 /* Open the gates #2, #3 and #4 */ 11139 bxe_set_234_gates(sc, FALSE); 11140 11141 /* XXX 11142 * IGU/AEU preparation bring back the AEU/IGU to a reset state 11143 * re-enable attentions 11144 */ 11145 11146 return (0); 11147} 11148 11149static int 11150bxe_leader_reset(struct bxe_softc *sc) 11151{ 11152 int rc = 0; 11153 uint8_t global = bxe_reset_is_global(sc); 11154 uint32_t load_code; 11155 11156 /* 11157 * If not going to reset MCP, load "fake" driver to reset HW while 11158 * the driver is the owner of the HW. 11159 */ 11160 if (!global && !BXE_NOMCP(sc)) { 11161 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11162 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11163 if (!load_code) { 11164 BLOGE(sc, "MCP response failure, aborting\n"); 11165 rc = -1; 11166 goto exit_leader_reset; 11167 } 11168 11169 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11170 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11171 BLOGE(sc, "MCP unexpected response, aborting\n"); 11172 rc = -1; 11173 goto exit_leader_reset2; 11174 } 11175 11176 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11177 if (!load_code) { 11178 BLOGE(sc, "MCP response failure, aborting\n"); 11179 rc = -1; 11180 goto exit_leader_reset2; 11181 } 11182 } 11183 11184 /* try to recover after the failure */ 11185 if (bxe_process_kill(sc, global)) { 11186 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11187 rc = -1; 11188 goto exit_leader_reset2; 11189 } 11190 11191 /* 11192 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11193 * state. 11194 */ 11195 bxe_set_reset_done(sc); 11196 if (global) { 11197 bxe_clear_reset_global(sc); 11198 } 11199 11200exit_leader_reset2: 11201 11202 /* unload the "fake" driver if it was loaded */ 11203 if (!global && !BXE_NOMCP(sc)) { 11204 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11205 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11206 } 11207 11208exit_leader_reset: 11209 11210 sc->is_leader = 0; 11211 bxe_release_leader_lock(sc); 11212 11213 mb(); 11214 return (rc); 11215} 11216 11217/* 11218 * prepare INIT transition, parameters configured: 11219 * - HC configuration 11220 * - Queue's CDU context 11221 */ 11222static void 11223bxe_pf_q_prep_init(struct bxe_softc *sc, 11224 struct bxe_fastpath *fp, 11225 struct ecore_queue_init_params *init_params) 11226{ 11227 uint8_t cos; 11228 int cxt_index, cxt_offset; 11229 11230 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11231 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11232 11233 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11234 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11235 11236 /* HC rate */ 11237 init_params->rx.hc_rate = 11238 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11239 init_params->tx.hc_rate = 11240 sc->hc_tx_ticks ?
(1000000 / sc->hc_tx_ticks) : 0; 11241 11242 /* FW SB ID */ 11243 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11244 11245 /* CQ index among the SB indices */ 11246 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11247 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11248 11249 /* set maximum number of COSs supported by this queue */ 11250 init_params->max_cos = sc->max_cos; 11251 11252 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11253 fp->index, init_params->max_cos); 11254 11255 /* set the context pointers queue object */ 11256 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11257 /* XXX change index/cid here if ever support multiple tx CoS */ 11258 /* fp->txdata[cos]->cid */ 11259 cxt_index = fp->index / ILT_PAGE_CIDS; 11260 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11261 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11262 } 11263} 11264 11265/* set flags that are common to both Tx-only and regular (normal) connections */ 11266static unsigned long 11267bxe_get_common_flags(struct bxe_softc *sc, 11268 struct bxe_fastpath *fp, 11269 uint8_t zero_stats) 11270{ 11271 unsigned long flags = 0; 11272 11273 /* PF driver will always initialize the Queue to an ACTIVE state */ 11274 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11275 11276 /* 11277 * tx only connections collect statistics (on the same index as the 11278 * parent connection). The statistics are zeroed when the parent 11279 * connection is initialized. 11280 */ 11281 11282 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11283 if (zero_stats) { 11284 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11285 } 11286 11287 /* 11288 * tx only connections can support tx-switching, though their 11289 * CoS-ness doesn't survive the loopback 11290 */ 11291 if (sc->flags & BXE_TX_SWITCHING) { 11292 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11293 } 11294 11295 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11296 11297 return (flags); 11298} 11299 11300static unsigned long 11301bxe_get_q_flags(struct bxe_softc *sc, 11302 struct bxe_fastpath *fp, 11303 uint8_t leading) 11304{ 11305 unsigned long flags = 0; 11306 11307 if (IS_MF_SD(sc)) { 11308 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11309 } 11310 11311 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11312 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11313#if __FreeBSD_version >= 800000 11314 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11315#endif 11316 } 11317 11318 if (leading) { 11319 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11320 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11321 } 11322 11323 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11324 11325 /* merge with common flags */ 11326 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11327} 11328 11329static void 11330bxe_pf_q_prep_general(struct bxe_softc *sc, 11331 struct bxe_fastpath *fp, 11332 struct ecore_general_setup_params *gen_init, 11333 uint8_t cos) 11334{ 11335 gen_init->stat_id = bxe_stats_id(fp); 11336 gen_init->spcl_id = fp->cl_id; 11337 gen_init->mtu = sc->mtu; 11338 gen_init->cos = cos; 11339} 11340 11341static void 11342bxe_pf_rx_q_prep(struct bxe_softc *sc, 11343 struct bxe_fastpath *fp, 11344 struct rxq_pause_params *pause, 11345 struct ecore_rxq_setup_params *rxq_init) 11346{ 11347 uint8_t max_sge = 0; 11348 uint16_t sge_sz = 0; 11349 uint16_t tpa_agg_size = 0; 11350 11351 pause->sge_th_lo = SGE_TH_LO(sc); 11352 pause->sge_th_hi = SGE_TH_HI(sc); 11353 11354 /* validate SGE ring has enough to cross high threshold */ 11355 if (sc->dropless_fc
&& 11356 (pause->sge_th_hi + FW_PREFETCH_CNT) > 11357 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { 11358 BLOGW(sc, "sge ring threshold limit\n"); 11359 } 11360 11361 /* minimum max_aggregation_size is 2*MTU (two full buffers) */ 11362 tpa_agg_size = (2 * sc->mtu); 11363 if (tpa_agg_size < sc->max_aggregation_size) { 11364 tpa_agg_size = sc->max_aggregation_size; 11365 } 11366 11367 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; 11368 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 11369 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; 11370 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); 11371 11372 /* pause - not for e1 */ 11373 if (!CHIP_IS_E1(sc)) { 11374 pause->bd_th_lo = BD_TH_LO(sc); 11375 pause->bd_th_hi = BD_TH_HI(sc); 11376 11377 pause->rcq_th_lo = RCQ_TH_LO(sc); 11378 pause->rcq_th_hi = RCQ_TH_HI(sc); 11379 11380 /* validate rings have enough entries to cross high thresholds */ 11381 if (sc->dropless_fc && 11382 pause->bd_th_hi + FW_PREFETCH_CNT > 11383 sc->rx_ring_size) { 11384 BLOGW(sc, "rx bd ring threshold limit\n"); 11385 } 11386 11387 if (sc->dropless_fc && 11388 pause->rcq_th_hi + FW_PREFETCH_CNT > 11389 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { 11390 BLOGW(sc, "rcq ring threshold limit\n"); 11391 } 11392 11393 pause->pri_map = 1; 11394 } 11395 11396 /* rxq setup */ 11397 rxq_init->dscr_map = fp->rx_dma.paddr; 11398 rxq_init->sge_map = fp->rx_sge_dma.paddr; 11399 rxq_init->rcq_map = fp->rcq_dma.paddr; 11400 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); 11401 11402 /* 11403 * This should be the maximum number of data bytes that may be 11404 * placed on the BD (not including padding). 11405 */ 11406 rxq_init->buf_sz = (fp->rx_buf_size - 11407 IP_HEADER_ALIGNMENT_PADDING); 11408 11409 rxq_init->cl_qzone_id = fp->cl_qzone_id; 11410 rxq_init->tpa_agg_sz = tpa_agg_size; 11411 rxq_init->sge_buf_sz = sge_sz; 11412 rxq_init->max_sges_pkt = max_sge; 11413 rxq_init->rss_engine_id = SC_FUNC(sc); 11414 rxq_init->mcast_engine_id = SC_FUNC(sc); 11415 11416 /* 11417 * Maximum number of simultaneous TPA aggregations for this Queue. 11418 * For PF Clients it should be the maximum available number. 11419 * VF driver(s) may want to define it to a smaller value. 11420 */ 11421 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); 11422 11423 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; 11424 rxq_init->fw_sb_id = fp->fw_sb_id; 11425 11426 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11427 11428 /* 11429 * configure silent vlan removal 11430 * if multi function mode is afex, then mask default vlan 11431 */ 11432 if (IS_MF_AFEX(sc)) { 11433 rxq_init->silent_removal_value = 11434 sc->devinfo.mf_info.afex_def_vlan_tag; 11435 rxq_init->silent_removal_mask = EVL_VLID_MASK; 11436 } 11437} 11438 11439static void 11440bxe_pf_tx_q_prep(struct bxe_softc *sc, 11441 struct bxe_fastpath *fp, 11442 struct ecore_txq_setup_params *txq_init, 11443 uint8_t cos) 11444{ 11445 /* 11446 * XXX If multiple CoS is ever supported then each fastpath structure 11447 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11448 * fp->txdata[cos]->tx_dma.paddr; 11449 */ 11450 txq_init->dscr_map = fp->tx_dma.paddr; 11451 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11452 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11453 txq_init->fw_sb_id = fp->fw_sb_id; 11454 11455 /* 11456 * set the TSS leading client id for TX classification to the 11457 * leading RSS client id 11458 */ 11459 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11460} 11461 11462/* 11463 * This function performs 2 steps in a queue state machine: 11464 * 1) RESET->INIT 11465 * 2) INIT->SETUP 11466 */ 11467static int 11468bxe_setup_queue(struct bxe_softc *sc, 11469 struct bxe_fastpath *fp, 11470 uint8_t leading) 11471{ 11472 struct ecore_queue_state_params q_params = { NULL }; 11473 struct ecore_queue_setup_params *setup_params = 11474 &q_params.params.setup; 11475 int rc; 11476 11477 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11478 11479 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11480 11481 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11482 11483 /* we want to wait for completion in this context */ 11484 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11485 11486 /* prepare the INIT parameters */ 11487 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11488 11489 /* Set the command */ 11490 q_params.cmd = ECORE_Q_CMD_INIT; 11491 11492 /* Change the state to INIT */ 11493 rc = ecore_queue_state_change(sc, &q_params); 11494 if (rc) { 11495 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); 11496 return (rc); 11497 } 11498 11499 BLOGD(sc, DBG_LOAD, "init complete\n"); 11500 11501 /* now move the Queue to the SETUP state */ 11502 memset(setup_params, 0, sizeof(*setup_params)); 11503 11504 /* set Queue flags */ 11505 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11506 11507 /* set general SETUP parameters */ 11508 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11509 FIRST_TX_COS_INDEX); 11510 11511 bxe_pf_rx_q_prep(sc, fp, 11512 &setup_params->pause_params, 11513 &setup_params->rxq_params); 11514 11515 bxe_pf_tx_q_prep(sc, fp, 11516 &setup_params->txq_params, 11517 FIRST_TX_COS_INDEX); 11518 11519 /* Set the command */ 11520 q_params.cmd = ECORE_Q_CMD_SETUP; 11521 11522 /* change the state to SETUP */ 11523 rc = ecore_queue_state_change(sc, &q_params); 11524 if (rc) { 11525 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); 11526 return (rc); 11527 } 11528 11529 return (rc); 11530} 11531 11532static int 11533bxe_setup_leading(struct bxe_softc *sc) 11534{ 11535 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11536} 11537 11538static int 11539bxe_config_rss_pf(struct bxe_softc *sc, 11540 struct ecore_rss_config_obj *rss_obj, 11541 uint8_t config_hash) 11542{ 11543 struct ecore_config_rss_params params = { NULL }; 11544 int i; 11545 11546 /* 11547 * Although RSS is meaningless when there is a single HW queue we 11548 * still need it enabled in order to have HW Rx hash generated.
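 *
 * For example (illustrative numbers): with 4 ETH queues and a leading
 * cl_id of 0, bxe_init_rss_pf() below fills the indirection table as
 * ind_table[i] = cl_id + (i % 4), i.e. 0,1,2,3,0,1,... With a single
 * queue every entry maps to cl_id 0 and only the RX hash generation
 * side effect matters.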
11549 */ 11550 11551 params.rss_obj = rss_obj; 11552 11553 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11554 11555 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11556 11557 /* RSS configuration */ 11558 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11559 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11560 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11561 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11562 if (rss_obj->udp_rss_v4) { 11563 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11564 } 11565 if (rss_obj->udp_rss_v6) { 11566 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11567 } 11568 11569 /* Hash bits */ 11570 params.rss_result_mask = MULTI_MASK; 11571 11572 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11573 11574 if (config_hash) { 11575 /* RSS keys */ 11576 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 11577 params.rss_key[i] = arc4random(); 11578 } 11579 11580 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 11581 } 11582 11583 return (ecore_config_rss(sc, &params)); 11584} 11585 11586static int 11587bxe_config_rss_eth(struct bxe_softc *sc, 11588 uint8_t config_hash) 11589{ 11590 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 11591} 11592 11593static int 11594bxe_init_rss_pf(struct bxe_softc *sc) 11595{ 11596 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 11597 int i; 11598 11599 /* 11600 * Prepare the initial contents of the indirection table if 11601 * RSS is enabled 11602 */ 11603 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 11604 sc->rss_conf_obj.ind_table[i] = 11605 (sc->fp->cl_id + (i % num_eth_queues)); 11606 } 11607 11608 if (sc->udp_rss) { 11609 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 11610 } 11611 11612 /* 11613 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 11614 * per-port, so if explicit configuration is needed, do it only 11615 * for a PMF. 11616 * 11617 * For 57712 and newer it's a per-function configuration. 11618 */ 11619 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 11620} 11621 11622static int 11623bxe_set_mac_one(struct bxe_softc *sc, 11624 uint8_t *mac, 11625 struct ecore_vlan_mac_obj *obj, 11626 uint8_t set, 11627 int mac_type, 11628 unsigned long *ramrod_flags) 11629{ 11630 struct ecore_vlan_mac_ramrod_params ramrod_param; 11631 int rc; 11632 11633 memset(&ramrod_param, 0, sizeof(ramrod_param)); 11634 11635 /* fill in general parameters */ 11636 ramrod_param.vlan_mac_obj = obj; 11637 ramrod_param.ramrod_flags = *ramrod_flags; 11638 11639 /* fill a user request section if needed */ 11640 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 11641 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 11642 11643 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 11644 11645 /* Set the command: ADD or DEL */ 11646 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 11647 ECORE_VLAN_MAC_DEL; 11648 } 11649 11650 rc = ecore_config_vlan_mac(sc, &ramrod_param); 11651 11652 if (rc == ECORE_EXISTS) { 11653 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 11654 /* do not treat adding same MAC as error */ 11655 rc = 0; 11656 } else if (rc < 0) { 11657 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 11658 } 11659 11660 return (rc); 11661} 11662 11663static int 11664bxe_set_eth_mac(struct bxe_softc *sc, 11665 uint8_t set) 11666{ 11667 unsigned long ramrod_flags = 0; 11668 11669 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 11670 11671 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 11672 11673 /* Eth MAC is set on RSS leading client (fp[0]) */ 11674 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 11675 &sc->sp_objs->mac_obj, 11676 set, ECORE_ETH_MAC, &ramrod_flags)); 11677} 11678 11679static int 11680bxe_get_cur_phy_idx(struct bxe_softc *sc) 11681{ 11682 uint32_t sel_phy_idx = 0; 11683 11684 if (sc->link_params.num_phys <= 1) { 11685 return (ELINK_INT_PHY); 11686 } 11687 11688 if (sc->link_vars.link_up) { 11689 sel_phy_idx = ELINK_EXT_PHY1; 11690 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 11691 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 11692 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 11693 ELINK_SUPPORTED_FIBRE)) 11694 sel_phy_idx = ELINK_EXT_PHY2; 11695 } else { 11696 switch (elink_phy_selection(&sc->link_params)) { 11697 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 11698 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 11699 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 11700 sel_phy_idx = ELINK_EXT_PHY1; 11701 break; 11702 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 11703 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 11704 sel_phy_idx = ELINK_EXT_PHY2; 11705 break; 11706 } 11707 } 11708 11709 return (sel_phy_idx); 11710} 11711 11712static int 11713bxe_get_link_cfg_idx(struct bxe_softc *sc) 11714{ 11715 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 11716 11717 /* 11718 * The selected activated PHY is always after swapping (in case PHY 11719 * swapping is enabled). 
So when swapping is enabled, we need to reverse 11720 * the configuration 11721 */ 11722 11723 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 11724 if (sel_phy_idx == ELINK_EXT_PHY1) 11725 sel_phy_idx = ELINK_EXT_PHY2; 11726 else if (sel_phy_idx == ELINK_EXT_PHY2) 11727 sel_phy_idx = ELINK_EXT_PHY1; 11728 } 11729 11730 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 11731} 11732 11733static void 11734bxe_set_requested_fc(struct bxe_softc *sc) 11735{ 11736 /* 11737 * Initialize link parameters structure variables 11738 * It is recommended to turn off RX FC for jumbo frames 11739 * for better performance 11740 */ 11741 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 11742 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 11743 } else { 11744 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 11745 } 11746} 11747 11748static void 11749bxe_calc_fc_adv(struct bxe_softc *sc) 11750{ 11751 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 11752 11753 11754 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 11755 ADVERTISED_Pause); 11756 11757 switch (sc->link_vars.ieee_fc & 11758 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 11759 11760 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 11761 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 11762 ADVERTISED_Pause); 11763 break; 11764 11765 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 11766 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 11767 break; 11768 11769 default: 11770 break; 11771 11772 } 11773} 11774 11775static uint16_t 11776bxe_get_mf_speed(struct bxe_softc *sc) 11777{ 11778 uint16_t line_speed = sc->link_vars.line_speed; 11779 if (IS_MF(sc)) { 11780 uint16_t maxCfg = 11781 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 11782 11783 /* calculate the current MAX line speed limit for the MF devices */ 11784 if (IS_MF_SI(sc)) { 11785 line_speed = (line_speed * maxCfg) / 100; 11786 } else { /* SD mode */ 11787 uint16_t vn_max_rate = maxCfg * 100; 11788 11789 if (vn_max_rate < line_speed) { 11790 line_speed = vn_max_rate; 11791 } 11792 } 11793 } 11794 11795 return (line_speed); 11796} 11797 11798static void 11799bxe_fill_report_data(struct bxe_softc *sc, 11800 struct bxe_link_report_data *data) 11801{ 11802 uint16_t line_speed = bxe_get_mf_speed(sc); 11803 11804 memset(data, 0, sizeof(*data)); 11805 11806 /* fill the report data with the effective line speed */ 11807 data->line_speed = line_speed; 11808 11809 /* Link is down */ 11810 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 11811 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 11812 } 11813 11814 /* Full DUPLEX */ 11815 if (sc->link_vars.duplex == DUPLEX_FULL) { 11816 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 11817 } 11818 11819 /* Rx Flow Control is ON */ 11820 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 11821 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 11822 } 11823 11824 /* Tx Flow Control is ON */ 11825 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 11826 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 11827 } 11828} 11829 11830/* report link status to OS, should be called under phy_lock */ 11831static void 11832bxe_link_report_locked(struct bxe_softc *sc) 11833{ 11834 struct bxe_link_report_data cur_data; 11835 11836 /* reread mf_cfg */ 11837 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 11838 bxe_read_mf_cfg(sc); 11839 } 11840 11841 /* Read the current link report info */ 11842 bxe_fill_report_data(sc, &cur_data); 
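 /*
  * Example of the suppression below (descriptive only): if the last
  * reported state was link-down and cur_data also carries
  * BXE_LINK_REPORT_LINK_DOWN, the checks cause an early return so that
  * repeated link-down events are not re-reported to the OS.
  */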
11843 11844 /* Don't report link down or exactly the same link status twice */ 11845 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 11846 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11847 &sc->last_reported_link.link_report_flags) && 11848 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11849 &cur_data.link_report_flags))) { 11850 return; 11851 } 11852 11853 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n", 11854 cur_data.link_report_flags, sc->last_reported_link.link_report_flags); 11855 sc->link_cnt++; 11856 11857 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt); 11858 /* report new link params and remember the state for the next time */ 11859 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 11860 11861 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11862 &cur_data.link_report_flags)) { 11863 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 11864 } else { 11865 const char *duplex; 11866 const char *flow; 11867 11868 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 11869 &cur_data.link_report_flags)) { 11870 duplex = "full"; 11871 ELINK_DEBUG_P0(sc, "link set to full duplex\n"); 11872 } else { 11873 duplex = "half"; 11874 ELINK_DEBUG_P0(sc, "link set to half duplex\n"); 11875 } 11876 11877 /* 11878 * Handle the FC at the end so that only these flags would be 11879 * possibly set. This way we may easily check if there is no FC 11880 * enabled. 11881 */ 11882 if (cur_data.link_report_flags) { 11883 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11884 &cur_data.link_report_flags) && 11885 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11886 &cur_data.link_report_flags)) { 11887 flow = "ON - receive & transmit"; 11888 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11889 &cur_data.link_report_flags) && 11890 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11891 &cur_data.link_report_flags)) { 11892 flow = "ON - receive"; 11893 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11894 &cur_data.link_report_flags) && 11895 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11896 &cur_data.link_report_flags)) { 11897 flow = "ON - transmit"; 11898 } else { 11899 flow = "none"; /* possible? 
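                               * In practice this branch should be
                               * unreachable: LINK_DOWN is excluded by the
                               * outer else, and FULL_DUPLEX was already
                               * cleared by the test-and-clear above, so a
                               * nonzero flags word here implies at least
                               * one FC bit. Kept as a safe default.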
*/ 11900 } 11901 } else { 11902 flow = "none"; 11903 } 11904 11905 if_link_state_change(sc->ifp, LINK_STATE_UP); 11906 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 11907 cur_data.line_speed, duplex, flow); 11908 } 11909} 11910 11911static void 11912bxe_link_report(struct bxe_softc *sc) 11913{ 11914 bxe_acquire_phy_lock(sc); 11915 bxe_link_report_locked(sc); 11916 bxe_release_phy_lock(sc); 11917} 11918 11919static void 11920bxe_link_status_update(struct bxe_softc *sc) 11921{ 11922 if (sc->state != BXE_STATE_OPEN) { 11923 return; 11924 } 11925 11926 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 11927 elink_link_status_update(&sc->link_params, &sc->link_vars); 11928 } else { 11929 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 11930 ELINK_SUPPORTED_10baseT_Full | 11931 ELINK_SUPPORTED_100baseT_Half | 11932 ELINK_SUPPORTED_100baseT_Full | 11933 ELINK_SUPPORTED_1000baseT_Full | 11934 ELINK_SUPPORTED_2500baseX_Full | 11935 ELINK_SUPPORTED_10000baseT_Full | 11936 ELINK_SUPPORTED_TP | 11937 ELINK_SUPPORTED_FIBRE | 11938 ELINK_SUPPORTED_Autoneg | 11939 ELINK_SUPPORTED_Pause | 11940 ELINK_SUPPORTED_Asym_Pause); 11941 sc->port.advertising[0] = sc->port.supported[0]; 11942 11943 sc->link_params.sc = sc; 11944 sc->link_params.port = SC_PORT(sc); 11945 sc->link_params.req_duplex[0] = DUPLEX_FULL; 11946 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 11947 sc->link_params.req_line_speed[0] = SPEED_10000; 11948 sc->link_params.speed_cap_mask[0] = 0x7f0000; 11949 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 11950 11951 if (CHIP_REV_IS_FPGA(sc)) { 11952 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 11953 sc->link_vars.line_speed = ELINK_SPEED_1000; 11954 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11955 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 11956 } else { 11957 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 11958 sc->link_vars.line_speed = ELINK_SPEED_10000; 11959 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11960 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 11961 } 11962 11963 sc->link_vars.link_up = 1; 11964 11965 sc->link_vars.duplex = DUPLEX_FULL; 11966 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 11967 11968 if (IS_PF(sc)) { 11969 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 11970 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11971 bxe_link_report(sc); 11972 } 11973 } 11974 11975 if (IS_PF(sc)) { 11976 if (sc->link_vars.link_up) { 11977 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11978 } else { 11979 bxe_stats_handle(sc, STATS_EVENT_STOP); 11980 } 11981 bxe_link_report(sc); 11982 } else { 11983 bxe_link_report(sc); 11984 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11985 } 11986} 11987 11988static int 11989bxe_initial_phy_init(struct bxe_softc *sc, 11990 int load_mode) 11991{ 11992 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 11993 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 11994 struct elink_params *lp = &sc->link_params; 11995 11996 bxe_set_requested_fc(sc); 11997 11998 if (CHIP_REV_IS_SLOW(sc)) { 11999 uint32_t bond = CHIP_BOND_ID(sc); 12000 uint32_t feat = 0; 12001 12002 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12003 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12004 } else if (bond & 0x4) { 12005 if (CHIP_IS_E3(sc)) { 12006 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12007 } else { 12008 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12009 } 12010 } else if (bond & 0x8) { 12011 if (CHIP_IS_E3(sc)) { 12012 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12013 } else { 12014 feat |= 
ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12015 } 12016 } 12017 12018 /* disable EMAC for E3 and above */ 12019 if (bond & 0x2) { 12020 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12021 } 12022 12023 sc->link_params.feature_config_flags |= feat; 12024 } 12025 12026 bxe_acquire_phy_lock(sc); 12027 12028 if (load_mode == LOAD_DIAG) { 12029 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12030 /* Prefer doing PHY loopback at 10G speed, if possible */ 12031 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12032 if (lp->speed_cap_mask[cfg_idx] & 12033 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12034 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12035 } else { 12036 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12037 } 12038 } 12039 } 12040 12041 if (load_mode == LOAD_LOOPBACK_EXT) { 12042 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12043 } 12044 12045 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12046 12047 bxe_release_phy_lock(sc); 12048 12049 bxe_calc_fc_adv(sc); 12050 12051 if (sc->link_vars.link_up) { 12052 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12053 bxe_link_report(sc); 12054 } 12055 12056 if (!CHIP_REV_IS_SLOW(sc)) { 12057 bxe_periodic_start(sc); 12058 } 12059 12060 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12061 return (rc); 12062} 12063 12064/* must be called under IF_ADDR_LOCK */ 12065static int 12066bxe_init_mcast_macs_list(struct bxe_softc *sc, 12067 struct ecore_mcast_ramrod_params *p) 12068{ 12069 if_t ifp = sc->ifp; 12070 int mc_count = 0; 12071 struct ifmultiaddr *ifma; 12072 struct ecore_mcast_list_elem *mc_mac; 12073 12074 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12075 if (ifma->ifma_addr->sa_family != AF_LINK) { 12076 continue; 12077 } 12078 12079 mc_count++; 12080 } 12081 12082 ECORE_LIST_INIT(&p->mcast_list); 12083 p->mcast_list_len = 0; 12084 12085 if (!mc_count) { 12086 return (0); 12087 } 12088 12089 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, 12090 (M_NOWAIT | M_ZERO)); 12091 if (!mc_mac) { 12092 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12093 return (-1); 12094 } 12095 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 12096 12097 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12098 if (ifma->ifma_addr->sa_family != AF_LINK) { 12099 continue; 12100 } 12101 12102 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 12103 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list); 12104 12105 BLOGD(sc, DBG_LOAD, 12106 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n", 12107 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12108 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5], mc_count); 12109 mc_mac++; 12110 } 12111 12112 p->mcast_list_len = mc_count; 12113 12114 return (0); 12115} 12116 12117static void 12118bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p) 12119{ 12120 struct ecore_mcast_list_elem *mc_mac = 12121 ECORE_LIST_FIRST_ENTRY(&p->mcast_list, 12122 struct ecore_mcast_list_elem, 12123 link); 12124 12125 if (mc_mac) { 12126 /* only a single free as all mc_macs are in the same heap array */ 12127 free(mc_mac, M_DEVBUF); 12128 } 12129} 12130static int 12131bxe_set_mc_list(struct bxe_softc *sc) 12132{ 12133 struct ecore_mcast_ramrod_params rparam = { NULL }; 12134 int rc = 0; 12135 12136 rparam.mcast_obj = &sc->mcast_obj; 12137 12138 BXE_MCAST_LOCK(sc); 12139 12140 /* first, clear all configured multicast MACs */ 12141 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12142 if (rc < 0) { 12143 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 
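        /*
         * Note on bxe_set_mc_list() as a whole: the multicast filter is
         * refreshed with a full DEL of the previous configuration followed
         * by an ADD of the list harvested from if_multiaddrs. The list
         * elements built by bxe_init_mcast_macs_list() all live in a single
         * malloc'd array, which is why bxe_free_mcast_macs_list() frees
         * only the first element.
         */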
        /* Manual backport parts of FreeBSD upstream r284470. */
        BXE_MCAST_UNLOCK(sc);
        return (rc);
    }

    /* configure a new MAC list */
    rc = bxe_init_mcast_macs_list(sc, &rparam);
    if (rc) {
        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
        BXE_MCAST_UNLOCK(sc);
        return (rc);
    }

    /* Now add the new MACs */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
    if (rc < 0) {
        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
    }

    bxe_free_mcast_macs_list(&rparam);

    BXE_MCAST_UNLOCK(sc);

    return (rc);
}

static int
bxe_set_uc_list(struct bxe_softc *sc)
{
    if_t ifp = sc->ifp;
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    struct ifaddr *ifa;
    unsigned long ramrod_flags = 0;
    int rc;

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_addr_rlock(ifp);
#endif

    /* first schedule a clean-up of the old configuration */
    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
#if __FreeBSD_version < 800000
        IF_ADDR_UNLOCK(ifp);
#else
        if_addr_runlock(ifp);
#endif
        return (rc);
    }

    ifa = if_getifaddr(ifp); /* XXX Is this structure */
    while (ifa) {
        if (ifa->ifa_addr->sa_family != AF_LINK) {
            ifa = TAILQ_NEXT(ifa, ifa_link);
            continue;
        }

        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
        if (rc == -EEXIST) {
            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
            /* do not treat adding the same MAC as an error */
            rc = 0;
        } else if (rc < 0) {
            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
#if __FreeBSD_version < 800000
            IF_ADDR_UNLOCK(ifp);
#else
            if_addr_runlock(ifp);
#endif
            return (rc);
        }

        ifa = TAILQ_NEXT(ifa, ifa_link);
    }

#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_addr_runlock(ifp);
#endif

    /* Execute the pending commands */
    bit_set(&ramrod_flags, RAMROD_CONT);
    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
                            ECORE_UC_LIST_MAC, &ramrod_flags));
}

static void
bxe_set_rx_mode(struct bxe_softc *sc)
{
    if_t ifp = sc->ifp;
    uint32_t rx_mode = BXE_RX_MODE_NORMAL;

    if (sc->state != BXE_STATE_OPEN) {
        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
        return;
    }

    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));

    if (if_getflags(ifp) & IFF_PROMISC) {
        rx_mode = BXE_RX_MODE_PROMISC;
    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
                CHIP_IS_E1(sc))) {
        rx_mode = BXE_RX_MODE_ALLMULTI;
    } else {
        if (IS_PF(sc)) {
            /* some multicasts */
            if (bxe_set_mc_list(sc) < 0) {
                rx_mode = BXE_RX_MODE_ALLMULTI;
            }
            if (bxe_set_uc_list(sc) < 0) {
                rx_mode = BXE_RX_MODE_PROMISC;
            }
        }
    }

    sc->rx_mode = rx_mode;

    /* schedule the rx_mode command */
    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        BLOGD(sc, DBG_LOAD,
"Scheduled setting rx_mode with ECORE...\n"); 12271 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12272 return; 12273 } 12274 12275 if (IS_PF(sc)) { 12276 bxe_set_storm_rx_mode(sc); 12277 } 12278} 12279 12280 12281/* update flags in shmem */ 12282static void 12283bxe_update_drv_flags(struct bxe_softc *sc, 12284 uint32_t flags, 12285 uint32_t set) 12286{ 12287 uint32_t drv_flags; 12288 12289 if (SHMEM2_HAS(sc, drv_flags)) { 12290 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12291 drv_flags = SHMEM2_RD(sc, drv_flags); 12292 12293 if (set) { 12294 SET_FLAGS(drv_flags, flags); 12295 } else { 12296 RESET_FLAGS(drv_flags, flags); 12297 } 12298 12299 SHMEM2_WR(sc, drv_flags, drv_flags); 12300 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12301 12302 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12303 } 12304} 12305 12306/* periodic timer callout routine, only runs when the interface is up */ 12307 12308static void 12309bxe_periodic_callout_func(void *xsc) 12310{ 12311 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12312 int i; 12313 12314 if (!BXE_CORE_TRYLOCK(sc)) { 12315 /* just bail and try again next time */ 12316 12317 if ((sc->state == BXE_STATE_OPEN) && 12318 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12319 /* schedule the next periodic callout */ 12320 callout_reset(&sc->periodic_callout, hz, 12321 bxe_periodic_callout_func, sc); 12322 } 12323 12324 return; 12325 } 12326 12327 if ((sc->state != BXE_STATE_OPEN) || 12328 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12329 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12330 BXE_CORE_UNLOCK(sc); 12331 return; 12332 } 12333 12334 12335 /* Check for TX timeouts on any fastpath. */ 12336 FOR_EACH_QUEUE(sc, i) { 12337 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12338 /* Ruh-Roh, chip was reset! */ 12339 break; 12340 } 12341 } 12342 12343 if (!CHIP_REV_IS_SLOW(sc)) { 12344 /* 12345 * This barrier is needed to ensure the ordering between the writing 12346 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12347 * the reading here. 12348 */ 12349 mb(); 12350 if (sc->port.pmf) { 12351 bxe_acquire_phy_lock(sc); 12352 elink_period_func(&sc->link_params, &sc->link_vars); 12353 bxe_release_phy_lock(sc); 12354 } 12355 } 12356 12357 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12358 int mb_idx = SC_FW_MB_IDX(sc); 12359 uint32_t drv_pulse; 12360 uint32_t mcp_pulse; 12361 12362 ++sc->fw_drv_pulse_wr_seq; 12363 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12364 12365 drv_pulse = sc->fw_drv_pulse_wr_seq; 12366 bxe_drv_pulse(sc); 12367 12368 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12369 MCP_PULSE_SEQ_MASK); 12370 12371 /* 12372 * The delta between driver pulse and mcp response should 12373 * be 1 (before mcp response) or 0 (after mcp response). 12374 */ 12375 if ((drv_pulse != mcp_pulse) && 12376 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12377 /* someone lost a heartbeat... 
*/ 12378 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12379 drv_pulse, mcp_pulse); 12380 } 12381 } 12382 12383 /* state is BXE_STATE_OPEN */ 12384 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12385 12386 BXE_CORE_UNLOCK(sc); 12387 12388 if ((sc->state == BXE_STATE_OPEN) && 12389 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12390 /* schedule the next periodic callout */ 12391 callout_reset(&sc->periodic_callout, hz, 12392 bxe_periodic_callout_func, sc); 12393 } 12394} 12395 12396static void 12397bxe_periodic_start(struct bxe_softc *sc) 12398{ 12399 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12400 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12401} 12402 12403static void 12404bxe_periodic_stop(struct bxe_softc *sc) 12405{ 12406 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12407 callout_drain(&sc->periodic_callout); 12408} 12409 12410void 12411bxe_parity_recover(struct bxe_softc *sc) 12412{ 12413 uint8_t global = FALSE; 12414 uint32_t error_recovered, error_unrecovered; 12415 bool is_parity; 12416 12417 12418 if ((sc->recovery_state == BXE_RECOVERY_FAILED) && 12419 (sc->state == BXE_STATE_ERROR)) { 12420 BLOGE(sc, "RECOVERY failed, " 12421 "stack notified driver is NOT running! " 12422 "Please reboot/power cycle the system.\n"); 12423 return; 12424 } 12425 12426 while (1) { 12427 BLOGD(sc, DBG_SP, 12428 "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n", 12429 __func__, sc, sc->state, sc->recovery_state, sc->error_status); 12430 12431 switch(sc->recovery_state) { 12432 12433 case BXE_RECOVERY_INIT: 12434 is_parity = bxe_chk_parity_attn(sc, &global, FALSE); 12435 12436 if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) || 12437 (sc->error_status & BXE_ERR_MCP_ASSERT) || 12438 (sc->error_status & BXE_ERR_GLOBAL)) { 12439 12440 BXE_CORE_LOCK(sc); 12441 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 12442 bxe_periodic_stop(sc); 12443 } 12444 bxe_nic_unload(sc, UNLOAD_RECOVERY, false); 12445 sc->state = BXE_STATE_ERROR; 12446 sc->recovery_state = BXE_RECOVERY_FAILED; 12447 BLOGE(sc, " No Recovery tried for error 0x%x" 12448 " stack notified driver is NOT running!" 12449 " Please reboot/power cycle the system.\n", 12450 sc->error_status); 12451 BXE_CORE_UNLOCK(sc); 12452 return; 12453 } 12454 12455 12456 /* Try to get a LEADER_LOCK HW lock */ 12457 if (bxe_trylock_leader_lock(sc)) { 12458 12459 bxe_set_reset_in_progress(sc); 12460 /* 12461 * Check if there is a global attention and if 12462 * there was a global attention, set the global 12463 * reset bit. 12464 */ 12465 if (global) { 12466 bxe_set_reset_global(sc); 12467 } 12468 sc->is_leader = 1; 12469 } 12470 12471 /* If interface has been removed - break */ 12472 12473 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 12474 bxe_periodic_stop(sc); 12475 } 12476 12477 BXE_CORE_LOCK(sc); 12478 bxe_nic_unload(sc,UNLOAD_RECOVERY, false); 12479 sc->recovery_state = BXE_RECOVERY_WAIT; 12480 BXE_CORE_UNLOCK(sc); 12481 12482 /* 12483 * Ensure "is_leader", MCP command sequence and 12484 * "recovery_state" update values are seen on other 12485 * CPUs. 12486 */ 12487 mb(); 12488 break; 12489 case BXE_RECOVERY_WAIT: 12490 12491 if (sc->is_leader) { 12492 int other_engine = SC_PATH(sc) ? 
0 : 1;
                bool other_load_status =
                    bxe_get_load_status(sc, other_engine);
                bool load_status =
                    bxe_get_load_status(sc, SC_PATH(sc));
                global = bxe_reset_is_global(sc);

                /*
                 * In case of a parity in a global block, let
                 * the first leader that performs a
                 * leader_reset() reset the global blocks in
                 * order to clear global attentions. Otherwise
                 * the gates will remain closed for that
                 * engine.
                 */
                if (load_status ||
                    (global && other_load_status)) {
                    /*
                     * Wait until all other functions get
                     * down.
                     */
                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;
                } else {
                    /*
                     * If all other functions got down,
                     * try to bring the chip back to
                     * normal. In any case it's an exit
                     * point for a leader.
                     */
                    if (bxe_leader_reset(sc)) {
                        BLOGE(sc, "RECOVERY failed, "
                            "stack notified driver is NOT running!\n");
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        mb();
                        return;
                    }

                    /*
                     * If we are here, it means that the
                     * leader has succeeded and doesn't
                     * want to be a leader any more. Try
                     * to continue as a non-leader.
                     */
                    break;
                }

            } else { /* non-leader */
                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
                    /*
                     * Try to get a LEADER_LOCK HW lock as
                     * long as a former leader may have
                     * been unloaded by the user or
                     * released leadership for another
                     * reason.
                     */
                    if (bxe_trylock_leader_lock(sc)) {
                        /*
                         * I'm a leader now! Restart a
                         * switch case.
                         */
                        sc->is_leader = 1;
                        break;
                    }

                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;

                } else {
                    /*
                     * If there was a global attention, wait
                     * for it to be cleared.
                     */
                    if (bxe_reset_is_global(sc)) {
                        taskqueue_enqueue_timeout(taskqueue_thread,
                            &sc->sp_err_timeout_task, hz/10);
                        return;
                    }

                    error_recovered =
                        sc->eth_stats.recoverable_error;
                    error_unrecovered =
                        sc->eth_stats.unrecoverable_error;
                    BXE_CORE_LOCK(sc);
                    sc->recovery_state =
                        BXE_RECOVERY_NIC_LOADING;
                    if (bxe_nic_load(sc, LOAD_NORMAL)) {
                        error_unrecovered++;
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        BLOGE(sc, "Recovery is NOT successful, "
                            " state=0x%x recovery_state=0x%x error=%x\n",
                            sc->state, sc->recovery_state, sc->error_status);
                        sc->error_status = 0;
                    } else {
                        sc->recovery_state =
                            BXE_RECOVERY_DONE;
                        error_recovered++;
                        BLOGI(sc, "Recovery is successful from errors %x,"
                            " state=0x%x"
                            " recovery_state=0x%x\n", sc->error_status,
                            sc->state, sc->recovery_state);
                        mb();
                    }
                    sc->error_status = 0;
                    BXE_CORE_UNLOCK(sc);
                    sc->eth_stats.recoverable_error =
                        error_recovered;
                    sc->eth_stats.unrecoverable_error =
                        error_unrecovered;

                    return;
                }
            }
        default:
            return;
        }
    }
}

void
bxe_handle_error(struct bxe_softc *sc)
{

    if (sc->recovery_state == BXE_RECOVERY_WAIT) {
        return;
    }
    if (sc->error_status) {
        if (sc->state == BXE_STATE_OPEN) {
            bxe_int_disable(sc);
        }
        if (sc->link_vars.link_up) {
            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
        }
        sc->recovery_state = BXE_RECOVERY_INIT;
        BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
              sc->unit, sc->error_status, sc->recovery_state);
        bxe_parity_recover(sc);
    }
}

static void
bxe_sp_err_timeout_task(void *arg, int pending)
{

    struct bxe_softc *sc = (struct bxe_softc *)arg;

    BLOGD(sc, DBG_SP,
          "%s state = 0x%x rec state=0x%x error_status=%x\n",
          __func__, sc->state, sc->recovery_state, sc->error_status);

    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        return;
    }
    /* if a GRC dump can be taken */
    if ((sc->error_status) && (sc->trigger_grcdump)) {
        bxe_grc_dump(sc);
    }
    if (sc->recovery_state != BXE_RECOVERY_DONE) {
        bxe_handle_error(sc);
        bxe_parity_recover(sc);
    } else if (sc->error_status) {
        bxe_handle_error(sc);
    }

    return;
}

/* start the controller */
static __noinline int
bxe_nic_load(struct bxe_softc *sc,
             int load_mode)
{
    uint32_t val;
    int load_code = 0;
    int i, rc = 0;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");

    sc->state = BXE_STATE_OPENING_WAITING_LOAD;

    if (IS_PF(sc)) {
        /* must be called before memory allocation and HW init */
        bxe_ilt_set_info(sc);
    }

    sc->last_reported_link_state = LINK_STATE_UNKNOWN;

    bxe_set_fp_rx_buf_size(sc);

    if (bxe_alloc_fp_buffers(sc) != 0) {
        BLOGE(sc, "Failed to allocate fastpath memory\n");
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_fw_stats_mem(sc) != 0)
{ 12701 sc->state = BXE_STATE_CLOSED; 12702 rc = ENOMEM; 12703 goto bxe_nic_load_error0; 12704 } 12705 12706 if (IS_PF(sc)) { 12707 /* set pf load just before approaching the MCP */ 12708 bxe_set_pf_load(sc); 12709 12710 /* if MCP exists send load request and analyze response */ 12711 if (!BXE_NOMCP(sc)) { 12712 /* attempt to load pf */ 12713 if (bxe_nic_load_request(sc, &load_code) != 0) { 12714 sc->state = BXE_STATE_CLOSED; 12715 rc = ENXIO; 12716 goto bxe_nic_load_error1; 12717 } 12718 12719 /* what did the MCP say? */ 12720 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12721 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12722 sc->state = BXE_STATE_CLOSED; 12723 rc = ENXIO; 12724 goto bxe_nic_load_error2; 12725 } 12726 } else { 12727 BLOGI(sc, "Device has no MCP!\n"); 12728 load_code = bxe_nic_load_no_mcp(sc); 12729 } 12730 12731 /* mark PMF if applicable */ 12732 bxe_nic_load_pmf(sc, load_code); 12733 12734 /* Init Function state controlling object */ 12735 bxe_init_func_obj(sc); 12736 12737 /* Initialize HW */ 12738 if (bxe_init_hw(sc, load_code) != 0) { 12739 BLOGE(sc, "HW init failed\n"); 12740 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12741 sc->state = BXE_STATE_CLOSED; 12742 rc = ENXIO; 12743 goto bxe_nic_load_error2; 12744 } 12745 } 12746 12747 /* set ALWAYS_ALIVE bit in shmem */ 12748 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12749 bxe_drv_pulse(sc); 12750 sc->flags |= BXE_NO_PULSE; 12751 12752 /* attach interrupts */ 12753 if (bxe_interrupt_attach(sc) != 0) { 12754 sc->state = BXE_STATE_CLOSED; 12755 rc = ENXIO; 12756 goto bxe_nic_load_error2; 12757 } 12758 12759 bxe_nic_init(sc, load_code); 12760 12761 /* Init per-function objects */ 12762 if (IS_PF(sc)) { 12763 bxe_init_objs(sc); 12764 // XXX bxe_iov_nic_init(sc); 12765 12766 /* set AFEX default VLAN tag to an invalid value */ 12767 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12768 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12769 12770 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12771 rc = bxe_func_start(sc); 12772 if (rc) { 12773 BLOGE(sc, "Function start failed! rc = %d\n", rc); 12774 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12775 sc->state = BXE_STATE_ERROR; 12776 goto bxe_nic_load_error3; 12777 } 12778 12779 /* send LOAD_DONE command to MCP */ 12780 if (!BXE_NOMCP(sc)) { 12781 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12782 if (!load_code) { 12783 BLOGE(sc, "MCP response failure, aborting\n"); 12784 sc->state = BXE_STATE_ERROR; 12785 rc = ENXIO; 12786 goto bxe_nic_load_error3; 12787 } 12788 } 12789 12790 rc = bxe_setup_leading(sc); 12791 if (rc) { 12792 BLOGE(sc, "Setup leading failed! 
rc = %d\n", rc); 12793 sc->state = BXE_STATE_ERROR; 12794 goto bxe_nic_load_error3; 12795 } 12796 12797 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12798 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12799 if (rc) { 12800 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); 12801 sc->state = BXE_STATE_ERROR; 12802 goto bxe_nic_load_error3; 12803 } 12804 } 12805 12806 rc = bxe_init_rss_pf(sc); 12807 if (rc) { 12808 BLOGE(sc, "PF RSS init failed\n"); 12809 sc->state = BXE_STATE_ERROR; 12810 goto bxe_nic_load_error3; 12811 } 12812 } 12813 /* XXX VF */ 12814 12815 /* now when Clients are configured we are ready to work */ 12816 sc->state = BXE_STATE_OPEN; 12817 12818 /* Configure a ucast MAC */ 12819 if (IS_PF(sc)) { 12820 rc = bxe_set_eth_mac(sc, TRUE); 12821 } 12822 if (rc) { 12823 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); 12824 sc->state = BXE_STATE_ERROR; 12825 goto bxe_nic_load_error3; 12826 } 12827 12828 if (sc->port.pmf) { 12829 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 12830 if (rc) { 12831 sc->state = BXE_STATE_ERROR; 12832 goto bxe_nic_load_error3; 12833 } 12834 } 12835 12836 sc->link_params.feature_config_flags &= 12837 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 12838 12839 /* start fast path */ 12840 12841 /* Initialize Rx filter */ 12842 bxe_set_rx_mode(sc); 12843 12844 /* start the Tx */ 12845 switch (/* XXX load_mode */LOAD_OPEN) { 12846 case LOAD_NORMAL: 12847 case LOAD_OPEN: 12848 break; 12849 12850 case LOAD_DIAG: 12851 case LOAD_LOOPBACK_EXT: 12852 sc->state = BXE_STATE_DIAG; 12853 break; 12854 12855 default: 12856 break; 12857 } 12858 12859 if (sc->port.pmf) { 12860 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 12861 } else { 12862 bxe_link_status_update(sc); 12863 } 12864 12865 /* start the periodic timer callout */ 12866 bxe_periodic_start(sc); 12867 12868 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 12869 /* mark driver is loaded in shmem2 */ 12870 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 12871 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 12872 (val | 12873 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 12874 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 12875 } 12876 12877 /* wait for all pending SP commands to complete */ 12878 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 12879 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 12880 bxe_periodic_stop(sc); 12881 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 12882 return (ENXIO); 12883 } 12884 12885 /* Tell the stack the driver is running! */ 12886 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 12887 12888 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 12889 12890 return (0); 12891 12892bxe_nic_load_error3: 12893 12894 if (IS_PF(sc)) { 12895 bxe_int_disable_sync(sc, 1); 12896 12897 /* clean out queued objects */ 12898 bxe_squeeze_objects(sc); 12899 } 12900 12901 bxe_interrupt_detach(sc); 12902 12903bxe_nic_load_error2: 12904 12905 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 12906 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 12907 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 12908 } 12909 12910 sc->port.pmf = 0; 12911 12912bxe_nic_load_error1: 12913 12914 /* clear pf_load status, as it was already set */ 12915 if (IS_PF(sc)) { 12916 bxe_clear_pf_load(sc); 12917 } 12918 12919bxe_nic_load_error0: 12920 12921 bxe_free_fw_stats_mem(sc); 12922 bxe_free_fp_buffers(sc); 12923 bxe_free_mem(sc); 12924 12925 return (rc); 12926} 12927 12928static int 12929bxe_init_locked(struct bxe_softc *sc) 12930{ 12931 int other_engine = SC_PATH(sc) ? 
0 : 1;
    uint8_t other_load_status, load_status;
    uint8_t global = FALSE;
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);

    /* check if the driver is already running */
    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
        return (0);
    }

    if ((sc->state == BXE_STATE_ERROR) &&
        (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
                  "as previous recovery failed. "
                  "Reboot/Power-cycle the system\n");
        return (ENXIO);
    }

    bxe_set_power_state(sc, PCI_PM_D0);

    /*
     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for the PF driver.
     */
    if (IS_PF(sc)) {
        other_load_status = bxe_get_load_status(sc, other_engine);
        load_status = bxe_get_load_status(sc, SC_PATH(sc));

        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
            bxe_chk_parity_attn(sc, &global, TRUE)) {
            do {
                /*
                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless of whether it will be this
                 * function that will complete the recovery or not.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }

                /*
                 * Only the first function on the current engine should try
                 * to recover in open. In case of attentions in global blocks
                 * only the first in the chip should try to recover.
                 */
                if ((!load_status && (!global || !other_load_status)) &&
                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
                    BLOGI(sc, "Recovered during init\n");
                    break;
                }

                /* recovery has failed... */
                bxe_set_power_state(sc, PCI_PM_D3hot);
                sc->recovery_state = BXE_RECOVERY_FAILED;

                BLOGE(sc, "Recovery flow hasn't properly "
                          "completed yet, try again later. "
                          "If you still see this message after a "
                          "few retries then power cycle is required.\n");

                rc = ENXIO;
                goto bxe_init_locked_done;
            } while (0);
        }
    }

    sc->recovery_state = BXE_RECOVERY_DONE;

    rc = bxe_nic_load(sc, LOAD_OPEN);

bxe_init_locked_done:

    if (rc) {
        /* Tell the stack the driver is NOT running! */
        BLOGE(sc, "Initialization failed, "
                  "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
    }

    return (rc);
}

static int
bxe_stop_locked(struct bxe_softc *sc)
{
    BXE_CORE_LOCK_ASSERT(sc);
    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
}

/*
 * Handles controller initialization when called from an unlocked routine.
 * ifconfig calls this function.
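 *
 * Typical call path (illustrative): 'ifconfig bxe0 up' issues a
 * SIOCSIFFLAGS ioctl that lands in bxe_ioctl(), which takes the core lock
 * and invokes bxe_init_locked(); the if_init handler registered via
 * if_setinitfn() in bxe_init_ifnet() below also lands here.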
13029 * 13030 * Returns: 13031 * void 13032 */ 13033static void 13034bxe_init(void *xsc) 13035{ 13036 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13037 13038 BXE_CORE_LOCK(sc); 13039 bxe_init_locked(sc); 13040 BXE_CORE_UNLOCK(sc); 13041} 13042 13043static int 13044bxe_init_ifnet(struct bxe_softc *sc) 13045{ 13046 if_t ifp; 13047 int capabilities; 13048 13049 /* ifconfig entrypoint for media type/status reporting */ 13050 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13051 bxe_ifmedia_update, 13052 bxe_ifmedia_status); 13053 13054 /* set the default interface values */ 13055 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13056 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13057 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13058 13059 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ 13060 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media); 13061 13062 /* allocate the ifnet structure */ 13063 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 13064 BLOGE(sc, "Interface allocation failed!\n"); 13065 return (ENXIO); 13066 } 13067 13068 if_setsoftc(ifp, sc); 13069 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13070 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 13071 if_setioctlfn(ifp, bxe_ioctl); 13072 if_setstartfn(ifp, bxe_tx_start); 13073 if_setgetcounterfn(ifp, bxe_get_counter); 13074#if __FreeBSD_version >= 901504 13075 if_settransmitfn(ifp, bxe_tx_mq_start); 13076 if_setqflushfn(ifp, bxe_mq_flush); 13077#endif 13078#ifdef FreeBSD8_0 13079 if_settimer(ifp, 0); 13080#endif 13081 if_setinitfn(ifp, bxe_init); 13082 if_setmtu(ifp, sc->mtu); 13083 if_sethwassist(ifp, (CSUM_IP | 13084 CSUM_TCP | 13085 CSUM_UDP | 13086 CSUM_TSO | 13087 CSUM_TCP_IPV6 | 13088 CSUM_UDP_IPV6)); 13089 13090 capabilities = 13091#if __FreeBSD_version < 700000 13092 (IFCAP_VLAN_MTU | 13093 IFCAP_VLAN_HWTAGGING | 13094 IFCAP_HWCSUM | 13095 IFCAP_JUMBO_MTU | 13096 IFCAP_LRO); 13097#else 13098 (IFCAP_VLAN_MTU | 13099 IFCAP_VLAN_HWTAGGING | 13100 IFCAP_VLAN_HWTSO | 13101 IFCAP_VLAN_HWFILTER | 13102 IFCAP_VLAN_HWCSUM | 13103 IFCAP_HWCSUM | 13104 IFCAP_JUMBO_MTU | 13105 IFCAP_LRO | 13106 IFCAP_TSO4 | 13107 IFCAP_TSO6 | 13108 IFCAP_WOL_MAGIC); 13109#endif 13110 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 13111 if_setcapenable(ifp, if_getcapabilities(ifp)); 13112 if_setbaudrate(ifp, IF_Gbps(10)); 13113/* XXX */ 13114 if_setsendqlen(ifp, sc->tx_ring_size); 13115 if_setsendqready(ifp); 13116/* XXX */ 13117 13118 sc->ifp = ifp; 13119 13120 /* attach to the Ethernet interface list */ 13121 ether_ifattach(ifp, sc->link_params.mac_addr); 13122 13123 return (0); 13124} 13125 13126static void 13127bxe_deallocate_bars(struct bxe_softc *sc) 13128{ 13129 int i; 13130 13131 for (i = 0; i < MAX_BARS; i++) { 13132 if (sc->bar[i].resource != NULL) { 13133 bus_release_resource(sc->dev, 13134 SYS_RES_MEMORY, 13135 sc->bar[i].rid, 13136 sc->bar[i].resource); 13137 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13138 i, PCIR_BAR(i)); 13139 } 13140 } 13141} 13142 13143static int 13144bxe_allocate_bars(struct bxe_softc *sc) 13145{ 13146 u_int flags; 13147 int i; 13148 13149 memset(sc->bar, 0, sizeof(sc->bar)); 13150 13151 for (i = 0; i < MAX_BARS; i++) { 13152 13153 /* memory resources reside at BARs 0, 2, 4 */ 13154 /* Run `pciconf -lb` to see mappings */ 13155 if ((i != 0) && (i != 2) && (i != 4)) { 13156 continue; 13157 } 13158 13159 sc->bar[i].rid = PCIR_BAR(i); 13160 13161 flags = RF_ACTIVE; 13162 if (i == 0) { 13163 
flags |= RF_SHAREABLE;
        }

        if ((sc->bar[i].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_MEMORY,
                                    &sc->bar[i].rid,
                                    flags)) == NULL) {
            /* return a nonzero status so the caller can detect the failure */
            return (ENXIO);
        }

        sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
        sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);

        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
              i, PCIR_BAR(i),
              rman_get_start(sc->bar[i].resource),
              rman_get_end(sc->bar[i].resource),
              rman_get_size(sc->bar[i].resource),
              (uintmax_t)sc->bar[i].kva);
    }

    return (0);
}

static void
bxe_get_function_num(struct bxe_softc *sc)
{
    uint32_t val = 0;

    /*
     * Read the ME register to get the function number. The ME register
     * holds the relative-function number and absolute-function number. The
     * absolute-function number appears only in E2 and above. Before that
     * these bits always contained zero, therefore we cannot blindly use them.
     */

    val = REG_RD(sc, BAR_ME_REGISTER);

    sc->pfunc_rel =
        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
    sc->path_id =
        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;

    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    } else {
        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    }

    BLOGD(sc, DBG_LOAD,
          "Relative function %d, Absolute function %d, Path %d\n",
          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}

static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
{
    uint32_t shmem2_size;
    uint32_t offset;
    uint32_t mf_cfg_offset_value;

    /* Non 57712 */
    offset = (SHMEM_RD(sc, func_mb) +
              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

    /* 57712 plus */
    if (sc->devinfo.shmem2_base != 0) {
        shmem2_size = SHMEM2_RD(sc, size);
        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
                offset = mf_cfg_offset_value;
            }
        }
    }

    return (offset);
}

static uint32_t
bxe_pcie_capability_read(struct bxe_softc *sc,
                         int reg,
                         int width)
{
    int pcie_reg;

    /* ensure PCIe capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
        if (pcie_reg != 0) {
            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
        }
    }

    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");

    return (0);
}

static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
            PCIM_EXP_STA_TRANSACTION_PND);
}

/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
 */
static void
bxe_probe_pci_caps(struct bxe_softc *sc)
{
    uint16_t link_status;
    int reg;

    /* check if PCI Power Management is enabled */
    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
        }
    }

    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);

    /* handle PCIe 2.0 workarounds for 57710 */
    if (CHIP_IS_E1(sc)) {
        /* workaround for 57710 errata E4_57710_27462 */
        sc->devinfo.pcie_link_speed =
            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;

        /* workaround for 57710 errata E4_57710_27488 */
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
        if (sc->devinfo.pcie_link_speed > 1) {
            sc->devinfo.pcie_link_width =
                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
        }
    } else {
        sc->devinfo.pcie_link_speed =
            (link_status & PCIM_LINK_STA_SPEED);
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
    }

    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);

    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;

    /* check if MSI capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
        }
    }

    /* check if MSI-X capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
        }
    }
}

static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; 13372 13373 return (0); 13374} 13375 13376static uint32_t 13377bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13378{ 13379 uint32_t retval = 0; 13380 uint32_t val; 13381 13382 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13383 13384 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13385 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13386 retval |= MF_PROTO_SUPPORT_ETHERNET; 13387 } 13388 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13389 retval |= MF_PROTO_SUPPORT_ISCSI; 13390 } 13391 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13392 retval |= MF_PROTO_SUPPORT_FCOE; 13393 } 13394 } 13395 13396 return (retval); 13397} 13398 13399static int 13400bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13401{ 13402 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13403 uint32_t val; 13404 13405 /* 13406 * There is no outer vlan if we're in switch-independent mode. 13407 * If the mac is valid then assume multi-function. 13408 */ 13409 13410 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13411 13412 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13413 13414 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13415 13416 mf_info->vnics_per_port = 13417 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13418 13419 return (0); 13420} 13421 13422static int 13423bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13424{ 13425 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13426 uint32_t e1hov_tag; 13427 uint32_t func_config; 13428 uint32_t niv_config; 13429 13430 mf_info->multi_vnics_mode = 1; 13431 13432 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13433 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13434 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13435 13436 mf_info->ext_id = 13437 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13438 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13439 13440 mf_info->default_vlan = 13441 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13442 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13443 13444 mf_info->niv_allowed_priorities = 13445 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13446 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13447 13448 mf_info->niv_default_cos = 13449 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13450 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13451 13452 mf_info->afex_vlan_mode = 13453 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13454 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13455 13456 mf_info->niv_mba_enabled = 13457 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13458 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13459 13460 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13461 13462 mf_info->vnics_per_port = 13463 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 4;

    return (0);
}

static int
bxe_check_valid_mf_cfg(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t mf_cfg1;
    uint32_t mf_cfg2;
    uint32_t ovlan1;
    uint32_t ovlan2;
    uint8_t i, j;

    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
          SC_PORT(sc));
    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
          mf_info->mf_config[SC_VN(sc)]);
    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
          mf_info->multi_vnics_mode);
    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
          mf_info->vnics_per_port);
    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
          mf_info->ext_id);
    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
          mf_info->min_bw[0], mf_info->min_bw[1],
          mf_info->min_bw[2], mf_info->min_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
          mf_info->max_bw[0], mf_info->max_bw[1],
          mf_info->max_bw[2], mf_info->max_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
          sc->mac_addr_str);

    /* various MF mode sanity checks... */

    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
              SC_PORT(sc));
        return (1);
    }

    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
        return (1);
    }

    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
                  SC_VN(sc), OVLAN(sc));
            return (1);
        }

        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
                  mf_info->multi_vnics_mode, OVLAN(sc));
            return (1);
        }

        /*
         * Verify all functions are either in MF or in SF mode. If MF, make
         * sure that all non-hidden functions have a valid ovlan. If SF,
         * make sure that all non-hidden functions have an invalid ovlan.
         */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
                BLOGE(sc, "mf_mode=SD function %d MF config "
                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
                      i, mf_info->multi_vnics_mode, ovlan1);
                return (1);
            }
        }

        /* Verify all funcs on the same port each have a different ovlan.
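         * For example (values illustrative): on port 0 of an E1H part,
         * absolute functions 0, 2, 4 and 6 might carry ovlans 100, 101,
         * 102 and 103; two of them sharing ovlan 100 would fail the
         * pairwise check below.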
*/ 13544 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13545 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13546 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13547 /* iterate from the next function on the port to the max func */ 13548 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13549 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13550 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13551 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13552 VALID_OVLAN(ovlan1) && 13553 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13554 VALID_OVLAN(ovlan2) && 13555 (ovlan1 == ovlan2)) { 13556 BLOGE(sc, "mf_mode=SD functions %d and %d " 13557 "have the same ovlan (%d)\n", 13558 i, j, ovlan1); 13559 return (1); 13560 } 13561 } 13562 } 13563 } /* MULTI_FUNCTION_SD */ 13564 13565 return (0); 13566} 13567 13568static int 13569bxe_get_mf_cfg_info(struct bxe_softc *sc) 13570{ 13571 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13572 uint32_t val, mac_upper; 13573 uint8_t i, vnic; 13574 13575 /* initialize mf_info defaults */ 13576 mf_info->vnics_per_port = 1; 13577 mf_info->multi_vnics_mode = FALSE; 13578 mf_info->path_has_ovlan = FALSE; 13579 mf_info->mf_mode = SINGLE_FUNCTION; 13580 13581 if (!CHIP_IS_MF_CAP(sc)) { 13582 return (0); 13583 } 13584 13585 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13586 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13587 return (1); 13588 } 13589 13590 /* get the MF mode (switch dependent / independent / single-function) */ 13591 13592 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13593 13594 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13595 { 13596 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13597 13598 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13599 13600 /* check for legal upper mac bytes */ 13601 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13602 mf_info->mf_mode = MULTI_FUNCTION_SI; 13603 } else { 13604 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13605 } 13606 13607 break; 13608 13609 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13610 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13611 13612 /* get outer vlan configuration */ 13613 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13614 13615 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13616 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13617 mf_info->mf_mode = MULTI_FUNCTION_SD; 13618 } else { 13619 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13620 } 13621 13622 break; 13623 13624 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13625 13626 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13627 return (0); 13628 13629 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13630 13631 /* 13632 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13633 * and the MAC address is valid. 
13634 */ 13635 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13636 13637 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13638 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13639 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13640 } else { 13641 BLOGE(sc, "Invalid config for AFEX mode\n"); 13642 } 13643 13644 break; 13645 13646 default: 13647 13648 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13649 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13650 13651 return (1); 13652 } 13653 13654 /* set path mf_mode (which could be different than function mf_mode) */ 13655 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13656 mf_info->path_has_ovlan = TRUE; 13657 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13658 /* 13659 * Decide on path multi vnics mode. If we're not in MF mode and in 13660 * 4-port mode, this is good enough to check vnic-0 of the other port 13661 * on the same path 13662 */ 13663 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13664 uint8_t other_port = !(PORT_ID(sc) & 1); 13665 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13666 13667 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13668 13669 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; 13670 } 13671 } 13672 13673 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13674 /* invalid MF config */ 13675 if (SC_VN(sc) >= 1) { 13676 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13677 return (1); 13678 } 13679 13680 return (0); 13681 } 13682 13683 /* get the MF configuration */ 13684 mf_info->mf_config[SC_VN(sc)] = 13685 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13686 13687 switch(mf_info->mf_mode) 13688 { 13689 case MULTI_FUNCTION_SD: 13690 13691 bxe_get_shmem_mf_cfg_info_sd(sc); 13692 break; 13693 13694 case MULTI_FUNCTION_SI: 13695 13696 bxe_get_shmem_mf_cfg_info_si(sc); 13697 break; 13698 13699 case MULTI_FUNCTION_AFEX: 13700 13701 bxe_get_shmem_mf_cfg_info_niv(sc); 13702 break; 13703 13704 default: 13705 13706 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13707 mf_info->mf_mode); 13708 return (1); 13709 } 13710 13711 /* get the congestion management parameters */ 13712 13713 vnic = 0; 13714 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13715 /* get min/max bw */ 13716 val = MFCFG_RD(sc, func_mf_config[i].config); 13717 mf_info->min_bw[vnic] = 13718 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13719 mf_info->max_bw[vnic] = 13720 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13721 vnic++; 13722 } 13723 13724 return (bxe_check_valid_mf_cfg(sc)); 13725} 13726 13727static int 13728bxe_get_shmem_info(struct bxe_softc *sc) 13729{ 13730 int port; 13731 uint32_t mac_hi, mac_lo, val; 13732 13733 port = SC_PORT(sc); 13734 mac_hi = mac_lo = 0; 13735 13736 sc->link_params.sc = sc; 13737 sc->link_params.port = port; 13738 13739 /* get the hardware config info */ 13740 sc->devinfo.hw_config = 13741 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13742 sc->devinfo.hw_config2 = 13743 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13744 13745 sc->link_params.hw_led_mode = 13746 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13747 SHARED_HW_CFG_LED_MODE_SHIFT); 13748 13749 /* get the port feature config */ 13750 sc->port.config = 13751 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13752 13753 /* get the link params */ 13754 sc->link_params.speed_cap_mask[0] = 13755 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13756 sc->link_params.speed_cap_mask[1] = 13757 SHMEM_RD(sc, 
dev_info.port_hw_config[port].speed_capability_mask2); 13758 13759 /* get the lane config */ 13760 sc->link_params.lane_config = 13761 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13762 13763 /* get the link config */ 13764 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13765 sc->port.link_config[ELINK_INT_PHY] = val; 13766 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13767 sc->port.link_config[ELINK_EXT_PHY1] = 13768 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13769 13770 /* get the override preemphasis flag and enable it or turn it off */ 13771 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13772 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13773 sc->link_params.feature_config_flags |= 13774 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13775 } else { 13776 sc->link_params.feature_config_flags &= 13777 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13778 } 13779 13780 /* get the initial value of the link params */ 13781 sc->link_params.multi_phy_config = 13782 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 13783 13784 /* get external phy info */ 13785 sc->port.ext_phy_config = 13786 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 13787 13788 /* get the multifunction configuration */ 13789 bxe_get_mf_cfg_info(sc); 13790 13791 /* get the mac address */ 13792 if (IS_MF(sc)) { 13793 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13794 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 13795 } else { 13796 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 13797 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 13798 } 13799 13800 if ((mac_lo == 0) && (mac_hi == 0)) { 13801 *sc->mac_addr_str = 0; 13802 BLOGE(sc, "No Ethernet address programmed!\n"); 13803 } else { 13804 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 13805 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 13806 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 13807 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 13808 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 13809 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 13810 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 13811 "%02x:%02x:%02x:%02x:%02x:%02x", 13812 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 13813 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 13814 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 13815 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 13816 } 13817 13818 return (0); 13819} 13820 13821static void 13822bxe_get_tunable_params(struct bxe_softc *sc) 13823{ 13824 /* sanity checks */ 13825 13826 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 13827 (bxe_interrupt_mode != INTR_MODE_MSI) && 13828 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 13829 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 13830 bxe_interrupt_mode = INTR_MODE_MSIX; 13831 } 13832 13833 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 13834 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 13835 bxe_queue_count = 0; 13836 } 13837 13838 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 13839 if (bxe_max_rx_bufs == 0) { 13840 bxe_max_rx_bufs = RX_BD_USABLE; 13841 } else { 13842 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 13843 bxe_max_rx_bufs = 2048; 13844 } 13845 } 13846 13847 if ((bxe_hc_rx_ticks < 1) || 
(bxe_hc_rx_ticks > 100)) { 13848 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 13849 bxe_hc_rx_ticks = 25; 13850 } 13851 13852 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 13853 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 13854 bxe_hc_tx_ticks = 50; 13855 } 13856 13857 if (bxe_max_aggregation_size == 0) { 13858 bxe_max_aggregation_size = TPA_AGG_SIZE; 13859 } 13860 13861 if (bxe_max_aggregation_size > 0xffff) { 13862 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 13863 bxe_max_aggregation_size); 13864 bxe_max_aggregation_size = TPA_AGG_SIZE; 13865 } 13866 13867 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 13868 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 13869 bxe_mrrs = -1; 13870 } 13871 13872 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 13873 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 13874 bxe_autogreeen = 0; 13875 } 13876 13877 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 13878 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 13879 bxe_udp_rss = 0; 13880 } 13881 13882 /* pull in user settings */ 13883 13884 sc->interrupt_mode = bxe_interrupt_mode; 13885 sc->max_rx_bufs = bxe_max_rx_bufs; 13886 sc->hc_rx_ticks = bxe_hc_rx_ticks; 13887 sc->hc_tx_ticks = bxe_hc_tx_ticks; 13888 sc->max_aggregation_size = bxe_max_aggregation_size; 13889 sc->mrrs = bxe_mrrs; 13890 sc->autogreeen = bxe_autogreeen; 13891 sc->udp_rss = bxe_udp_rss; 13892 13893 if (bxe_interrupt_mode == INTR_MODE_INTX) { 13894 sc->num_queues = 1; 13895 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 13896 sc->num_queues = 13897 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 13898 MAX_RSS_CHAINS); 13899 if (sc->num_queues > mp_ncpus) { 13900 sc->num_queues = mp_ncpus; 13901 } 13902 } 13903 13904 BLOGD(sc, DBG_LOAD, 13905 "User Config: " 13906 "debug=0x%lx " 13907 "interrupt_mode=%d " 13908 "queue_count=%d " 13909 "hc_rx_ticks=%d " 13910 "hc_tx_ticks=%d " 13911 "rx_budget=%d " 13912 "max_aggregation_size=%d " 13913 "mrrs=%d " 13914 "autogreeen=%d " 13915 "udp_rss=%d\n", 13916 bxe_debug, 13917 sc->interrupt_mode, 13918 sc->num_queues, 13919 sc->hc_rx_ticks, 13920 sc->hc_tx_ticks, 13921 bxe_rx_budget, 13922 sc->max_aggregation_size, 13923 sc->mrrs, 13924 sc->autogreeen, 13925 sc->udp_rss); 13926} 13927 13928static int 13929bxe_media_detect(struct bxe_softc *sc) 13930{ 13931 int port_type; 13932 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 13933 13934 switch (sc->link_params.phy[phy_idx].media_type) { 13935 case ELINK_ETH_PHY_SFPP_10G_FIBER: 13936 case ELINK_ETH_PHY_XFP_FIBER: 13937 BLOGI(sc, "Found 10Gb Fiber media.\n"); 13938 sc->media = IFM_10G_SR; 13939 port_type = PORT_FIBRE; 13940 break; 13941 case ELINK_ETH_PHY_SFP_1G_FIBER: 13942 BLOGI(sc, "Found 1Gb Fiber media.\n"); 13943 sc->media = IFM_1000_SX; 13944 port_type = PORT_FIBRE; 13945 break; 13946 case ELINK_ETH_PHY_KR: 13947 case ELINK_ETH_PHY_CX4: 13948 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 13949 sc->media = IFM_10G_CX4; 13950 port_type = PORT_FIBRE; 13951 break; 13952 case ELINK_ETH_PHY_DA_TWINAX: 13953 BLOGI(sc, "Found 10Gb Twinax media.\n"); 13954 sc->media = IFM_10G_TWINAX; 13955 port_type = PORT_DA; 13956 break; 13957 case ELINK_ETH_PHY_BASE_T: 13958 if (sc->link_params.speed_cap_mask[0] & 13959 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 13960 BLOGI(sc, "Found 10GBase-T media.\n"); 13961 sc->media = IFM_10G_T; 13962 port_type = PORT_TP; 13963 } else { 13964 BLOGI(sc, "Found 1000Base-T media.\n"); 13965 sc->media = IFM_1000_T; 13966 port_type = PORT_TP; 13967 } 13968 break; 13969 case ELINK_ETH_PHY_NOT_PRESENT: 
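        /* no PHY module detected; leave media/port type unset below */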
        BLOGI(sc, "Media not present.\n");
        sc->media = 0;
        port_type = PORT_OTHER;
        break;
    case ELINK_ETH_PHY_UNSPECIFIED:
    default:
        BLOGI(sc, "Unknown media!\n");
        sc->media = 0;
        port_type = PORT_OTHER;
        break;
    }
    return port_type;
}

#define GET_FIELD(value, fname) \
    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

static int
bxe_get_igu_cam_info(struct bxe_softc *sc)
{
    int pfid = SC_FUNC(sc);
    int igu_sb_id;
    uint32_t val;
    uint8_t fid, igu_sb_cnt = 0;

    sc->igu_base_sb = 0xff;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        int vn = SC_VN(sc);
        igu_sb_cnt = sc->igu_sb_cnt;
        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
                           FP_SB_MAX_E1x);
        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
        return (0);
    }

    /* IGU in normal mode - read CAM */
    for (igu_sb_id = 0;
         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
         igu_sb_id++) {
        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
            continue;
        }
        fid = IGU_FID(val);
        if ((fid & IGU_FID_ENCODE_IS_PF)) {
            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
                continue;
            }
            if (IGU_VEC(val) == 0) {
                /* default status block */
                sc->igu_dsb_id = igu_sb_id;
            } else {
                if (sc->igu_base_sb == 0xff) {
                    sc->igu_base_sb = igu_sb_id;
                }
                igu_sb_cnt++;
            }
        }
    }

    /*
     * With PF resources allocated by MFW T7.4 and above, the number of CAM
     * entries may not equal the value advertised in PCI config space. Use
     * the smaller of the two as the actual status block count.
     */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

    if (igu_sb_cnt == 0) {
        BLOGE(sc, "CAM configuration error\n");
        return (-1);
    }

    return (0);
}

/*
 * Gather various information from the device config space, the device itself,
 * shmem, and the user input.
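 * The values read here (chip id, shmem bases, IGU layout, link settings) are
 * cached in the softc and consumed later by the load path.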
14053 */ 14054static int 14055bxe_get_device_info(struct bxe_softc *sc) 14056{ 14057 uint32_t val; 14058 int rc; 14059 14060 /* Get the data for the device */ 14061 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 14062 sc->devinfo.device_id = pci_get_device(sc->dev); 14063 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 14064 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 14065 14066 /* get the chip revision (chip metal comes from pci config space) */ 14067 sc->devinfo.chip_id = 14068 sc->link_params.chip_id = 14069 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 14070 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 14071 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 14072 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 14073 14074 /* force 57811 according to MISC register */ 14075 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 14076 if (CHIP_IS_57810(sc)) { 14077 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 14078 (sc->devinfo.chip_id & 0x0000ffff)); 14079 } else if (CHIP_IS_57810_MF(sc)) { 14080 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 14081 (sc->devinfo.chip_id & 0x0000ffff)); 14082 } 14083 sc->devinfo.chip_id |= 0x1; 14084 } 14085 14086 BLOGD(sc, DBG_LOAD, 14087 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 14088 sc->devinfo.chip_id, 14089 ((sc->devinfo.chip_id >> 16) & 0xffff), 14090 ((sc->devinfo.chip_id >> 12) & 0xf), 14091 ((sc->devinfo.chip_id >> 4) & 0xff), 14092 ((sc->devinfo.chip_id >> 0) & 0xf)); 14093 14094 val = (REG_RD(sc, 0x2874) & 0x55); 14095 if ((sc->devinfo.chip_id & 0x1) || 14096 (CHIP_IS_E1(sc) && val) || 14097 (CHIP_IS_E1H(sc) && (val == 0x55))) { 14098 sc->flags |= BXE_ONE_PORT_FLAG; 14099 BLOGD(sc, DBG_LOAD, "single port device\n"); 14100 } 14101 14102 /* set the doorbell size */ 14103 sc->doorbell_size = (1 << BXE_DB_SHIFT); 14104 14105 /* determine whether the device is in 2 port or 4 port mode */ 14106 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ 14107 if (CHIP_IS_E2E3(sc)) { 14108 /* 14109 * Read port4mode_en_ovwr[0]: 14110 * If 1, four port mode is in port4mode_en_ovwr[1]. 14111 * If 0, four port mode is in port4mode_en[0]. 14112 */ 14113 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14114 if (val & 1) { 14115 val = ((val >> 1) & 1); 14116 } else { 14117 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14118 } 14119 14120 sc->devinfo.chip_port_mode = 14121 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14122 14123 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14124 } 14125 14126 /* get the function and path info for the device */ 14127 bxe_get_function_num(sc); 14128 14129 /* get the shared memory base address */ 14130 sc->devinfo.shmem_base = 14131 sc->link_params.shmem_base = 14132 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14133 sc->devinfo.shmem2_base = 14134 REG_RD(sc, (SC_PATH(sc) ? 
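                   /* shmem2 base is published in a per-path generic register */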
MISC_REG_GENERIC_CR_1 :
                                  MISC_REG_GENERIC_CR_0));

    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);

    if (!sc->devinfo.shmem_base) {
        /* this should ONLY prevent upcoming shmem reads */
        BLOGI(sc, "MCP not active\n");
        sc->flags |= BXE_NO_MCP_FLAG;
        return (0);
    }

    /* make sure the shared memory contents are valid */
    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
        return (0);
    }
    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);

    /* get the bootcode version */
    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
    snprintf(sc->devinfo.bc_ver_str,
             sizeof(sc->devinfo.bc_ver_str),
             "%d.%d.%d",
             ((sc->devinfo.bc_ver >> 24) & 0xff),
             ((sc->devinfo.bc_ver >> 16) & 0xff),
             ((sc->devinfo.bc_ver >> 8) & 0xff));
    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);

    /* get the MF config shmem base address */
    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);

    /* clean indirect addresses as they're not used */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    if (IS_PF(sc)) {
        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
        if (CHIP_IS_E1x(sc)) {
            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
        }

        /*
         * Enable internal target-read (in case we are probed after PF
         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up.
         */
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
    }

    /* get the nvram size */
    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
    sc->devinfo.flash_size =
        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);

    /* get PCI capabilities */
    bxe_probe_pci_caps(sc);

    bxe_set_power_state(sc, PCI_PM_D0);

    /* get various configuration parameters from shmem */
    bxe_get_shmem_info(sc);

    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               PCIR_MSIX_CTRL),
                              2);
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    } else {
        sc->igu_sb_cnt = 1;
    }

    sc->igu_base_addr = BAR_IGU_INTMEM;

    /* initialize IGU parameters */
    if (CHIP_IS_E1x(sc)) {
        sc->devinfo.int_block = INT_BLOCK_HC;
        sc->igu_dsb_id = DEF_SB_IGU_ID;
        sc->igu_base_sb = 0;
    } else {
        sc->devinfo.int_block = INT_BLOCK_IGU;

        /* do not allow device reset during IGU info processing */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            int tout = 5000;

            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");

            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                tout--;
                DELAY(1000);
            }

            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
                return (-1);
            }
        }

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
        } else {
            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
        }

        rc = bxe_get_igu_cam_info(sc);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        if (rc) {
            return (rc);
        }
    }

    /*
     * Get base FW non-default (fast path) status block ID. This value is
     * used to initialize the fw_sb_id saved on the fp/queue structure to
     * determine the id used by the FW.
     */
    if (CHIP_IS_E1x(sc)) {
        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    } else {
        /*
         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
         * the same queue are indicated on the same IGU SB). So we prefer
         * FW and IGU SBs to be the same value.
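         * (igu_base_sb itself was discovered by the IGU CAM scan in
         * bxe_get_igu_cam_info() above.)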
14282 */ 14283 sc->base_fw_ndsb = sc->igu_base_sb; 14284 } 14285 14286 BLOGD(sc, DBG_LOAD, 14287 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14288 sc->igu_dsb_id, sc->igu_base_sb, 14289 sc->igu_sb_cnt, sc->base_fw_ndsb); 14290 14291 elink_phy_probe(&sc->link_params); 14292 14293 return (0); 14294} 14295 14296static void 14297bxe_link_settings_supported(struct bxe_softc *sc, 14298 uint32_t switch_cfg) 14299{ 14300 uint32_t cfg_size = 0; 14301 uint32_t idx; 14302 uint8_t port = SC_PORT(sc); 14303 14304 /* aggregation of supported attributes of all external phys */ 14305 sc->port.supported[0] = 0; 14306 sc->port.supported[1] = 0; 14307 14308 switch (sc->link_params.num_phys) { 14309 case 1: 14310 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14311 cfg_size = 1; 14312 break; 14313 case 2: 14314 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14315 cfg_size = 1; 14316 break; 14317 case 3: 14318 if (sc->link_params.multi_phy_config & 14319 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14320 sc->port.supported[1] = 14321 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14322 sc->port.supported[0] = 14323 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14324 } else { 14325 sc->port.supported[0] = 14326 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14327 sc->port.supported[1] = 14328 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14329 } 14330 cfg_size = 2; 14331 break; 14332 } 14333 14334 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14335 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14336 SHMEM_RD(sc, 14337 dev_info.port_hw_config[port].external_phy_config), 14338 SHMEM_RD(sc, 14339 dev_info.port_hw_config[port].external_phy_config2)); 14340 return; 14341 } 14342 14343 if (CHIP_IS_E3(sc)) 14344 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14345 else { 14346 switch (switch_cfg) { 14347 case ELINK_SWITCH_CFG_1G: 14348 sc->port.phy_addr = 14349 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14350 break; 14351 case ELINK_SWITCH_CFG_10G: 14352 sc->port.phy_addr = 14353 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14354 break; 14355 default: 14356 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14357 sc->port.link_config[0]); 14358 return; 14359 } 14360 } 14361 14362 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14363 14364 /* mask what we support according to speed_cap_mask per configuration */ 14365 for (idx = 0; idx < cfg_size; idx++) { 14366 if (!(sc->link_params.speed_cap_mask[idx] & 14367 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14368 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14369 } 14370 14371 if (!(sc->link_params.speed_cap_mask[idx] & 14372 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14373 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14374 } 14375 14376 if (!(sc->link_params.speed_cap_mask[idx] & 14377 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14378 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14379 } 14380 14381 if (!(sc->link_params.speed_cap_mask[idx] & 14382 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14383 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14384 } 14385 14386 if (!(sc->link_params.speed_cap_mask[idx] & 14387 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14388 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14389 } 14390 14391 if (!(sc->link_params.speed_cap_mask[idx] & 14392 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14393 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14394 } 14395 14396 if (!(sc->link_params.speed_cap_mask[idx] & 14397 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14398 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14399 } 14400 14401 if (!(sc->link_params.speed_cap_mask[idx] & 14402 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14403 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14404 } 14405 } 14406 14407 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14408 sc->port.supported[0], sc->port.supported[1]); 14409 ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n", 14410 sc->port.supported[0], sc->port.supported[1]); 14411} 14412 14413static void 14414bxe_link_settings_requested(struct bxe_softc *sc) 14415{ 14416 uint32_t link_config; 14417 uint32_t idx; 14418 uint32_t cfg_size = 0; 14419 14420 sc->port.advertising[0] = 0; 14421 sc->port.advertising[1] = 0; 14422 14423 switch (sc->link_params.num_phys) { 14424 case 1: 14425 case 2: 14426 cfg_size = 1; 14427 break; 14428 case 3: 14429 cfg_size = 2; 14430 break; 14431 } 14432 14433 for (idx = 0; idx < cfg_size; idx++) { 14434 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14435 link_config = sc->port.link_config[idx]; 14436 14437 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14438 case PORT_FEATURE_LINK_SPEED_AUTO: 14439 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14440 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14441 sc->port.advertising[idx] |= sc->port.supported[idx]; 14442 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14443 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14444 sc->port.advertising[idx] |= 14445 (ELINK_SUPPORTED_100baseT_Half | 14446 ELINK_SUPPORTED_100baseT_Full); 14447 } else { 14448 /* force 10G, no AN */ 14449 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14450 sc->port.advertising[idx] |= 14451 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14452 continue; 14453 } 14454 break; 14455 14456 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14457 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14458 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14459 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14460 ADVERTISED_TP); 14461 } else { 14462 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14463 "speed_cap_mask=0x%08x\n", 14464 link_config, sc->link_params.speed_cap_mask[idx]); 14465 return; 14466 } 14467 break; 14468 14469 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14470 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14471 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14472 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14473 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14474 ADVERTISED_TP); 14475 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n", 14476 sc->link_params.req_duplex[idx]); 14477 } else { 14478 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14479 "speed_cap_mask=0x%08x\n", 14480 link_config, sc->link_params.speed_cap_mask[idx]); 14481 return; 14482 } 14483 break; 14484 14485 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14486 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14487 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14488 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14489 ADVERTISED_TP); 14490 } else { 14491 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14492 "speed_cap_mask=0x%08x\n", 14493 link_config, sc->link_params.speed_cap_mask[idx]); 14494 return; 14495 } 14496 break; 14497 14498 case 
PORT_FEATURE_LINK_SPEED_100M_HALF: 14499 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14500 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14501 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14502 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | 14503 ADVERTISED_TP); 14504 } else { 14505 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14506 "speed_cap_mask=0x%08x\n", 14507 link_config, sc->link_params.speed_cap_mask[idx]); 14508 return; 14509 } 14510 break; 14511 14512 case PORT_FEATURE_LINK_SPEED_1G: 14513 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14514 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14515 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14516 ADVERTISED_TP); 14517 } else { 14518 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14519 "speed_cap_mask=0x%08x\n", 14520 link_config, sc->link_params.speed_cap_mask[idx]); 14521 return; 14522 } 14523 break; 14524 14525 case PORT_FEATURE_LINK_SPEED_2_5G: 14526 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14527 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14528 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14529 ADVERTISED_TP); 14530 } else { 14531 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14532 "speed_cap_mask=0x%08x\n", 14533 link_config, sc->link_params.speed_cap_mask[idx]); 14534 return; 14535 } 14536 break; 14537 14538 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14539 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14540 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14541 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14542 ADVERTISED_FIBRE); 14543 } else { 14544 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14545 "speed_cap_mask=0x%08x\n", 14546 link_config, sc->link_params.speed_cap_mask[idx]); 14547 return; 14548 } 14549 break; 14550 14551 case PORT_FEATURE_LINK_SPEED_20G: 14552 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14553 break; 14554 14555 default: 14556 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14557 "speed_cap_mask=0x%08x\n", 14558 link_config, sc->link_params.speed_cap_mask[idx]); 14559 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14560 sc->port.advertising[idx] = sc->port.supported[idx]; 14561 break; 14562 } 14563 14564 sc->link_params.req_flow_ctrl[idx] = 14565 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14566 14567 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14568 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14569 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14570 } else { 14571 bxe_set_requested_fc(sc); 14572 } 14573 } 14574 14575 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14576 "req_flow_ctrl=0x%x advertising=0x%x\n", 14577 sc->link_params.req_line_speed[idx], 14578 sc->link_params.req_duplex[idx], 14579 sc->link_params.req_flow_ctrl[idx], 14580 sc->port.advertising[idx]); 14581 ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d " 14582 "advertising=0x%x\n", 14583 sc->link_params.req_line_speed[idx], 14584 sc->link_params.req_duplex[idx], 14585 sc->port.advertising[idx]); 14586 } 14587} 14588 14589static void 14590bxe_get_phy_info(struct bxe_softc *sc) 14591{ 14592 uint8_t port = SC_PORT(sc); 14593 uint32_t config = sc->port.config; 14594 uint32_t eee_mode; 14595 14596 /* shmem data already read in bxe_get_shmem_info() */ 14597 14598 ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14599 "link_config0=0x%08x\n", 
                   sc->link_params.lane_config,
                   sc->link_params.speed_cap_mask[0],
                   sc->port.link_config[0]);

    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
    bxe_link_settings_requested(sc);

    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link features according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
    ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
}

static void
bxe_get_params(struct bxe_softc *sc)
{
    /* get user tunable params */
    bxe_get_tunable_params(sc);

    /* select the RX and TX ring sizes */
    sc->tx_ring_size = TX_BD_USABLE;
    sc->rx_ring_size = RX_BD_USABLE;

    /* XXX disable WoL */
    sc->wol = 0;
}

static void
bxe_set_modes_bitmap(struct bxe_softc *sc)
{
    uint32_t flags = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        SET_FLAGS(flags, MODE_FPGA);
    } else if (CHIP_REV_IS_EMUL(sc)) {
        SET_FLAGS(flags, MODE_EMUL);
    } else {
        SET_FLAGS(flags, MODE_ASIC);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        SET_FLAGS(flags, MODE_PORT4);
    } else {
        SET_FLAGS(flags, MODE_PORT2);
    }

    if (CHIP_IS_E2(sc)) {
        SET_FLAGS(flags, MODE_E2);
    } else if (CHIP_IS_E3(sc)) {
        SET_FLAGS(flags, MODE_E3);
        if (CHIP_REV(sc) == CHIP_REV_Ax) {
            SET_FLAGS(flags, MODE_E3_A0);
        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
        }
    }

    if (IS_MF(sc)) {
        SET_FLAGS(flags, MODE_MF);
        switch (sc->devinfo.mf_info.mf_mode) {
        case MULTI_FUNCTION_SD:
            SET_FLAGS(flags, MODE_MF_SD);
            break;
        case MULTI_FUNCTION_SI:
            SET_FLAGS(flags, MODE_MF_SI);
            break;
        case MULTI_FUNCTION_AFEX:
            SET_FLAGS(flags, MODE_MF_AFEX);
            break;
        }
    } else {
        SET_FLAGS(flags, MODE_SF);
    }

#if defined(__LITTLE_ENDIAN)
    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /* __BIG_ENDIAN */
    SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif

    INIT_MODE_FLAGS(sc) = flags;
}

static int
bxe_alloc_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    bus_addr_t busaddr;
    int max_agg_queues;
    int max_segments;
    bus_size_t max_size;
    bus_size_t max_seg_size;
    char buf[32];
    int rc;
    int i, j;

    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */

    /* allocate the parent bus DMA tag */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
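                            /* all ring and mbuf DMA tags below inherit from this parent */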
14724 1, /* alignment */ 14725 0, /* boundary limit */ 14726 BUS_SPACE_MAXADDR, /* restricted low */ 14727 BUS_SPACE_MAXADDR, /* restricted hi */ 14728 NULL, /* addr filter() */ 14729 NULL, /* addr filter() arg */ 14730 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14731 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14732 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14733 0, /* flags */ 14734 NULL, /* lock() */ 14735 NULL, /* lock() arg */ 14736 &sc->parent_dma_tag); /* returned dma tag */ 14737 if (rc != 0) { 14738 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14739 return (1); 14740 } 14741 14742 /************************/ 14743 /* DEFAULT STATUS BLOCK */ 14744 /************************/ 14745 14746 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14747 &sc->def_sb_dma, "default status block") != 0) { 14748 /* XXX */ 14749 bus_dma_tag_destroy(sc->parent_dma_tag); 14750 return (1); 14751 } 14752 14753 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14754 14755 /***************/ 14756 /* EVENT QUEUE */ 14757 /***************/ 14758 14759 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14760 &sc->eq_dma, "event queue") != 0) { 14761 /* XXX */ 14762 bxe_dma_free(sc, &sc->def_sb_dma); 14763 sc->def_sb = NULL; 14764 bus_dma_tag_destroy(sc->parent_dma_tag); 14765 return (1); 14766 } 14767 14768 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14769 14770 /*************/ 14771 /* SLOW PATH */ 14772 /*************/ 14773 14774 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14775 &sc->sp_dma, "slow path") != 0) { 14776 /* XXX */ 14777 bxe_dma_free(sc, &sc->eq_dma); 14778 sc->eq = NULL; 14779 bxe_dma_free(sc, &sc->def_sb_dma); 14780 sc->def_sb = NULL; 14781 bus_dma_tag_destroy(sc->parent_dma_tag); 14782 return (1); 14783 } 14784 14785 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 14786 14787 /*******************/ 14788 /* SLOW PATH QUEUE */ 14789 /*******************/ 14790 14791 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14792 &sc->spq_dma, "slow path queue") != 0) { 14793 /* XXX */ 14794 bxe_dma_free(sc, &sc->sp_dma); 14795 sc->sp = NULL; 14796 bxe_dma_free(sc, &sc->eq_dma); 14797 sc->eq = NULL; 14798 bxe_dma_free(sc, &sc->def_sb_dma); 14799 sc->def_sb = NULL; 14800 bus_dma_tag_destroy(sc->parent_dma_tag); 14801 return (1); 14802 } 14803 14804 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 14805 14806 /***************************/ 14807 /* FW DECOMPRESSION BUFFER */ 14808 /***************************/ 14809 14810 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 14811 "fw decompression buffer") != 0) { 14812 /* XXX */ 14813 bxe_dma_free(sc, &sc->spq_dma); 14814 sc->spq = NULL; 14815 bxe_dma_free(sc, &sc->sp_dma); 14816 sc->sp = NULL; 14817 bxe_dma_free(sc, &sc->eq_dma); 14818 sc->eq = NULL; 14819 bxe_dma_free(sc, &sc->def_sb_dma); 14820 sc->def_sb = NULL; 14821 bus_dma_tag_destroy(sc->parent_dma_tag); 14822 return (1); 14823 } 14824 14825 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 14826 14827 if ((sc->gz_strm = 14828 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 14829 /* XXX */ 14830 bxe_dma_free(sc, &sc->gz_buf_dma); 14831 sc->gz_buf = NULL; 14832 bxe_dma_free(sc, &sc->spq_dma); 14833 sc->spq = NULL; 14834 bxe_dma_free(sc, &sc->sp_dma); 14835 sc->sp = NULL; 14836 bxe_dma_free(sc, &sc->eq_dma); 14837 sc->eq = NULL; 14838 bxe_dma_free(sc, &sc->def_sb_dma); 14839 sc->def_sb = NULL; 14840 bus_dma_tag_destroy(sc->parent_dma_tag); 14841 return (1); 14842 } 14843 14844 /*************/ 14845 /* FASTPATHS */ 14846 /*************/ 14847 14848 /* allocate 
DMA memory for each fastpath structure */ 14849 for (i = 0; i < sc->num_queues; i++) { 14850 fp = &sc->fp[i]; 14851 fp->sc = sc; 14852 fp->index = i; 14853 14854 /*******************/ 14855 /* FP STATUS BLOCK */ 14856 /*******************/ 14857 14858 snprintf(buf, sizeof(buf), "fp %d status block", i); 14859 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 14860 &fp->sb_dma, buf) != 0) { 14861 /* XXX unwind and free previous fastpath allocations */ 14862 BLOGE(sc, "Failed to alloc %s\n", buf); 14863 return (1); 14864 } else { 14865 if (CHIP_IS_E2E3(sc)) { 14866 fp->status_block.e2_sb = 14867 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 14868 } else { 14869 fp->status_block.e1x_sb = 14870 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 14871 } 14872 } 14873 14874 /******************/ 14875 /* FP TX BD CHAIN */ 14876 /******************/ 14877 14878 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 14879 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 14880 &fp->tx_dma, buf) != 0) { 14881 /* XXX unwind and free previous fastpath allocations */ 14882 BLOGE(sc, "Failed to alloc %s\n", buf); 14883 return (1); 14884 } else { 14885 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 14886 } 14887 14888 /* link together the tx bd chain pages */ 14889 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 14890 /* index into the tx bd chain array to last entry per page */ 14891 struct eth_tx_next_bd *tx_next_bd = 14892 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 14893 /* point to the next page and wrap from last page */ 14894 busaddr = (fp->tx_dma.paddr + 14895 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 14896 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 14897 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 14898 } 14899 14900 /******************/ 14901 /* FP RX BD CHAIN */ 14902 /******************/ 14903 14904 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 14905 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 14906 &fp->rx_dma, buf) != 0) { 14907 /* XXX unwind and free previous fastpath allocations */ 14908 BLOGE(sc, "Failed to alloc %s\n", buf); 14909 return (1); 14910 } else { 14911 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 14912 } 14913 14914 /* link together the rx bd chain pages */ 14915 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 14916 /* index into the rx bd chain array to last entry per page */ 14917 struct eth_rx_bd *rx_bd = 14918 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 14919 /* point to the next page and wrap from last page */ 14920 busaddr = (fp->rx_dma.paddr + 14921 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 14922 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 14923 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 14924 } 14925 14926 /*******************/ 14927 /* FP RX RCQ CHAIN */ 14928 /*******************/ 14929 14930 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 14931 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 14932 &fp->rcq_dma, buf) != 0) { 14933 /* XXX unwind and free previous fastpath allocations */ 14934 BLOGE(sc, "Failed to alloc %s\n", buf); 14935 return (1); 14936 } else { 14937 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 14938 } 14939 14940 /* link together the rcq chain pages */ 14941 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 14942 /* index into the rcq chain array to last entry per page */ 14943 struct eth_rx_cqe_next_page *rx_cqe_next = 14944 (struct eth_rx_cqe_next_page *) 14945 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 14946 /* point to the next page and wrap from last page */ 14947 
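            /* (j % RCQ_NUM_PAGES) folds the last page's next-pointer back to page 0 */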
            busaddr = (fp->rcq_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
        }

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
                          &fp->rx_sge_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
        }

        /* link together the sge chain pages */
        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
            /* index into the sge chain array to last entry per page */
            struct eth_rx_sge *rx_sge =
                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rx_sge_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
            rx_sge->addr_hi = htole32(U64_HI(busaddr));
            rx_sge->addr_lo = htole32(U64_LO(busaddr));
        }

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        /* set required sizes before mapping to conserve resources */
        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
            max_size = BXE_TSO_MAX_SIZE;
            max_segments = BXE_TSO_MAX_SEGMENTS;
            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
        } else {
            max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
            max_segments = BXE_MAX_SEGMENTS;
            max_seg_size = MCLBYTES;
        }

        /* create a dma tag for the tx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                max_size,           /* max map size */
                                max_segments,       /* num discontinuous */
                                max_seg_size,       /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->tx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d tx mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for each of the tx mbuf clusters */
        for (j = 0; j < TX_BD_TOTAL; j++) {
            if (bus_dmamap_create(fp->tx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->tx_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        /* create a dma tag for the rx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                MJUM9BYTES,         /* max map size */
                                1,                  /* num discontinuous */
                                MJUM9BYTES,         /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->rx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX
unwind and free previous fastpath allocations */ 15049 BLOGE(sc, "Failed to create dma tag for " 15050 "'fp %d rx mbufs' (%d)\n", i, rc); 15051 return (1); 15052 } 15053 15054 /* create dma maps for each of the rx mbuf clusters */ 15055 for (j = 0; j < RX_BD_TOTAL; j++) { 15056 if (bus_dmamap_create(fp->rx_mbuf_tag, 15057 BUS_DMA_NOWAIT, 15058 &fp->rx_mbuf_chain[j].m_map)) { 15059 /* XXX unwind and free previous fastpath allocations */ 15060 BLOGE(sc, "Failed to create dma map for " 15061 "'fp %d rx mbuf %d' (%d)\n", i, j, rc); 15062 return (1); 15063 } 15064 } 15065 15066 /* create dma map for the spare rx mbuf cluster */ 15067 if (bus_dmamap_create(fp->rx_mbuf_tag, 15068 BUS_DMA_NOWAIT, 15069 &fp->rx_mbuf_spare_map)) { 15070 /* XXX unwind and free previous fastpath allocations */ 15071 BLOGE(sc, "Failed to create dma map for " 15072 "'fp %d spare rx mbuf' (%d)\n", i, rc); 15073 return (1); 15074 } 15075 15076 /***************************/ 15077 /* FP RX SGE MBUF DMA MAPS */ 15078 /***************************/ 15079 15080 /* create a dma tag for the rx sge mbufs */ 15081 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15082 1, /* alignment */ 15083 0, /* boundary limit */ 15084 BUS_SPACE_MAXADDR, /* restricted low */ 15085 BUS_SPACE_MAXADDR, /* restricted hi */ 15086 NULL, /* addr filter() */ 15087 NULL, /* addr filter() arg */ 15088 BCM_PAGE_SIZE, /* max map size */ 15089 1, /* num discontinuous */ 15090 BCM_PAGE_SIZE, /* max seg size */ 15091 0, /* flags */ 15092 NULL, /* lock() */ 15093 NULL, /* lock() arg */ 15094 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15095 if (rc != 0) { 15096 /* XXX unwind and free previous fastpath allocations */ 15097 BLOGE(sc, "Failed to create dma tag for " 15098 "'fp %d rx sge mbufs' (%d)\n", i, rc); 15099 return (1); 15100 } 15101 15102 /* create dma maps for the rx sge mbuf clusters */ 15103 for (j = 0; j < RX_SGE_TOTAL; j++) { 15104 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15105 BUS_DMA_NOWAIT, 15106 &fp->rx_sge_mbuf_chain[j].m_map)) { 15107 /* XXX unwind and free previous fastpath allocations */ 15108 BLOGE(sc, "Failed to create dma map for " 15109 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); 15110 return (1); 15111 } 15112 } 15113 15114 /* create dma map for the spare rx sge mbuf cluster */ 15115 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15116 BUS_DMA_NOWAIT, 15117 &fp->rx_sge_mbuf_spare_map)) { 15118 /* XXX unwind and free previous fastpath allocations */ 15119 BLOGE(sc, "Failed to create dma map for " 15120 "'fp %d spare rx sge mbuf' (%d)\n", i, rc); 15121 return (1); 15122 } 15123 15124 /***************************/ 15125 /* FP RX TPA MBUF DMA MAPS */ 15126 /***************************/ 15127 15128 /* create dma maps for the rx tpa mbuf clusters */ 15129 max_agg_queues = MAX_AGG_QS(sc); 15130 15131 for (j = 0; j < max_agg_queues; j++) { 15132 if (bus_dmamap_create(fp->rx_mbuf_tag, 15133 BUS_DMA_NOWAIT, 15134 &fp->rx_tpa_info[j].bd.m_map)) { 15135 /* XXX unwind and free previous fastpath allocations */ 15136 BLOGE(sc, "Failed to create dma map for " 15137 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); 15138 return (1); 15139 } 15140 } 15141 15142 /* create dma map for the spare rx tpa mbuf cluster */ 15143 if (bus_dmamap_create(fp->rx_mbuf_tag, 15144 BUS_DMA_NOWAIT, 15145 &fp->rx_tpa_info_mbuf_spare_map)) { 15146 /* XXX unwind and free previous fastpath allocations */ 15147 BLOGE(sc, "Failed to create dma map for " 15148 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); 15149 return (1); 15150 } 15151 15152 bxe_init_sge_ring_bit_mask(fp); 
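        /* fastpath 'i' now has its status block, rings, and mbuf maps allocated */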
15153 } 15154 15155 return (0); 15156} 15157 15158static void 15159bxe_free_hsi_mem(struct bxe_softc *sc) 15160{ 15161 struct bxe_fastpath *fp; 15162 int max_agg_queues; 15163 int i, j; 15164 15165 if (sc->parent_dma_tag == NULL) { 15166 return; /* assume nothing was allocated */ 15167 } 15168 15169 for (i = 0; i < sc->num_queues; i++) { 15170 fp = &sc->fp[i]; 15171 15172 /*******************/ 15173 /* FP STATUS BLOCK */ 15174 /*******************/ 15175 15176 bxe_dma_free(sc, &fp->sb_dma); 15177 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15178 15179 /******************/ 15180 /* FP TX BD CHAIN */ 15181 /******************/ 15182 15183 bxe_dma_free(sc, &fp->tx_dma); 15184 fp->tx_chain = NULL; 15185 15186 /******************/ 15187 /* FP RX BD CHAIN */ 15188 /******************/ 15189 15190 bxe_dma_free(sc, &fp->rx_dma); 15191 fp->rx_chain = NULL; 15192 15193 /*******************/ 15194 /* FP RX RCQ CHAIN */ 15195 /*******************/ 15196 15197 bxe_dma_free(sc, &fp->rcq_dma); 15198 fp->rcq_chain = NULL; 15199 15200 /*******************/ 15201 /* FP RX SGE CHAIN */ 15202 /*******************/ 15203 15204 bxe_dma_free(sc, &fp->rx_sge_dma); 15205 fp->rx_sge_chain = NULL; 15206 15207 /***********************/ 15208 /* FP TX MBUF DMA MAPS */ 15209 /***********************/ 15210 15211 if (fp->tx_mbuf_tag != NULL) { 15212 for (j = 0; j < TX_BD_TOTAL; j++) { 15213 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15214 bus_dmamap_unload(fp->tx_mbuf_tag, 15215 fp->tx_mbuf_chain[j].m_map); 15216 bus_dmamap_destroy(fp->tx_mbuf_tag, 15217 fp->tx_mbuf_chain[j].m_map); 15218 } 15219 } 15220 15221 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15222 fp->tx_mbuf_tag = NULL; 15223 } 15224 15225 /***********************/ 15226 /* FP RX MBUF DMA MAPS */ 15227 /***********************/ 15228 15229 if (fp->rx_mbuf_tag != NULL) { 15230 for (j = 0; j < RX_BD_TOTAL; j++) { 15231 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15232 bus_dmamap_unload(fp->rx_mbuf_tag, 15233 fp->rx_mbuf_chain[j].m_map); 15234 bus_dmamap_destroy(fp->rx_mbuf_tag, 15235 fp->rx_mbuf_chain[j].m_map); 15236 } 15237 } 15238 15239 if (fp->rx_mbuf_spare_map != NULL) { 15240 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15241 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15242 } 15243 15244 /***************************/ 15245 /* FP RX TPA MBUF DMA MAPS */ 15246 /***************************/ 15247 15248 max_agg_queues = MAX_AGG_QS(sc); 15249 15250 for (j = 0; j < max_agg_queues; j++) { 15251 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15252 bus_dmamap_unload(fp->rx_mbuf_tag, 15253 fp->rx_tpa_info[j].bd.m_map); 15254 bus_dmamap_destroy(fp->rx_mbuf_tag, 15255 fp->rx_tpa_info[j].bd.m_map); 15256 } 15257 } 15258 15259 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15260 bus_dmamap_unload(fp->rx_mbuf_tag, 15261 fp->rx_tpa_info_mbuf_spare_map); 15262 bus_dmamap_destroy(fp->rx_mbuf_tag, 15263 fp->rx_tpa_info_mbuf_spare_map); 15264 } 15265 15266 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15267 fp->rx_mbuf_tag = NULL; 15268 } 15269 15270 /***************************/ 15271 /* FP RX SGE MBUF DMA MAPS */ 15272 /***************************/ 15273 15274 if (fp->rx_sge_mbuf_tag != NULL) { 15275 for (j = 0; j < RX_SGE_TOTAL; j++) { 15276 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15277 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15278 fp->rx_sge_mbuf_chain[j].m_map); 15279 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15280 fp->rx_sge_mbuf_chain[j].m_map); 15281 } 15282 } 15283 15284 if (fp->rx_sge_mbuf_spare_map != NULL) { 15285 
bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15286 fp->rx_sge_mbuf_spare_map); 15287 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15288 fp->rx_sge_mbuf_spare_map); 15289 } 15290 15291 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15292 fp->rx_sge_mbuf_tag = NULL; 15293 } 15294 } 15295 15296 /***************************/ 15297 /* FW DECOMPRESSION BUFFER */ 15298 /***************************/ 15299 15300 bxe_dma_free(sc, &sc->gz_buf_dma); 15301 sc->gz_buf = NULL; 15302 free(sc->gz_strm, M_DEVBUF); 15303 sc->gz_strm = NULL; 15304 15305 /*******************/ 15306 /* SLOW PATH QUEUE */ 15307 /*******************/ 15308 15309 bxe_dma_free(sc, &sc->spq_dma); 15310 sc->spq = NULL; 15311 15312 /*************/ 15313 /* SLOW PATH */ 15314 /*************/ 15315 15316 bxe_dma_free(sc, &sc->sp_dma); 15317 sc->sp = NULL; 15318 15319 /***************/ 15320 /* EVENT QUEUE */ 15321 /***************/ 15322 15323 bxe_dma_free(sc, &sc->eq_dma); 15324 sc->eq = NULL; 15325 15326 /************************/ 15327 /* DEFAULT STATUS BLOCK */ 15328 /************************/ 15329 15330 bxe_dma_free(sc, &sc->def_sb_dma); 15331 sc->def_sb = NULL; 15332 15333 bus_dma_tag_destroy(sc->parent_dma_tag); 15334 sc->parent_dma_tag = NULL; 15335} 15336 15337/* 15338 * Previous driver DMAE transaction may have occurred when pre-boot stage 15339 * ended and boot began. This would invalidate the addresses of the 15340 * transaction, resulting in was-error bit set in the PCI causing all 15341 * hw-to-host PCIe transactions to timeout. If this happened we want to clear 15342 * the interrupt which detected this from the pglueb and the was-done bit 15343 */ 15344static void 15345bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15346{ 15347 uint32_t val; 15348 15349 if (!CHIP_IS_E1x(sc)) { 15350 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15351 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15352 BLOGD(sc, DBG_LOAD, 15353 "Clearing 'was-error' bit that was set in pglueb"); 15354 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15355 } 15356 } 15357} 15358 15359static int 15360bxe_prev_mcp_done(struct bxe_softc *sc) 15361{ 15362 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15363 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15364 if (!rc) { 15365 BLOGE(sc, "MCP response failure, aborting\n"); 15366 return (-1); 15367 } 15368 15369 return (0); 15370} 15371 15372static struct bxe_prev_list_node * 15373bxe_prev_path_get_entry(struct bxe_softc *sc) 15374{ 15375 struct bxe_prev_list_node *tmp; 15376 15377 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15378 if ((sc->pcie_bus == tmp->bus) && 15379 (sc->pcie_device == tmp->slot) && 15380 (SC_PATH(sc) == tmp->path)) { 15381 return (tmp); 15382 } 15383 } 15384 15385 return (NULL); 15386} 15387 15388static uint8_t 15389bxe_prev_is_path_marked(struct bxe_softc *sc) 15390{ 15391 struct bxe_prev_list_node *tmp; 15392 int rc = FALSE; 15393 15394 mtx_lock(&bxe_prev_mtx); 15395 15396 tmp = bxe_prev_path_get_entry(sc); 15397 if (tmp) { 15398 if (tmp->aer) { 15399 BLOGD(sc, DBG_LOAD, 15400 "Path %d/%d/%d was marked by AER\n", 15401 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15402 } else { 15403 rc = TRUE; 15404 BLOGD(sc, DBG_LOAD, 15405 "Path %d/%d/%d was already cleaned from previous drivers\n", 15406 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15407 } 15408 } 15409 15410 mtx_unlock(&bxe_prev_mtx); 15411 15412 return (rc); 15413} 15414 15415static int 15416bxe_prev_mark_path(struct bxe_softc *sc, 15417 uint8_t after_undi) 15418{ 15419 struct bxe_prev_list_node *tmp; 15420 15421 
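    /* the bxe_prev list is global driver state; all walks happen under bxe_prev_mtx */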
mtx_lock(&bxe_prev_mtx); 15422 15423 /* Check whether the entry for this path already exists */ 15424 tmp = bxe_prev_path_get_entry(sc); 15425 if (tmp) { 15426 if (!tmp->aer) { 15427 BLOGD(sc, DBG_LOAD, 15428 "Re-marking AER in path %d/%d/%d\n", 15429 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15430 } else { 15431 BLOGD(sc, DBG_LOAD, 15432 "Removing AER indication from path %d/%d/%d\n", 15433 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15434 tmp->aer = 0; 15435 } 15436 15437 mtx_unlock(&bxe_prev_mtx); 15438 return (0); 15439 } 15440 15441 mtx_unlock(&bxe_prev_mtx); 15442 15443 /* Create an entry for this path and add it */ 15444 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15445 (M_NOWAIT | M_ZERO)); 15446 if (!tmp) { 15447 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15448 return (-1); 15449 } 15450 15451 tmp->bus = sc->pcie_bus; 15452 tmp->slot = sc->pcie_device; 15453 tmp->path = SC_PATH(sc); 15454 tmp->aer = 0; 15455 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15456 15457 mtx_lock(&bxe_prev_mtx); 15458 15459 BLOGD(sc, DBG_LOAD, 15460 "Marked path %d/%d/%d - finished previous unload\n", 15461 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15462 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15463 15464 mtx_unlock(&bxe_prev_mtx); 15465 15466 return (0); 15467} 15468 15469static int 15470bxe_do_flr(struct bxe_softc *sc) 15471{ 15472 int i; 15473 15474 /* only E2 and onwards support FLR */ 15475 if (CHIP_IS_E1x(sc)) { 15476 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15477 return (-1); 15478 } 15479 15480 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15481 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15482 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15483 sc->devinfo.bc_ver); 15484 return (-1); 15485 } 15486 15487 /* Wait for Transaction Pending bit clean */ 15488 for (i = 0; i < 4; i++) { 15489 if (i) { 15490 DELAY(((1 << (i - 1)) * 100) * 1000); 15491 } 15492 15493 if (!bxe_is_pcie_pending(sc)) { 15494 goto clear; 15495 } 15496 } 15497 15498 BLOGE(sc, "PCIE transaction is not cleared, " 15499 "proceeding with reset anyway\n"); 15500 15501clear: 15502 15503 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15504 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15505 15506 return (0); 15507} 15508 15509struct bxe_mac_vals { 15510 uint32_t xmac_addr; 15511 uint32_t xmac_val; 15512 uint32_t emac_addr; 15513 uint32_t emac_val; 15514 uint32_t umac_addr; 15515 uint32_t umac_val; 15516 uint32_t bmac_addr; 15517 uint32_t bmac_val[2]; 15518}; 15519 15520static void 15521bxe_prev_unload_close_mac(struct bxe_softc *sc, 15522 struct bxe_mac_vals *vals) 15523{ 15524 uint32_t val, base_addr, offset, mask, reset_reg; 15525 uint8_t mac_stopped = FALSE; 15526 uint8_t port = SC_PORT(sc); 15527 uint32_t wb_data[2]; 15528 15529 /* reset addresses as they also mark which values were changed */ 15530 vals->bmac_addr = 0; 15531 vals->umac_addr = 0; 15532 vals->xmac_addr = 0; 15533 vals->emac_addr = 0; 15534 15535 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15536 15537 if (!CHIP_IS_E3(sc)) { 15538 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15539 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15540 if ((mask & reset_reg) && val) { 15541 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15542 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15543 : NIG_REG_INGRESS_BMAC0_MEM; 15544 offset = CHIP_IS_E2(sc) ? 
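                     /* E2 uses the BIGMAC2 register block; E1/E1H use the original layout */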
BIGMAC2_REGISTER_BMAC_CONTROL 15545 : BIGMAC_REGISTER_BMAC_CONTROL; 15546 15547 /* 15548 * use rd/wr since we cannot use dmae. This is safe 15549 * since MCP won't access the bus due to the request 15550 * to unload, and no function on the path can be 15551 * loaded at this time. 15552 */ 15553 wb_data[0] = REG_RD(sc, base_addr + offset); 15554 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15555 vals->bmac_addr = base_addr + offset; 15556 vals->bmac_val[0] = wb_data[0]; 15557 vals->bmac_val[1] = wb_data[1]; 15558 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15559 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15560 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15561 } 15562 15563 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15564 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15565 vals->emac_val = REG_RD(sc, vals->emac_addr); 15566 REG_WR(sc, vals->emac_addr, 0); 15567 mac_stopped = TRUE; 15568 } else { 15569 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15570 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15571 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15572 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15573 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15574 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15575 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15576 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15577 REG_WR(sc, vals->xmac_addr, 0); 15578 mac_stopped = TRUE; 15579 } 15580 15581 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15582 if (mask & reset_reg) { 15583 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15584 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15585 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15586 vals->umac_val = REG_RD(sc, vals->umac_addr); 15587 REG_WR(sc, vals->umac_addr, 0); 15588 mac_stopped = TRUE; 15589 } 15590 } 15591 15592 if (mac_stopped) { 15593 DELAY(20000); 15594 } 15595} 15596 15597#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15598#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15599#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15600#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15601 15602static void 15603bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15604 uint8_t port, 15605 uint8_t inc) 15606{ 15607 uint16_t rcq, bd; 15608 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15609 15610 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15611 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15612 15613 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15614 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15615 15616 BLOGD(sc, DBG_LOAD, 15617 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15618 port, bd, rcq); 15619} 15620 15621static int 15622bxe_prev_unload_common(struct bxe_softc *sc) 15623{ 15624 uint32_t reset_reg, tmp_reg = 0, rc; 15625 uint8_t prev_undi = FALSE; 15626 struct bxe_mac_vals mac_vals; 15627 uint32_t timer_count = 1000; 15628 uint32_t prev_brb; 15629 15630 /* 15631 * It is possible a previous function received 'common' answer, 15632 * but hasn't loaded yet, therefore creating a scenario of 15633 * multiple functions receiving 'common' on the same path. 
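 * The bxe_prev path marking below ensures the common cleanup only runs once.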
15634 */ 15635 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15636 15637 memset(&mac_vals, 0, sizeof(mac_vals)); 15638 15639 if (bxe_prev_is_path_marked(sc)) { 15640 return (bxe_prev_mcp_done(sc)); 15641 } 15642 15643 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15644 15645 /* Reset should be performed after BRB is emptied */ 15646 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15647 /* Close the MAC Rx to prevent BRB from filling up */ 15648 bxe_prev_unload_close_mac(sc, &mac_vals); 15649 15650 /* close LLH filters towards the BRB */ 15651 elink_set_rx_filter(&sc->link_params, 0); 15652 15653 /* 15654 * Check if the UNDI driver was previously loaded. 15655 * UNDI driver initializes CID offset for normal bell to 0x7 15656 */ 15657 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15658 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15659 if (tmp_reg == 0x7) { 15660 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15661 prev_undi = TRUE; 15662 /* clear the UNDI indication */ 15663 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15664 /* clear possible idle check errors */ 15665 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15666 } 15667 } 15668 15669 /* wait until BRB is empty */ 15670 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15671 while (timer_count) { 15672 prev_brb = tmp_reg; 15673 15674 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15675 if (!tmp_reg) { 15676 break; 15677 } 15678 15679 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15680 15681 /* reset timer as long as BRB actually gets emptied */ 15682 if (prev_brb > tmp_reg) { 15683 timer_count = 1000; 15684 } else { 15685 timer_count--; 15686 } 15687 15688 /* If UNDI resides in memory, manually increment it */ 15689 if (prev_undi) { 15690 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15691 } 15692 15693 DELAY(10); 15694 } 15695 15696 if (!timer_count) { 15697 BLOGE(sc, "Failed to empty BRB\n"); 15698 } 15699 } 15700 15701 /* No packets are in the pipeline, path is ready for reset */ 15702 bxe_reset_common(sc); 15703 15704 if (mac_vals.xmac_addr) { 15705 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15706 } 15707 if (mac_vals.umac_addr) { 15708 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15709 } 15710 if (mac_vals.emac_addr) { 15711 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15712 } 15713 if (mac_vals.bmac_addr) { 15714 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15715 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15716 } 15717 15718 rc = bxe_prev_mark_path(sc, prev_undi); 15719 if (rc) { 15720 bxe_prev_mcp_done(sc); 15721 return (rc); 15722 } 15723 15724 return (bxe_prev_mcp_done(sc)); 15725} 15726 15727static int 15728bxe_prev_unload_uncommon(struct bxe_softc *sc) 15729{ 15730 int rc; 15731 15732 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15733 15734 /* Test if previous unload process was already finished for this path */ 15735 if (bxe_prev_is_path_marked(sc)) { 15736 return (bxe_prev_mcp_done(sc)); 15737 } 15738 15739 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15740 15741 /* 15742 * If function has FLR capabilities, and existing FW version matches 15743 * the one required, then FLR will be sufficient to clean any residue 15744 * left by previous driver 15745 */ 15746 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15747 if (!rc) { 15748 /* fw version is good */ 15749 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15750 rc = bxe_do_flr(sc); 15751 } 15752 15753 if (!rc) { 15754 /* FLR was performed */ 15755 BLOGD(sc, DBG_LOAD, 
"FLR successful\n"); 15756 return (0); 15757 } 15758 15759 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15760 15761 /* Close the MCP request, return failure*/ 15762 rc = bxe_prev_mcp_done(sc); 15763 if (!rc) { 15764 rc = BXE_PREV_WAIT_NEEDED; 15765 } 15766 15767 return (rc); 15768} 15769 15770static int 15771bxe_prev_unload(struct bxe_softc *sc) 15772{ 15773 int time_counter = 10; 15774 uint32_t fw, hw_lock_reg, hw_lock_val; 15775 uint32_t rc = 0; 15776 15777 /* 15778 * Clear HW from errors which may have resulted from an interrupted 15779 * DMAE transaction. 15780 */ 15781 bxe_prev_interrupted_dmae(sc); 15782 15783 /* Release previously held locks */ 15784 hw_lock_reg = 15785 (SC_FUNC(sc) <= 5) ? 15786 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 15787 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 15788 15789 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 15790 if (hw_lock_val) { 15791 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 15792 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 15793 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 15794 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 15795 } 15796 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 15797 REG_WR(sc, hw_lock_reg, 0xffffffff); 15798 } else { 15799 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 15800 } 15801 15802 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 15803 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 15804 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 15805 } 15806 15807 do { 15808 /* Lock MCP using an unload request */ 15809 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 15810 if (!fw) { 15811 BLOGE(sc, "MCP response failure, aborting\n"); 15812 rc = -1; 15813 break; 15814 } 15815 15816 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 15817 rc = bxe_prev_unload_common(sc); 15818 break; 15819 } 15820 15821 /* non-common reply from MCP night require looping */ 15822 rc = bxe_prev_unload_uncommon(sc); 15823 if (rc != BXE_PREV_WAIT_NEEDED) { 15824 break; 15825 } 15826 15827 DELAY(20000); 15828 } while (--time_counter); 15829 15830 if (!time_counter || rc) { 15831 BLOGE(sc, "Failed to unload previous driver!" 15832 " time_counter %d rc %d\n", time_counter, rc); 15833 rc = -1; 15834 } 15835 15836 return (rc); 15837} 15838 15839void 15840bxe_dcbx_set_state(struct bxe_softc *sc, 15841 uint8_t dcb_on, 15842 uint32_t dcbx_enabled) 15843{ 15844 if (!CHIP_IS_E1x(sc)) { 15845 sc->dcb_state = dcb_on; 15846 sc->dcbx_enabled = dcbx_enabled; 15847 } else { 15848 sc->dcb_state = FALSE; 15849 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 15850 } 15851 BLOGD(sc, DBG_LOAD, 15852 "DCB state [%s:%s]\n", 15853 dcb_on ? "ON" : "OFF", 15854 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 15855 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 15856 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
15857 "on-chip with negotiation" : "invalid"); 15858} 15859 15860/* must be called after sriov-enable */ 15861static int 15862bxe_set_qm_cid_count(struct bxe_softc *sc) 15863{ 15864 int cid_count = BXE_L2_MAX_CID(sc); 15865 15866 if (IS_SRIOV(sc)) { 15867 cid_count += BXE_VF_CIDS; 15868 } 15869 15870 if (CNIC_SUPPORT(sc)) { 15871 cid_count += CNIC_CID_MAX; 15872 } 15873 15874 return (roundup(cid_count, QM_CID_ROUND)); 15875} 15876 15877static void 15878bxe_init_multi_cos(struct bxe_softc *sc) 15879{ 15880 int pri, cos; 15881 15882 uint32_t pri_map = 0; /* XXX change to user config */ 15883 15884 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 15885 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 15886 if (cos < sc->max_cos) { 15887 sc->prio_to_cos[pri] = cos; 15888 } else { 15889 BLOGW(sc, "Invalid COS %d for priority %d " 15890 "(max COS is %d), setting to 0\n", 15891 cos, pri, (sc->max_cos - 1)); 15892 sc->prio_to_cos[pri] = 0; 15893 } 15894 } 15895} 15896 15897static int 15898bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 15899{ 15900 struct bxe_softc *sc; 15901 int error, result; 15902 15903 result = 0; 15904 error = sysctl_handle_int(oidp, &result, 0, req); 15905 15906 if (error || !req->newptr) { 15907 return (error); 15908 } 15909 15910 if (result == 1) { 15911 uint32_t temp; 15912 sc = (struct bxe_softc *)arg1; 15913 15914 BLOGI(sc, "... dumping driver state ...\n"); 15915 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 15916 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 15917 } 15918 15919 return (error); 15920} 15921 15922static int 15923bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 15924{ 15925 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15926 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 15927 uint32_t *offset; 15928 uint64_t value = 0; 15929 int index = (int)arg2; 15930 15931 if (index >= BXE_NUM_ETH_STATS) { 15932 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 15933 return (-1); 15934 } 15935 15936 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 15937 15938 switch (bxe_eth_stats_arr[index].size) { 15939 case 4: 15940 value = (uint64_t)*offset; 15941 break; 15942 case 8: 15943 value = HILO_U64(*offset, *(offset + 1)); 15944 break; 15945 default: 15946 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 15947 index, bxe_eth_stats_arr[index].size); 15948 return (-1); 15949 } 15950 15951 return (sysctl_handle_64(oidp, &value, 0, req)); 15952} 15953 15954static int 15955bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 15956{ 15957 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15958 uint32_t *eth_stats; 15959 uint32_t *offset; 15960 uint64_t value = 0; 15961 uint32_t q_stat = (uint32_t)arg2; 15962 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 15963 uint32_t index = (q_stat & 0xffff); 15964 15965 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 15966 15967 if (index >= BXE_NUM_ETH_Q_STATS) { 15968 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 15969 return (-1); 15970 } 15971 15972 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 15973 15974 switch (bxe_eth_q_stats_arr[index].size) { 15975 case 4: 15976 value = (uint64_t)*offset; 15977 break; 15978 case 8: 15979 value = HILO_U64(*offset, *(offset + 1)); 15980 break; 15981 default: 15982 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 15983 index, bxe_eth_q_stats_arr[index].size); 15984 return (-1); 15985 } 15986 15987 return (sysctl_handle_64(oidp, &value, 0, req)); 15988} 15989 15990static void bxe_force_link_reset(struct bxe_softc *sc) 
15991{ 15992 15993 bxe_acquire_phy_lock(sc); 15994 elink_link_reset(&sc->link_params, &sc->link_vars, 1); 15995 bxe_release_phy_lock(sc); 15996} 15997 15998static int 15999bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS) 16000{ 16001 struct bxe_softc *sc = (struct bxe_softc *)arg1;; 16002 uint32_t cfg_idx = bxe_get_link_cfg_idx(sc); 16003 int rc = 0; 16004 int error; 16005 int result; 16006 16007 16008 error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req); 16009 16010 if (error || !req->newptr) { 16011 return (error); 16012 } 16013 if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) { 16014 BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param); 16015 sc->bxe_pause_param = 8; 16016 } 16017 16018 result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT); 16019 16020 16021 if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) { 16022 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param); 16023 return -EINVAL; 16024 } 16025 16026 if(IS_MF(sc)) 16027 return 0; 16028 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; 16029 if(result & ELINK_FLOW_CTRL_RX) 16030 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX; 16031 16032 if(result & ELINK_FLOW_CTRL_TX) 16033 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX; 16034 if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO) 16035 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE; 16036 16037 if(result & 0x400) { 16038 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) { 16039 sc->link_params.req_flow_ctrl[cfg_idx] = 16040 ELINK_FLOW_CTRL_AUTO; 16041 } 16042 sc->link_params.req_fc_auto_adv = 0; 16043 if (result & ELINK_FLOW_CTRL_RX) 16044 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX; 16045 16046 if (result & ELINK_FLOW_CTRL_TX) 16047 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX; 16048 if (!sc->link_params.req_fc_auto_adv) 16049 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE; 16050 } 16051 if (IS_PF(sc)) { 16052 if (sc->link_vars.link_up) { 16053 bxe_stats_handle(sc, STATS_EVENT_STOP); 16054 } 16055 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 16056 bxe_force_link_reset(sc); 16057 bxe_acquire_phy_lock(sc); 16058 16059 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 16060 16061 bxe_release_phy_lock(sc); 16062 16063 bxe_calc_fc_adv(sc); 16064 } 16065 } 16066 return rc; 16067} 16068 16069 16070static void 16071bxe_add_sysctls(struct bxe_softc *sc) 16072{ 16073 struct sysctl_ctx_list *ctx; 16074 struct sysctl_oid_list *children; 16075 struct sysctl_oid *queue_top, *queue; 16076 struct sysctl_oid_list *queue_top_children, *queue_children; 16077 char queue_num_buf[32]; 16078 uint32_t q_stat; 16079 int i, j; 16080 16081 ctx = device_get_sysctl_ctx(sc->dev); 16082 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16083 16084 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16085 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16086 "version"); 16087 16088 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16089 BCM_5710_FW_MAJOR_VERSION, 16090 BCM_5710_FW_MINOR_VERSION, 16091 BCM_5710_FW_REVISION_VERSION, 16092 BCM_5710_FW_ENGINEERING_VERSION); 16093 16094 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16095 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16096 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16097 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? 
"MF-SI" : 16098 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16099 "Unknown")); 16100 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16101 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16102 "multifunction vnics per port"); 16103 16104 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16105 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16106 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16107 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : 16108 "???GT/s"), 16109 sc->devinfo.pcie_link_width); 16110 16111 sc->debug = bxe_debug; 16112 16113#if __FreeBSD_version >= 900000 16114 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16115 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16116 "bootcode version"); 16117 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16118 CTLFLAG_RD, sc->fw_ver_str, 0, 16119 "firmware version"); 16120 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16121 CTLFLAG_RD, sc->mf_mode_str, 0, 16122 "multifunction mode"); 16123 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16124 CTLFLAG_RD, sc->mac_addr_str, 0, 16125 "mac address"); 16126 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16127 CTLFLAG_RD, sc->pci_link_str, 0, 16128 "pci link status"); 16129 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16130 CTLFLAG_RW, &sc->debug, 16131 "debug logging mode"); 16132#else 16133 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16134 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, 16135 "bootcode version"); 16136 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16137 CTLFLAG_RD, &sc->fw_ver_str, 0, 16138 "firmware version"); 16139 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16140 CTLFLAG_RD, &sc->mf_mode_str, 0, 16141 "multifunction mode"); 16142 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16143 CTLFLAG_RD, &sc->mac_addr_str, 0, 16144 "mac address"); 16145 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16146 CTLFLAG_RD, &sc->pci_link_str, 0, 16147 "pci link status"); 16148 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", 16149 CTLFLAG_RW, &sc->debug, 0, 16150 "debug logging mode"); 16151#endif /* #if __FreeBSD_version >= 900000 */ 16152 16153 sc->trigger_grcdump = 0; 16154 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", 16155 CTLFLAG_RW, &sc->trigger_grcdump, 0, 16156 "trigger grcdump should be invoked" 16157 " before collecting grcdump"); 16158 16159 sc->grcdump_started = 0; 16160 sc->grcdump_done = 0; 16161 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", 16162 CTLFLAG_RD, &sc->grcdump_done, 0, 16163 "set by driver when grcdump is done"); 16164 16165 sc->rx_budget = bxe_rx_budget; 16166 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16167 CTLFLAG_RW, &sc->rx_budget, 0, 16168 "rx processing budget"); 16169 16170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param", 16171 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16172 bxe_sysctl_pauseparam, "IU", 16173 "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8"); 16174 16175 16176 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16177 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16178 bxe_sysctl_state, "IU", "dump driver state"); 16179 16180 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16181 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16182 bxe_eth_stats_arr[i].string, 16183 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16184 bxe_sysctl_eth_stat, "LU", 16185 bxe_eth_stats_arr[i].string); 16186 } 16187 16188 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16189 queue_top = 
SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16190 CTLFLAG_RD, NULL, "queue"); 16191 queue_top_children = SYSCTL_CHILDREN(queue_top); 16192 16193 for (i = 0; i < sc->num_queues; i++) { 16194 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16195 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16196 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16197 queue_num_buf, CTLFLAG_RD, NULL, 16198 "single queue"); 16199 queue_children = SYSCTL_CHILDREN(queue); 16200 16201 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16202 q_stat = ((i << 16) | j); 16203 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16204 bxe_eth_q_stats_arr[j].string, 16205 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16206 bxe_sysctl_eth_q_stat, "LU", 16207 bxe_eth_q_stats_arr[j].string); 16208 } 16209 } 16210} 16211 16212static int 16213bxe_alloc_buf_rings(struct bxe_softc *sc) 16214{ 16215#if __FreeBSD_version >= 901504 16216 16217 int i; 16218 struct bxe_fastpath *fp; 16219 16220 for (i = 0; i < sc->num_queues; i++) { 16221 16222 fp = &sc->fp[i]; 16223 16224 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 16225 M_NOWAIT, &fp->tx_mtx); 16226 if (fp->tx_br == NULL) 16227 return (-1); 16228 } 16229#endif 16230 return (0); 16231} 16232 16233static void 16234bxe_free_buf_rings(struct bxe_softc *sc) 16235{ 16236#if __FreeBSD_version >= 901504 16237 16238 int i; 16239 struct bxe_fastpath *fp; 16240 16241 for (i = 0; i < sc->num_queues; i++) { 16242 16243 fp = &sc->fp[i]; 16244 16245 if (fp->tx_br) { 16246 buf_ring_free(fp->tx_br, M_DEVBUF); 16247 fp->tx_br = NULL; 16248 } 16249 } 16250 16251#endif 16252} 16253 16254static void 16255bxe_init_fp_mutexs(struct bxe_softc *sc) 16256{ 16257 int i; 16258 struct bxe_fastpath *fp; 16259 16260 for (i = 0; i < sc->num_queues; i++) { 16261 16262 fp = &sc->fp[i]; 16263 16264 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 16265 "bxe%d_fp%d_tx_lock", sc->unit, i); 16266 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 16267 16268 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 16269 "bxe%d_fp%d_rx_lock", sc->unit, i); 16270 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 16271 } 16272} 16273 16274static void 16275bxe_destroy_fp_mutexs(struct bxe_softc *sc) 16276{ 16277 int i; 16278 struct bxe_fastpath *fp; 16279 16280 for (i = 0; i < sc->num_queues; i++) { 16281 16282 fp = &sc->fp[i]; 16283 16284 if (mtx_initialized(&fp->tx_mtx)) { 16285 mtx_destroy(&fp->tx_mtx); 16286 } 16287 16288 if (mtx_initialized(&fp->rx_mtx)) { 16289 mtx_destroy(&fp->rx_mtx); 16290 } 16291 } 16292} 16293 16294 16295/* 16296 * Device attach function. 16297 * 16298 * Allocates device resources, performs secondary chip identification, and 16299 * initializes driver instance variables. This function is called from driver 16300 * load after a successful probe. 
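 *
 * (Note: each failure path below releases, in reverse order, everything
 * acquired up to that point and returns ENXIO; the unwind ladders grow
 * as attach progresses.)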
16301 * 16302 * Returns: 16303 * 0 = Success, >0 = Failure 16304 */ 16305static int 16306bxe_attach(device_t dev) 16307{ 16308 struct bxe_softc *sc; 16309 16310 sc = device_get_softc(dev); 16311 16312 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16313 16314 sc->state = BXE_STATE_CLOSED; 16315 16316 sc->dev = dev; 16317 sc->unit = device_get_unit(dev); 16318 16319 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16320 16321 sc->pcie_bus = pci_get_bus(dev); 16322 sc->pcie_device = pci_get_slot(dev); 16323 sc->pcie_func = pci_get_function(dev); 16324 16325 /* enable bus master capability */ 16326 pci_enable_busmaster(dev); 16327 16328 /* get the BARs */ 16329 if (bxe_allocate_bars(sc) != 0) { 16330 return (ENXIO); 16331 } 16332 16333 /* initialize the mutexes */ 16334 bxe_init_mutexes(sc); 16335 16336 /* prepare the periodic callout */ 16337 callout_init(&sc->periodic_callout, 0); 16338 16339 /* prepare the chip taskqueue */ 16340 sc->chip_tq_flags = CHIP_TQ_NONE; 16341 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16342 "bxe%d_chip_tq", sc->unit); 16343 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16344 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16345 taskqueue_thread_enqueue, 16346 &sc->chip_tq); 16347 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16348 "%s", sc->chip_tq_name); 16349 16350 TIMEOUT_TASK_INIT(taskqueue_thread, 16351 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); 16352 16353 16354 /* get device info and set params */ 16355 if (bxe_get_device_info(sc) != 0) { 16356 BLOGE(sc, "failed to get device info\n"); 16357 bxe_deallocate_bars(sc); 16358 pci_disable_busmaster(dev); 16359 return (ENXIO); 16360 } 16361 16362 /* get final misc params */ 16363 bxe_get_params(sc); 16364 16365 /* set the default MTU (changed via ifconfig) */ 16366 sc->mtu = ETHERMTU; 16367 16368 bxe_set_modes_bitmap(sc); 16369 16370 /* XXX 16371 * If in AFEX mode and the function is configured for FCoE 16372 * then bail... no L2 allowed. 
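 * (no such check is implemented below; attach currently continues with
 * the normal L2 setup)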
16373 */ 16374 16375 /* get phy settings from shmem and 'and' against admin settings */ 16376 bxe_get_phy_info(sc); 16377 16378 /* initialize the FreeBSD ifnet interface */ 16379 if (bxe_init_ifnet(sc) != 0) { 16380 bxe_release_mutexes(sc); 16381 bxe_deallocate_bars(sc); 16382 pci_disable_busmaster(dev); 16383 return (ENXIO); 16384 } 16385 16386 if (bxe_add_cdev(sc) != 0) { 16387 if (sc->ifp != NULL) { 16388 ether_ifdetach(sc->ifp); 16389 } 16390 ifmedia_removeall(&sc->ifmedia); 16391 bxe_release_mutexes(sc); 16392 bxe_deallocate_bars(sc); 16393 pci_disable_busmaster(dev); 16394 return (ENXIO); 16395 } 16396 16397 /* allocate device interrupts */ 16398 if (bxe_interrupt_alloc(sc) != 0) { 16399 bxe_del_cdev(sc); 16400 if (sc->ifp != NULL) { 16401 ether_ifdetach(sc->ifp); 16402 } 16403 ifmedia_removeall(&sc->ifmedia); 16404 bxe_release_mutexes(sc); 16405 bxe_deallocate_bars(sc); 16406 pci_disable_busmaster(dev); 16407 return (ENXIO); 16408 } 16409 16410 bxe_init_fp_mutexs(sc); 16411 16412 if (bxe_alloc_buf_rings(sc) != 0) { 16413 bxe_free_buf_rings(sc); 16414 bxe_interrupt_free(sc); 16415 bxe_del_cdev(sc); 16416 if (sc->ifp != NULL) { 16417 ether_ifdetach(sc->ifp); 16418 } 16419 ifmedia_removeall(&sc->ifmedia); 16420 bxe_release_mutexes(sc); 16421 bxe_deallocate_bars(sc); 16422 pci_disable_busmaster(dev); 16423 return (ENXIO); 16424 } 16425 16426 /* allocate ilt */ 16427 if (bxe_alloc_ilt_mem(sc) != 0) { 16428 bxe_free_buf_rings(sc); 16429 bxe_interrupt_free(sc); 16430 bxe_del_cdev(sc); 16431 if (sc->ifp != NULL) { 16432 ether_ifdetach(sc->ifp); 16433 } 16434 ifmedia_removeall(&sc->ifmedia); 16435 bxe_release_mutexes(sc); 16436 bxe_deallocate_bars(sc); 16437 pci_disable_busmaster(dev); 16438 return (ENXIO); 16439 } 16440 16441 /* allocate the host hardware/software hsi structures */ 16442 if (bxe_alloc_hsi_mem(sc) != 0) { 16443 bxe_free_ilt_mem(sc); 16444 bxe_free_buf_rings(sc); 16445 bxe_interrupt_free(sc); 16446 bxe_del_cdev(sc); 16447 if (sc->ifp != NULL) { 16448 ether_ifdetach(sc->ifp); 16449 } 16450 ifmedia_removeall(&sc->ifmedia); 16451 bxe_release_mutexes(sc); 16452 bxe_deallocate_bars(sc); 16453 pci_disable_busmaster(dev); 16454 return (ENXIO); 16455 } 16456 16457 /* need to reset chip if UNDI was active */ 16458 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16459 /* init fw_seq */ 16460 sc->fw_seq = 16461 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16462 DRV_MSG_SEQ_NUMBER_MASK); 16463 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16464 bxe_prev_unload(sc); 16465 } 16466 16467#if 1 16468 /* XXX */ 16469 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16470#else 16471 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16472 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16473 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16474 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16475 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16476 bxe_dcbx_init_params(sc); 16477 } else { 16478 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16479 } 16480#endif 16481 16482 /* calculate qm_cid_count */ 16483 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16484 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16485 16486 sc->max_cos = 1; 16487 bxe_init_multi_cos(sc); 16488 16489 bxe_add_sysctls(sc); 16490 16491 return (0); 16492} 16493 16494/* 16495 * Device detach function. 16496 * 16497 * Stops the controller, resets the controller, and releases resources. 
16498 * 16499 * Returns: 16500 * 0 = Success, >0 = Failure 16501 */ 16502static int 16503bxe_detach(device_t dev) 16504{ 16505 struct bxe_softc *sc; 16506 if_t ifp; 16507 16508 sc = device_get_softc(dev); 16509 16510 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16511 16512 ifp = sc->ifp; 16513 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16514 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16515 return (EBUSY); 16516 } 16517 16518 bxe_del_cdev(sc); 16519 16520 /* stop the periodic callout */ 16521 bxe_periodic_stop(sc); 16522 16523 /* stop the chip taskqueue */ 16524 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16525 if (sc->chip_tq) { 16526 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16527 taskqueue_free(sc->chip_tq); 16528 sc->chip_tq = NULL; 16529 taskqueue_drain_timeout(taskqueue_thread, 16530 &sc->sp_err_timeout_task); 16531 } 16532 16533 /* stop and reset the controller if it was open */ 16534 if (sc->state != BXE_STATE_CLOSED) { 16535 BXE_CORE_LOCK(sc); 16536 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16537 sc->state = BXE_STATE_DISABLED; 16538 BXE_CORE_UNLOCK(sc); 16539 } 16540 16541 /* release the network interface */ 16542 if (ifp != NULL) { 16543 ether_ifdetach(ifp); 16544 } 16545 ifmedia_removeall(&sc->ifmedia); 16546 16547 /* XXX do the following based on driver state... */ 16548 16549 /* free the host hardware/software hsi structures */ 16550 bxe_free_hsi_mem(sc); 16551 16552 /* free ilt */ 16553 bxe_free_ilt_mem(sc); 16554 16555 bxe_free_buf_rings(sc); 16556 16557 /* release the interrupts */ 16558 bxe_interrupt_free(sc); 16559 16560 /* Release the mutexes */ 16561 bxe_destroy_fp_mutexs(sc); 16562 bxe_release_mutexes(sc); 16563 16564 16565 /* Release the PCIe BAR mapped memory */ 16566 bxe_deallocate_bars(sc); 16567 16568 /* Release the FreeBSD interface. */ 16569 if (sc->ifp != NULL) { 16570 if_free(sc->ifp); 16571 } 16572 16573 pci_disable_busmaster(dev); 16574 16575 return (0); 16576} 16577 16578/* 16579 * Device shutdown function. 16580 * 16581 * Stops and resets the controller. 16582 * 16583 * Returns: 16584 * Nothing 16585 */ 16586static int 16587bxe_shutdown(device_t dev) 16588{ 16589 struct bxe_softc *sc; 16590 16591 sc = device_get_softc(dev); 16592 16593 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16594 16595 /* stop the periodic callout */ 16596 bxe_periodic_stop(sc); 16597 16598 if (sc->state != BXE_STATE_CLOSED) { 16599 BXE_CORE_LOCK(sc); 16600 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16601 BXE_CORE_UNLOCK(sc); 16602 } 16603 16604 return (0); 16605} 16606 16607void 16608bxe_igu_ack_sb(struct bxe_softc *sc, 16609 uint8_t igu_sb_id, 16610 uint8_t segment, 16611 uint16_t index, 16612 uint8_t op, 16613 uint8_t update) 16614{ 16615 uint32_t igu_addr = sc->igu_base_addr; 16616 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16617 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16618} 16619 16620static void 16621bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16622 uint8_t func, 16623 uint8_t idu_sb_id, 16624 uint8_t is_pf) 16625{ 16626 uint32_t data, ctl, cnt = 100; 16627 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16628 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16629 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16630 uint32_t sb_bit = 1 << (idu_sb_id%32); 16631 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16632 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16633 16634 /* Not supported in BC mode */ 16635 if (CHIP_INT_MODE_IS_BC(sc)) { 16636 return; 16637 } 16638 16639 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16640 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16641 IGU_REGULAR_CLEANUP_SET | 16642 IGU_REGULAR_BCLEANUP); 16643 16644 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16645 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16646 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16647 16648 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16649 data, igu_addr_data); 16650 REG_WR(sc, igu_addr_data, data); 16651 16652 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16653 BUS_SPACE_BARRIER_WRITE); 16654 mb(); 16655 16656 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16657 ctl, igu_addr_ctl); 16658 REG_WR(sc, igu_addr_ctl, ctl); 16659 16660 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16661 BUS_SPACE_BARRIER_WRITE); 16662 mb(); 16663 16664 /* wait for clean up to finish */ 16665 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16666 DELAY(20000); 16667 } 16668 16669 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16670 BLOGD(sc, DBG_LOAD, 16671 "Unable to finish IGU cleanup: " 16672 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16673 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16674 } 16675} 16676 16677static void 16678bxe_igu_clear_sb(struct bxe_softc *sc, 16679 uint8_t idu_sb_id) 16680{ 16681 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16682} 16683 16684 16685 16686 16687 16688 16689 16690/*******************/ 16691/* ECORE CALLBACKS */ 16692/*******************/ 16693 16694static void 16695bxe_reset_common(struct bxe_softc *sc) 16696{ 16697 uint32_t val = 0x1400; 16698 16699 /* reset_common */ 16700 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16701 16702 if (CHIP_IS_E3(sc)) { 16703 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16704 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16705 } 16706 16707 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16708} 16709 16710static void 16711bxe_common_init_phy(struct bxe_softc *sc) 16712{ 16713 uint32_t shmem_base[2]; 16714 uint32_t shmem2_base[2]; 16715 16716 /* Avoid common init in case MFW supports LFA */ 16717 if (SHMEM2_RD(sc, size) > 16718 (uint32_t)offsetof(struct shmem2_region, 16719 lfa_host_addr[SC_PORT(sc)])) { 16720 return; 16721 } 16722 16723 shmem_base[0] = sc->devinfo.shmem_base; 16724 shmem2_base[0] = sc->devinfo.shmem2_base; 16725 16726 if (!CHIP_IS_E1x(sc)) { 16727 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16728 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16729 } 16730 16731 bxe_acquire_phy_lock(sc); 16732 elink_common_init_phy(sc, shmem_base, shmem2_base, 16733 sc->devinfo.chip_id, 0); 16734 bxe_release_phy_lock(sc); 16735} 16736 16737static void 16738bxe_pf_disable(struct bxe_softc *sc) 16739{ 16740 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16741 16742 val &= ~IGU_PF_CONF_FUNC_EN; 16743 16744 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16745 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16746 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16747} 16748 16749static void 16750bxe_init_pxp(struct bxe_softc *sc) 16751{ 16752 uint16_t devctl; 16753 int r_order, w_order; 16754 16755 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16756 16757 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", 
devctl); 16758 16759 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16760 16761 if (sc->mrrs == -1) { 16762 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16763 } else { 16764 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16765 r_order = sc->mrrs; 16766 } 16767 16768 ecore_init_pxp_arb(sc, r_order, w_order); 16769} 16770 16771static uint32_t 16772bxe_get_pretend_reg(struct bxe_softc *sc) 16773{ 16774 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16775 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16776 return (base + (SC_ABS_FUNC(sc)) * stride); 16777} 16778 16779/* 16780 * Called only on E1H or E2. 16781 * When pretending to be PF, the pretend value is the function number 0..7. 16782 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID 16783 * combination. 16784 */ 16785static int 16786bxe_pretend_func(struct bxe_softc *sc, 16787 uint16_t pretend_func_val) 16788{ 16789 uint32_t pretend_reg; 16790 16791 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16792 return (-1); 16793 } 16794 16795 /* get my own pretend register */ 16796 pretend_reg = bxe_get_pretend_reg(sc); 16797 REG_WR(sc, pretend_reg, pretend_func_val); 16798 REG_RD(sc, pretend_reg); /* read back to flush the pretend write */ 16799 return (0); 16800} 16801 16802static void 16803bxe_iov_init_dmae(struct bxe_softc *sc) 16804{ 16805 return; 16806} 16807 16808static void 16809bxe_iov_init_dq(struct bxe_softc *sc) 16810{ 16811 return; 16812} 16813 16814/* send a NIG loopback debug packet */ 16815static void 16816bxe_lb_pckt(struct bxe_softc *sc) 16817{ 16818 uint32_t wb_write[3]; 16819 16820 /* Ethernet source and destination addresses */ 16821 wb_write[0] = 0x55555555; 16822 wb_write[1] = 0x55555555; 16823 wb_write[2] = 0x20; /* SOP */ 16824 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16825 16826 /* NON-IP protocol */ 16827 wb_write[0] = 0x09000000; 16828 wb_write[1] = 0x55555555; 16829 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16830 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16831} 16832 16833/* 16834 * Some of the internal memories are not directly readable from the driver. 16835 * To test them we send debug packets. 16836 */ 16837static int 16838bxe_int_mem_test(struct bxe_softc *sc) 16839{ 16840 int factor; 16841 int count, i; 16842 uint32_t val = 0; 16843 16844 if (CHIP_REV_IS_FPGA(sc)) { 16845 factor = 120; 16846 } else if (CHIP_REV_IS_EMUL(sc)) { 16847 factor = 200; 16848 } else { 16849 factor = 1; 16850 } 16851 16852 /* disable inputs of parser neighbor blocks */ 16853 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16854 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16855 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16856 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16857 16858 /* write 0 to parser credits for CFC search request */ 16859 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16860 16861 /* send Ethernet packet */ 16862 bxe_lb_pckt(sc); 16863 16864 /* TODO: do I reset the NIG statistics? 
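 * (the check below reads NIG_REG_STAT2_BRB_OCTET via DMAE and compares
 * the byte count against the expected 0x10, so a stale counter would
 * show up as a mismatch)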
*/ 16865 /* Wait until NIG register shows 1 packet of size 0x10 */ 16866 count = 1000 * factor; 16867 while (count) { 16868 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16869 val = *BXE_SP(sc, wb_data[0]); 16870 if (val == 0x10) { 16871 break; 16872 } 16873 16874 DELAY(10000); 16875 count--; 16876 } 16877 16878 if (val != 0x10) { 16879 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16880 return (-1); 16881 } 16882 16883 /* wait until PRS register shows 1 packet */ 16884 count = (1000 * factor); 16885 while (count) { 16886 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16887 if (val == 1) { 16888 break; 16889 } 16890 16891 DELAY(10000); 16892 count--; 16893 } 16894 16895 if (val != 0x1) { 16896 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16897 return (-2); 16898 } 16899 16900 /* Reset and init BRB, PRS */ 16901 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16902 DELAY(50000); 16903 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16904 DELAY(50000); 16905 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16906 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16907 16908 /* Disable inputs of parser neighbor blocks */ 16909 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16910 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16911 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16912 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16913 16914 /* Write 0 to parser credits for CFC search request */ 16915 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16916 16917 /* send 10 Ethernet packets */ 16918 for (i = 0; i < 10; i++) { 16919 bxe_lb_pckt(sc); 16920 } 16921 16922 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 16923 count = (1000 * factor); 16924 while (count) { 16925 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16926 val = *BXE_SP(sc, wb_data[0]); 16927 if (val == 0xb0) { 16928 break; 16929 } 16930 16931 DELAY(10000); 16932 count--; 16933 } 16934 16935 if (val != 0xb0) { 16936 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16937 return (-3); 16938 } 16939 16940 /* Wait until PRS register shows 2 packets */ 16941 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16942 if (val != 2) { 16943 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16944 } 16945 16946 /* Write 1 to parser credits for CFC search request */ 16947 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 16948 16949 /* Wait until PRS register shows 3 packets */ 16950 DELAY(10000 * factor); 16951 16952 /* Wait until NIG register shows 1 packet of size 0x10 */ 16953 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16954 if (val != 3) { 16955 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16956 } 16957 16958 /* clear NIG EOP FIFO */ 16959 for (i = 0; i < 11; i++) { 16960 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 16961 } 16962 16963 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 16964 if (val != 1) { 16965 BLOGE(sc, "clear of NIG failed val=0x%x\n", val); 16966 return (-4); 16967 } 16968 16969 /* Reset and init BRB, PRS, NIG */ 16970 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16971 DELAY(50000); 16972 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16973 DELAY(50000); 16974 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16975 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16976 if (!CNIC_SUPPORT(sc)) { 16977 /* set NIC mode */ 16978 REG_WR(sc, PRS_REG_NIC_MODE, 1); 16979 } 16980 16981 /* Enable inputs of parser neighbor blocks */ 16982 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 16983 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 16984 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 16985 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 16986 
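    /*
     * At this point the loopback self test has passed: the NIG byte counter
     * saw 1 packet of 0x10 bytes, then 11 packets totalling 11 * 0x10 = 0xb0
     * bytes, the PRS packet counters matched, the NIG EOP FIFO drained, and
     * BRB/PRS were reset and re-initialized with parser inputs enabled.
     */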
16987 return (0); 16988} 16989 16990static void 16991bxe_setup_fan_failure_detection(struct bxe_softc *sc) 16992{ 16993 int is_required; 16994 uint32_t val; 16995 int port; 16996 16997 is_required = 0; 16998 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 16999 SHARED_HW_CFG_FAN_FAILURE_MASK); 17000 17001 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17002 is_required = 1; 17003 } 17004 /* 17005 * The fan failure mechanism is usually related to the PHY type since 17006 * the power consumption of the board is affected by the PHY. Currently, 17007 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17008 */ 17009 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17010 for (port = PORT_0; port < PORT_MAX; port++) { 17011 is_required |= elink_fan_failure_det_req(sc, 17012 sc->devinfo.shmem_base, 17013 sc->devinfo.shmem2_base, 17014 port); 17015 } 17016 } 17017 17018 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17019 17020 if (is_required == 0) { 17021 return; 17022 } 17023 17024 /* Fan failure is indicated by SPIO 5 */ 17025 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17026 17027 /* set to active low mode */ 17028 val = REG_RD(sc, MISC_REG_SPIO_INT); 17029 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17030 REG_WR(sc, MISC_REG_SPIO_INT, val); 17031 17032 /* enable interrupt to signal the IGU */ 17033 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17034 val |= MISC_SPIO_SPIO5; 17035 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17036} 17037 17038static void 17039bxe_enable_blocks_attention(struct bxe_softc *sc) 17040{ 17041 uint32_t val; 17042 17043 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17044 if (!CHIP_IS_E1x(sc)) { 17045 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17046 } else { 17047 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17048 } 17049 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17050 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17051 /* 17052 * mask read length error interrupts in brb for parser 17053 * (parsing unit and 'checksum and crc' unit) 17054 * these errors are legal (PU reads fixed length and CAC can cause 17055 * read length error on truncated packets) 17056 */ 17057 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17058 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17059 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17060 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17061 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17062 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17063/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17064/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17065 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17066 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17067 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17068/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17069/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17070 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17071 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17072 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17073 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17074/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17075/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17076 17077 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17078 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17079 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17080 if (!CHIP_IS_E1x(sc)) { 17081 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17082 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17083 } 17084 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17085 17086 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17087 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17088 REG_WR(sc, TCM_REG_TCM_INT_MASK, 
0); 17089/* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17090 17091 if (!CHIP_IS_E1x(sc)) { 17092 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17093 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17094 } 17095 17096 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17097 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17098/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17099 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17100} 17101 17102/** 17103 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17104 * 17105 * @sc: driver handle 17106 */ 17107static int 17108bxe_init_hw_common(struct bxe_softc *sc) 17109{ 17110 uint8_t abs_func_id; 17111 uint32_t val; 17112 17113 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17114 SC_ABS_FUNC(sc)); 17115 17116 /* 17117 * take the RESET lock to protect undi_unload flow from accessing 17118 * registers while we are resetting the chip 17119 */ 17120 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17121 17122 bxe_reset_common(sc); 17123 17124 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17125 17126 val = 0xfffc; 17127 if (CHIP_IS_E3(sc)) { 17128 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17129 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17130 } 17131 17132 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17133 17134 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17135 17136 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17137 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17138 17139 if (!CHIP_IS_E1x(sc)) { 17140 /* 17141 * In 4-port or 2-port mode we need to turn off master-enable for 17142 * everyone. After that we turn it back on for self. So, we disregard 17143 * multi-function, and always disable all functions on the given path, 17144 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17145 */ 17146 for (abs_func_id = SC_PATH(sc); 17147 abs_func_id < (E2_FUNC_MAX * 2); 17148 abs_func_id += 2) { 17149 if (abs_func_id == SC_ABS_FUNC(sc)) { 17150 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17151 continue; 17152 } 17153 17154 bxe_pretend_func(sc, abs_func_id); 17155 17156 /* clear pf enable */ 17157 bxe_pf_disable(sc); 17158 17159 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17160 } 17161 } 17162 17163 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17164 17165 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17166 17167 if (CHIP_IS_E1(sc)) { 17168 /* 17169 * enable HW interrupt from PXP on USDM overflow 17170 * bit 16 on INT_MASK_0 17171 */ 17172 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17173 } 17174 17175 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17176 bxe_init_pxp(sc); 17177 17178#ifdef __BIG_ENDIAN 17179 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17180 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17181 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17182 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17183 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17184 /* make sure this value is 0 */ 17185 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17186 17187 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17188 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17189 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17190 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17191 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17192#endif 17193 17194 ecore_ilt_init_page_size(sc, INITOP_SET); 17195 17196 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17197 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17198 } 17199 17200 /* let the HW do its magic... 
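 * (the PXP2 configuration completes asynchronously; PXP2_REG_RQ_CFG_DONE
 * and PXP2_REG_RD_INIT_DONE are checked after the delay below)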
*/ 17201 DELAY(100000); 17202 17203 /* finish PXP init */ 17204 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 17205 if (val != 1) { 17206 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", 17207 val); 17208 return (-1); 17209 } 17210 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 17211 if (val != 1) { 17212 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); 17213 return (-1); 17214 } 17215 17216 BLOGD(sc, DBG_LOAD, "after pxp init\n"); 17217 17218 /* 17219 * Timer bug workaround for E2 only. We need to set the entire ILT to have 17220 * entries with value "0" and valid bit on. This needs to be done by the 17221 * first PF that is loaded in a path (i.e. common phase) 17222 */ 17223 if (!CHIP_IS_E1x(sc)) { 17224/* 17225 * In E2 there is a bug in the timers block that can cause function 6 / 7 17226 * (i.e. vnic3) to start even if it is marked as "scan-off". 17227 * This occurs when a different function (func2,3) is being marked 17228 * as "scan-off". Real-life scenario for example: if a driver is being 17229 * load-unloaded while func6,7 are down. This will cause the timer to access 17230 * the ilt, translate to a logical address and send a request to read/write. 17231 * Since the ilt for the function that is down is not valid, this will cause 17232 * a translation error which is unrecoverable. 17233 * The Workaround is intended to make sure that when this happens nothing 17234 * fatal will occur. The workaround: 17235 * 1. First PF driver which loads on a path will: 17236 * a. After taking the chip out of reset, by using pretend, 17237 * it will write "0" to the following registers of 17238 * the other vnics. 17239 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 17240 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 17241 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 17242 * And for itself it will write '1' to 17243 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 17244 * dmae-operations (writing to pram for example.) 17245 * note: can be done for only function 6,7 but cleaner this 17246 * way. 17247 * b. Write zero+valid to the entire ILT. 17248 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 17249 * VNIC3 (of that port). The range allocated will be the 17250 * entire ILT. This is needed to prevent ILT range error. 17251 * 2. Any PF driver load flow: 17252 * a. ILT update with the physical addresses of the allocated 17253 * logical pages. 17254 * b. Wait 20msec. - note that this timeout is needed to make 17255 * sure there are no requests in one of the PXP internal 17256 * queues with "old" ILT addresses. 17257 * c. PF enable in the PGLC. 17258 * d. Clear the was_error of the PF in the PGLC. (could have 17259 * occurred while driver was down) 17260 * e. PF enable in the CFC (WEAK + STRONG) 17261 * f. Timers scan enable 17262 * 3. PF driver unload flow: 17263 * a. Clear the Timers scan_en. 17264 * b. Polling for scan_on=0 for that PF. 17265 * c. Clear the PF enable bit in the PXP. 17266 * d. Clear the PF enable in the CFC (WEAK + STRONG) 17267 * e. Write zero+valid to all ILT entries (The valid bit must 17268 * stay set) 17269 * f. If this is VNIC 3 of a port then also init 17270 * first_timers_ilt_entry to zero and last_timers_ilt_entry 17271 * to the last entry in the ILT. 17272 * 17273 * Notes: 17274 * Currently the PF error in the PGLC is non-recoverable. 17275 * In the future there will be a recovery routine for this error. 17276 * Currently attention is masked. 
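 * (For example: on path 0, VNIC3 corresponds to absolute function 6,
 * which is why the workaround below pretends to be SC_PATH(sc) + 6
 * before clearing the ILT.)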
17277 * Having an MCP lock on the load/unload process does not guarantee that 17278 * there is no Timer disable during Func6/7 enable. This is because the 17279 * Timers scan is currently being cleared by the MCP on FLR. 17280 * Step 2.d can be done only for PF6/7 and the driver can also check if 17281 * there is error before clearing it. But the flow above is simpler and 17282 * more general. 17283 * All ILT entries are written by zero+valid and not just PF6/7 17284 * ILT entries since in the future the ILT entries allocation for 17285 * PF-s might be dynamic. 17286 */ 17287 struct ilt_client_info ilt_cli; 17288 struct ecore_ilt ilt; 17289 17290 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17291 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17292 17293 /* initialize dummy TM client */ 17294 ilt_cli.start = 0; 17295 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17296 ilt_cli.client_num = ILT_CLIENT_TM; 17297 17298 /* 17299 * Step 1: set zeroes to all ilt page entries with valid bit on 17300 * Step 2: set the timers first/last ilt entry to point 17301 * to the entire range to prevent ILT range error for 3rd/4th 17302 * vnic (this code assumes existence of the vnic) 17303 * 17304 * both steps performed by call to ecore_ilt_client_init_op() 17305 * with dummy TM client 17306 * 17307 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17308 * and its LAST_ILT counterpart are split registers 17309 */ 17310 17311 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17312 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17313 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17314 17315 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17316 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17317 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17318 } 17319 17320 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17321 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17322 17323 if (!CHIP_IS_E1x(sc)) { 17324 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17325 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17326 17327 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17328 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17329 17330 /* let the HW do its magic... 
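 * (this time the ATC init; ATC_REG_ATC_INIT_DONE is polled in the loop
 * below, with a larger iteration budget on emulation/FPGA via 'factor')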
*/ 17331 do { 17332 DELAY(200000); 17333 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17334 } while (factor-- && (val != 1)); 17335 17336 if (val != 1) { 17337 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); 17338 return (-1); 17339 } 17340 } 17341 17342 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17343 17344 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17345 17346 bxe_iov_init_dmae(sc); 17347 17348 /* clean the DMAE memory */ 17349 sc->dmae_ready = 1; 17350 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17351 17352 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17353 17354 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17355 17356 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17357 17358 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17359 17360 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17361 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17362 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17363 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17364 17365 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17366 17367 /* QM queues pointers table */ 17368 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17369 17370 /* soft reset pulse */ 17371 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17372 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17373 17374 if (CNIC_SUPPORT(sc)) 17375 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17376 17377 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17378 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17379 if (!CHIP_REV_IS_SLOW(sc)) { 17380 /* enable hw interrupt from doorbell Q */ 17381 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17382 } 17383 17384 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17385 17386 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17387 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17388 17389 if (!CHIP_IS_E1(sc)) { 17390 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17391 } 17392 17393 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17394 if (IS_MF_AFEX(sc)) { 17395 /* 17396 * configure that AFEX and VLAN headers must be 17397 * received in AFEX mode 17398 */ 17399 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17400 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17401 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17402 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17403 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17404 } else { 17405 /* 17406 * Bit-map indicating which L2 hdrs may appear 17407 * after the basic Ethernet header 17408 */ 17409 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17410 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17411 } 17412 } 17413 17414 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17415 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17416 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17417 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17418 17419 if (!CHIP_IS_E1x(sc)) { 17420 /* reset VFC memories */ 17421 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17422 VFC_MEMORIES_RST_REG_CAM_RST | 17423 VFC_MEMORIES_RST_REG_RAM_RST); 17424 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17425 VFC_MEMORIES_RST_REG_CAM_RST | 17426 VFC_MEMORIES_RST_REG_RAM_RST); 17427 17428 DELAY(20000); 17429 } 17430 17431 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17432 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17433 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17434 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17435 17436 /* sync semi rtc */ 17437 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17438 0x80000000); 17439 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17440 0x80000000); 17441 17442 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17443 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17444 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17445 17446 if (!CHIP_IS_E1x(sc)) { 17447 if (IS_MF_AFEX(sc)) { 17448 /* 17449 * configure that AFEX and VLAN headers must be 17450 * sent in AFEX mode 17451 */ 17452 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17453 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17454 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17455 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17456 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17457 } else { 17458 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17459 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17460 } 17461 } 17462 17463 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17464 17465 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17466 17467 if (CNIC_SUPPORT(sc)) { 17468 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17469 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17470 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17471 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17472 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17473 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17474 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17475 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17476 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17477 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17478 } 17479 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17480 17481 if (sizeof(union cdu_context) != 1024) { 17482 /* we currently assume that a context is 1024 bytes */ 17483 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17484 (long)sizeof(union cdu_context)); 17485 } 17486 17487 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17488 val = (4 << 24) + (0 << 12) + 1024; 17489 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17490 17491 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17492 17493 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17494 /* enable context validation interrupt from CFC */ 17495 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17496 17497 /* set the thresholds to prevent CFC/CDU race */ 17498 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17499 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17500 17501 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17502 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17503 } 17504 17505 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17506 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17507 17508 /* Reset PCIE errors for debug */ 17509 REG_WR(sc, 0x2814, 0xffffffff); 17510 REG_WR(sc, 0x3820, 0xffffffff); 17511 17512 if (!CHIP_IS_E1x(sc)) { 
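 /*
 * Clear the "unsupported request" error indications recorded by the
 * PCIe transaction layer for all functions (the ERR_UNSPPORT bits
 * written below are assumed to be write-1-to-clear status bits).
 */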
17513 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 17514 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 17515 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17516 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17517 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17518 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17519 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17520 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17521 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17522 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17523 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17524 } 17525 17526 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17527 17528 if (!CHIP_IS_E1(sc)) { 17529 /* in E3 this done in per-port section */ 17530 if (!CHIP_IS_E3(sc)) 17531 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17532 } 17533 17534 if (CHIP_IS_E1H(sc)) { 17535 /* not applicable for E2 (and above ...) */ 17536 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17537 } 17538 17539 if (CHIP_REV_IS_SLOW(sc)) { 17540 DELAY(200000); 17541 } 17542 17543 /* finish CFC init */ 17544 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17545 if (val != 1) { 17546 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); 17547 return (-1); 17548 } 17549 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17550 if (val != 1) { 17551 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); 17552 return (-1); 17553 } 17554 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17555 if (val != 1) { 17556 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); 17557 return (-1); 17558 } 17559 REG_WR(sc, CFC_REG_DEBUG0, 0); 17560 17561 if (CHIP_IS_E1(sc)) { 17562 /* read NIG statistic to see if this is our first up since powerup */ 17563 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17564 val = *BXE_SP(sc, wb_data[0]); 17565 17566 /* do internal memory self test */ 17567 if ((val == 0) && bxe_int_mem_test(sc)) { 17568 BLOGE(sc, "internal mem self test failed val=0x%x\n", val); 17569 return (-1); 17570 } 17571 } 17572 17573 bxe_setup_fan_failure_detection(sc); 17574 17575 /* clear PXP2 attentions */ 17576 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17577 17578 bxe_enable_blocks_attention(sc); 17579 17580 if (!CHIP_REV_IS_SLOW(sc)) { 17581 ecore_enable_blocks_parity(sc); 17582 } 17583 17584 if (!BXE_NOMCP(sc)) { 17585 if (CHIP_IS_E1x(sc)) { 17586 bxe_common_init_phy(sc); 17587 } 17588 } 17589 17590 return (0); 17591} 17592 17593/** 17594 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17595 * 17596 * @sc: driver handle 17597 */ 17598static int 17599bxe_init_hw_common_chip(struct bxe_softc *sc) 17600{ 17601 int rc = bxe_init_hw_common(sc); 17602 17603 if (rc) { 17604 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); 17605 return (rc); 17606 } 17607 17608 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 17609 if (!BXE_NOMCP(sc)) { 17610 bxe_common_init_phy(sc); 17611 } 17612 17613 return (0); 17614} 17615 17616static int 17617bxe_init_hw_port(struct bxe_softc *sc) 17618{ 17619 int port = SC_PORT(sc); 17620 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 17621 uint32_t low, high; 17622 uint32_t val; 17623 17624 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17625 17626 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17627 17628 ecore_init_block(sc, BLOCK_MISC, init_phase); 17629 ecore_init_block(sc, BLOCK_PXP, init_phase); 17630 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17631 17632 /* 17633 * Timers bug workaround: disables the pf_master bit in pglue at 17634 * common phase, we need to enable it here before any dmae access are 17635 * attempted. 
Therefore we manually added the enable-master to the 17636 * port phase (it also happens in the function phase) 17637 */ 17638 if (!CHIP_IS_E1x(sc)) { 17639 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17640 } 17641 17642 ecore_init_block(sc, BLOCK_ATC, init_phase); 17643 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17644 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17645 ecore_init_block(sc, BLOCK_QM, init_phase); 17646 17647 ecore_init_block(sc, BLOCK_TCM, init_phase); 17648 ecore_init_block(sc, BLOCK_UCM, init_phase); 17649 ecore_init_block(sc, BLOCK_CCM, init_phase); 17650 ecore_init_block(sc, BLOCK_XCM, init_phase); 17651 17652 /* QM cid (connection) count */ 17653 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17654 17655 if (CNIC_SUPPORT(sc)) { 17656 ecore_init_block(sc, BLOCK_TM, init_phase); 17657 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17658 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17659 } 17660 17661 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17662 17663 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17664 17665 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17666 if (IS_MF(sc)) { 17667 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17668 } else if (sc->mtu > 4096) { 17669 if (BXE_ONE_PORT(sc)) { 17670 low = 160; 17671 } else { 17672 val = sc->mtu; 17673 /* (24*1024 + val*4)/256 */ 17674 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17675 } 17676 } else { 17677 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17678 } 17679 high = (low + 56); /* 14*1024/256 */ 17680 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17681 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17682 } 17683 17684 if (CHIP_IS_MODE_4_PORT(sc)) { 17685 REG_WR(sc, SC_PORT(sc) ? 17686 BRB1_REG_MAC_GUARANTIED_1 : 17687 BRB1_REG_MAC_GUARANTIED_0, 40); 17688 } 17689 17690 ecore_init_block(sc, BLOCK_PRS, init_phase); 17691 if (CHIP_IS_E3B0(sc)) { 17692 if (IS_MF_AFEX(sc)) { 17693 /* configure headers for AFEX mode */ 17694 REG_WR(sc, SC_PORT(sc) ? 17695 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17696 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17697 REG_WR(sc, SC_PORT(sc) ? 17698 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17699 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17700 REG_WR(sc, SC_PORT(sc) ? 17701 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17702 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17703 } else { 17704 /* Ovlan exists only if we are in multi-function + 17705 * switch-dependent mode, in switch-independent there 17706 * are no ovlan headers 17707 */ 17708 REG_WR(sc, SC_PORT(sc) ? 17709 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17710 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17711 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 17712 } 17713 } 17714 17715 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17716 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17717 ecore_init_block(sc, BLOCK_USDM, init_phase); 17718 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17719 17720 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17721 ecore_init_block(sc, BLOCK_USEM, init_phase); 17722 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17723 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17724 17725 ecore_init_block(sc, BLOCK_UPB, init_phase); 17726 ecore_init_block(sc, BLOCK_XPB, init_phase); 17727 17728 ecore_init_block(sc, BLOCK_PBF, init_phase); 17729 17730 if (CHIP_IS_E1x(sc)) { 17731 /* configure PBF to work without PAUSE mtu 9000 */ 17732 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17733 17734 /* update threshold */ 17735 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17736 /* update init credit */ 17737 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17738 17739 /* probe changes */ 17740 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17741 DELAY(50); 17742 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17743 } 17744 17745 if (CNIC_SUPPORT(sc)) { 17746 ecore_init_block(sc, BLOCK_SRC, init_phase); 17747 } 17748 17749 ecore_init_block(sc, BLOCK_CDU, init_phase); 17750 ecore_init_block(sc, BLOCK_CFC, init_phase); 17751 17752 if (CHIP_IS_E1(sc)) { 17753 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17754 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17755 } 17756 ecore_init_block(sc, BLOCK_HC, init_phase); 17757 17758 ecore_init_block(sc, BLOCK_IGU, init_phase); 17759 17760 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17761 /* init aeu_mask_attn_func_0/1: 17762 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17763 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17764 * bits 4-7 are used for "per vn group attention" */ 17765 val = IS_MF(sc) ? 0xF7 : 0x7; 17766 /* Enable DCBX attention for all but E1 */ 17767 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17768 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17769 17770 ecore_init_block(sc, BLOCK_NIG, init_phase); 17771 17772 if (!CHIP_IS_E1x(sc)) { 17773 /* Bit-map indicating which L2 hdrs may appear after the 17774 * basic Ethernet header 17775 */ 17776 if (IS_MF_AFEX(sc)) { 17777 REG_WR(sc, SC_PORT(sc) ? 17778 NIG_REG_P1_HDRS_AFTER_BASIC : 17779 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17780 } else { 17781 REG_WR(sc, SC_PORT(sc) ? 17782 NIG_REG_P1_HDRS_AFTER_BASIC : 17783 NIG_REG_P0_HDRS_AFTER_BASIC, 17784 IS_MF_SD(sc) ? 7 : 6); 17785 } 17786 17787 if (CHIP_IS_E3(sc)) { 17788 REG_WR(sc, SC_PORT(sc) ? 17789 NIG_REG_LLH1_MF_MODE : 17790 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17791 } 17792 } 17793 if (!CHIP_IS_E3(sc)) { 17794 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17795 } 17796 17797 if (!CHIP_IS_E1(sc)) { 17798 /* 0x2 disable mf_ov, 0x1 enable */ 17799 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17800 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17801 17802 if (!CHIP_IS_E1x(sc)) { 17803 val = 0; 17804 switch (sc->devinfo.mf_info.mf_mode) { 17805 case MULTI_FUNCTION_SD: 17806 val = 1; 17807 break; 17808 case MULTI_FUNCTION_SI: 17809 case MULTI_FUNCTION_AFEX: 17810 val = 2; 17811 break; 17812 } 17813 17814 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE : 17815 NIG_REG_LLH0_CLS_TYPE), val); 17816 } 17817 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 17818 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 17819 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 17820 } 17821 17822 /* If SPIO5 is set to generate interrupts, enable it for this port */ 17823 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17824 if (val & MISC_SPIO_SPIO5) { 17825 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 17826 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 17827 val = REG_RD(sc, reg_addr); 17828 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 17829 REG_WR(sc, reg_addr, val); 17830 } 17831 17832 return (0); 17833} 17834 17835static uint32_t 17836bxe_flr_clnup_reg_poll(struct bxe_softc *sc, 17837 uint32_t reg, 17838 uint32_t expected, 17839 uint32_t poll_count) 17840{ 17841 uint32_t cur_cnt = poll_count; 17842 uint32_t val; 17843 17844 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 17845 DELAY(FLR_WAIT_INTERVAL); 17846 } 17847 17848 return (val); 17849} 17850 17851static int 17852bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, 17853 uint32_t reg, 17854 char *msg, 17855 uint32_t poll_cnt) 17856{ 17857 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 17858 17859 if (val != 0) { 17860 BLOGE(sc, "%s usage count=%d\n", msg, val); 17861 return (1); 17862 } 17863 17864 return (0); 17865} 17866 17867/* Common routines with VF FLR cleanup */ 17868static uint32_t 17869bxe_flr_clnup_poll_count(struct bxe_softc *sc) 17870{ 17871 /* adjust polling timeout */ 17872 if (CHIP_REV_IS_EMUL(sc)) { 17873 return (FLR_POLL_CNT * 2000); 17874 } 17875 17876 if (CHIP_REV_IS_FPGA(sc)) { 17877 return (FLR_POLL_CNT * 120); 17878 } 17879 17880 return (FLR_POLL_CNT); 17881} 17882 17883static int 17884bxe_poll_hw_usage_counters(struct bxe_softc *sc, 17885 uint32_t poll_cnt) 17886{ 17887 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 17888 if (bxe_flr_clnup_poll_hw_counter(sc, 17889 CFC_REG_NUM_LCIDS_INSIDE_PF, 17890 "CFC PF usage counter timed out", 17891 poll_cnt)) { 17892 return (1); 17893 } 17894 17895 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 17896 if (bxe_flr_clnup_poll_hw_counter(sc, 17897 DORQ_REG_PF_USAGE_CNT, 17898 "DQ PF usage counter timed out", 17899 poll_cnt)) { 17900 return (1); 17901 } 17902 17903 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 17904 if (bxe_flr_clnup_poll_hw_counter(sc, 17905 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), 17906 "QM PF usage counter timed out", 17907 poll_cnt)) { 17908 return (1); 17909 } 17910 17911 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 17912 if (bxe_flr_clnup_poll_hw_counter(sc, 17913 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), 17914 "Timers VNIC usage counter timed out", 17915 poll_cnt)) { 17916 return (1); 17917 } 17918 17919 if (bxe_flr_clnup_poll_hw_counter(sc, 17920 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), 17921 "Timers NUM_SCANS usage counter timed out", 17922 poll_cnt)) { 17923 return (1); 17924 } 17925 17926 /* Wait for the DMAE PF usage counter to zero */ 17927 if (bxe_flr_clnup_poll_hw_counter(sc, 17928 dmae_reg_go_c[INIT_DMAE_C(sc)], 17929 "DMAE command register timed out", 17930 poll_cnt)) { 17931 return (1); 17932 } 17933 17934 return (0); 17935} 17936 17937#define OP_GEN_PARAM(param) \ 17938 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 17939#define OP_GEN_TYPE(type) \ 17940 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 17941#define OP_GEN_AGG_VECT(index) \ 17942 (((index) << 
SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 17943 17944static int 17945bxe_send_final_clnup(struct bxe_softc *sc, 17946 uint8_t clnup_func, 17947 uint32_t poll_cnt) 17948{ 17949 uint32_t op_gen_command = 0; 17950 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 17951 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 17952 int ret = 0; 17953 17954 if (REG_RD(sc, comp_addr)) { 17955 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 17956 return (1); 17957 } 17958 17959 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 17960 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 17961 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 17962 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 17963 17964 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 17965 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 17966 17967 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 17968 BLOGE(sc, "FW final cleanup did not succeed\n"); 17969 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 17970 (REG_RD(sc, comp_addr))); 17971 bxe_panic(sc, ("FLR cleanup failed\n")); 17972 return (1); 17973 } 17974 17975 /* Zero the completion for the next FLR */ 17976 REG_WR(sc, comp_addr, 0); 17977 17978 return (ret); 17979} 17980 17981static void 17982bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 17983 struct pbf_pN_buf_regs *regs, 17984 uint32_t poll_count) 17985{ 17986 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 17987 uint32_t cur_cnt = poll_count; 17988 17989 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 17990 crd = crd_start = REG_RD(sc, regs->crd); 17991 init_crd = REG_RD(sc, regs->init_crd); 17992 17993 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 17994 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 17995 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 17996 17997 while ((crd != init_crd) && 17998 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 17999 (init_crd - crd_start))) { 18000 if (cur_cnt--) { 18001 DELAY(FLR_WAIT_INTERVAL); 18002 crd = REG_RD(sc, regs->crd); 18003 crd_freed = REG_RD(sc, regs->crd_freed); 18004 } else { 18005 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 18006 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 18007 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 18008 break; 18009 } 18010 } 18011 18012 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 18013 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18014} 18015 18016static void 18017bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 18018 struct pbf_pN_cmd_regs *regs, 18019 uint32_t poll_count) 18020{ 18021 uint32_t occup, to_free, freed, freed_start; 18022 uint32_t cur_cnt = poll_count; 18023 18024 occup = to_free = REG_RD(sc, regs->lines_occup); 18025 freed = freed_start = REG_RD(sc, regs->lines_freed); 18026 18027 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18028 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 18029 18030 while (occup && 18031 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 18032 if (cur_cnt--) { 18033 DELAY(FLR_WAIT_INTERVAL); 18034 occup = REG_RD(sc, regs->lines_occup); 18035 freed = REG_RD(sc, regs->lines_freed); 18036 } else { 18037 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 18038 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18039 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", 
regs->pN, freed); 18040 break; 18041 } 18042 } 18043 18044 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18045 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18046} 18047 18048static void 18049bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18050{ 18051 struct pbf_pN_cmd_regs cmd_regs[] = { 18052 {0, (CHIP_IS_E3B0(sc)) ? 18053 PBF_REG_TQ_OCCUPANCY_Q0 : 18054 PBF_REG_P0_TQ_OCCUPANCY, 18055 (CHIP_IS_E3B0(sc)) ? 18056 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18057 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18058 {1, (CHIP_IS_E3B0(sc)) ? 18059 PBF_REG_TQ_OCCUPANCY_Q1 : 18060 PBF_REG_P1_TQ_OCCUPANCY, 18061 (CHIP_IS_E3B0(sc)) ? 18062 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18063 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18064 {4, (CHIP_IS_E3B0(sc)) ? 18065 PBF_REG_TQ_OCCUPANCY_LB_Q : 18066 PBF_REG_P4_TQ_OCCUPANCY, 18067 (CHIP_IS_E3B0(sc)) ? 18068 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18069 PBF_REG_P4_TQ_LINES_FREED_CNT} 18070 }; 18071 18072 struct pbf_pN_buf_regs buf_regs[] = { 18073 {0, (CHIP_IS_E3B0(sc)) ? 18074 PBF_REG_INIT_CRD_Q0 : 18075 PBF_REG_P0_INIT_CRD , 18076 (CHIP_IS_E3B0(sc)) ? 18077 PBF_REG_CREDIT_Q0 : 18078 PBF_REG_P0_CREDIT, 18079 (CHIP_IS_E3B0(sc)) ? 18080 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18081 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18082 {1, (CHIP_IS_E3B0(sc)) ? 18083 PBF_REG_INIT_CRD_Q1 : 18084 PBF_REG_P1_INIT_CRD, 18085 (CHIP_IS_E3B0(sc)) ? 18086 PBF_REG_CREDIT_Q1 : 18087 PBF_REG_P1_CREDIT, 18088 (CHIP_IS_E3B0(sc)) ? 18089 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18090 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18091 {4, (CHIP_IS_E3B0(sc)) ? 18092 PBF_REG_INIT_CRD_LB_Q : 18093 PBF_REG_P4_INIT_CRD, 18094 (CHIP_IS_E3B0(sc)) ? 18095 PBF_REG_CREDIT_LB_Q : 18096 PBF_REG_P4_CREDIT, 18097 (CHIP_IS_E3B0(sc)) ? 18098 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18099 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18100 }; 18101 18102 int i; 18103 18104 /* Verify the command queues are flushed P0, P1, P4 */ 18105 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18106 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18107 } 18108 18109 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18110 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18111 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18112 } 18113} 18114 18115static void 18116bxe_hw_enable_status(struct bxe_softc *sc) 18117{ 18118 uint32_t val; 18119 18120 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18121 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18122 18123 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18124 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18125 18126 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18127 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18128 18129 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18130 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18131 18132 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18133 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18134 18135 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18136 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18137 18138 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18139 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18140 18141 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18142 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18143} 18144 18145static int 18146bxe_pf_flr_clnup(struct bxe_softc *sc) 18147{ 18148 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18149 18150 BLOGD(sc, DBG_LOAD, 
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18151 18152 /* Re-enable PF target read access */ 18153 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18154 18155 /* Poll HW usage counters */ 18156 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18157 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18158 return (-1); 18159 } 18160 18161 /* Zero the igu 'trailing edge' and 'leading edge' */ 18162 18163 /* Send the FW cleanup command */ 18164 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18165 return (-1); 18166 } 18167 18168 /* ATC cleanup */ 18169 18170 /* Verify TX hw is flushed */ 18171 bxe_tx_hw_flushed(sc, poll_cnt); 18172 18173 /* Wait 100ms (not adjusted according to platform) */ 18174 DELAY(100000); 18175 18176 /* Verify no pending pci transactions */ 18177 if (bxe_is_pcie_pending(sc)) { 18178 BLOGE(sc, "PCIE Transactions still pending\n"); 18179 } 18180 18181 /* Debug */ 18182 bxe_hw_enable_status(sc); 18183 18184 /* 18185 * Master enable - Due to WB DMAE writes performed before this 18186 * register is re-initialized as part of the regular function init 18187 */ 18188 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18189 18190 return (0); 18191} 18192 18193static int 18194bxe_init_hw_func(struct bxe_softc *sc) 18195{ 18196 int port = SC_PORT(sc); 18197 int func = SC_FUNC(sc); 18198 int init_phase = PHASE_PF0 + func; 18199 struct ecore_ilt *ilt = sc->ilt; 18200 uint16_t cdu_ilt_start; 18201 uint32_t addr, val; 18202 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18203 int i, main_mem_width, rc; 18204 18205 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18206 18207 /* FLR cleanup */ 18208 if (!CHIP_IS_E1x(sc)) { 18209 rc = bxe_pf_flr_clnup(sc); 18210 if (rc) { 18211 BLOGE(sc, "FLR cleanup failed!\n"); 18212 // XXX bxe_fw_dump(sc); 18213 // XXX bxe_idle_chk(sc); 18214 return (rc); 18215 } 18216 } 18217 18218 /* set MSI reconfigure capability */ 18219 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18220 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18221 val = REG_RD(sc, addr); 18222 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18223 REG_WR(sc, addr, val); 18224 } 18225 18226 ecore_init_block(sc, BLOCK_PXP, init_phase); 18227 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18228 18229 ilt = sc->ilt; 18230 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18231 18232 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18233 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18234 ilt->lines[cdu_ilt_start + i].page_mapping = 18235 sc->context[i].vcxt_dma.paddr; 18236 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18237 } 18238 ecore_ilt_init_op(sc, INITOP_SET); 18239 18240 /* Set NIC mode */ 18241 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18242 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18243 18244 if (!CHIP_IS_E1x(sc)) { 18245 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18246 18247 /* Turn on a single ISR mode in IGU if driver is going to use 18248 * INT#x or MSI 18249 */ 18250 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18251 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18252 } 18253 18254 /* 18255 * Timers workaround bug: function init part. 
18256 * Need to wait 20msec after initializing ILT, 18257 * needed to make sure there are no requests in 18258 * one of the PXP internal queues with "old" ILT addresses 18259 */ 18260 DELAY(20000); 18261 18262 /* 18263 * Master enable - Due to WB DMAE writes performed before this 18264 * register is re-initialized as part of the regular function 18265 * init 18266 */ 18267 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18268 /* Enable the function in IGU */ 18269 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18270 } 18271 18272 sc->dmae_ready = 1; 18273 18274 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18275 18276 if (!CHIP_IS_E1x(sc)) 18277 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18278 18279 ecore_init_block(sc, BLOCK_ATC, init_phase); 18280 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18281 ecore_init_block(sc, BLOCK_NIG, init_phase); 18282 ecore_init_block(sc, BLOCK_SRC, init_phase); 18283 ecore_init_block(sc, BLOCK_MISC, init_phase); 18284 ecore_init_block(sc, BLOCK_TCM, init_phase); 18285 ecore_init_block(sc, BLOCK_UCM, init_phase); 18286 ecore_init_block(sc, BLOCK_CCM, init_phase); 18287 ecore_init_block(sc, BLOCK_XCM, init_phase); 18288 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18289 ecore_init_block(sc, BLOCK_USEM, init_phase); 18290 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18291 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18292 18293 if (!CHIP_IS_E1x(sc)) 18294 REG_WR(sc, QM_REG_PF_EN, 1); 18295 18296 if (!CHIP_IS_E1x(sc)) { 18297 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18298 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18299 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18300 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18301 } 18302 ecore_init_block(sc, BLOCK_QM, init_phase); 18303 18304 ecore_init_block(sc, BLOCK_TM, init_phase); 18305 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18306 18307 bxe_iov_init_dq(sc); 18308 18309 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18310 ecore_init_block(sc, BLOCK_PRS, init_phase); 18311 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18312 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18313 ecore_init_block(sc, BLOCK_USDM, init_phase); 18314 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18315 ecore_init_block(sc, BLOCK_UPB, init_phase); 18316 ecore_init_block(sc, BLOCK_XPB, init_phase); 18317 ecore_init_block(sc, BLOCK_PBF, init_phase); 18318 if (!CHIP_IS_E1x(sc)) 18319 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18320 18321 ecore_init_block(sc, BLOCK_CDU, init_phase); 18322 18323 ecore_init_block(sc, BLOCK_CFC, init_phase); 18324 18325 if (!CHIP_IS_E1x(sc)) 18326 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18327 18328 if (IS_MF(sc)) { 18329 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18330 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18331 } 18332 18333 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18334 18335 /* HC init per function */ 18336 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18337 if (CHIP_IS_E1H(sc)) { 18338 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18339 18340 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18341 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18342 } 18343 ecore_init_block(sc, BLOCK_HC, init_phase); 18344 18345 } else { 18346 int num_segs, sb_idx, prod_offset; 18347 18348 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18349 18350 if (!CHIP_IS_E1x(sc)) { 18351 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18352 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18353 } 18354 18355 
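/*
 * IGU (rather than HC) is the interrupt block for this function: init the
 * block, then zero the producer memory and clear the status blocks. The
 * producer memory layout (E2 normal mode vs. E1.5 backward compatible
 * mode) is described in the comment that follows.
 */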
ecore_init_block(sc, BLOCK_IGU, init_phase); 18356 18357 if (!CHIP_IS_E1x(sc)) { 18358 int dsb_idx = 0; 18359 /** 18360 * Producer memory: 18361 * E2 mode: address 0-135 match to the mapping memory; 18362 * 136 - PF0 default prod; 137 - PF1 default prod; 18363 * 138 - PF2 default prod; 139 - PF3 default prod; 18364 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18365 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18366 * 144-147 reserved. 18367 * 18368 * E1.5 mode - In backward compatible mode; 18369 * for non default SB; each even line in the memory 18370 * holds the U producer and each odd line holds 18371 * the C producer. The first 128 producers are for 18372 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18373 * producers are for the DSB for each PF. 18374 * Each PF has five segments: (the order inside each 18375 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18376 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18377 * 144-147 attn prods; 18378 */ 18379 /* non-default-status-blocks */ 18380 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18381 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18382 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18383 prod_offset = (sc->igu_base_sb + sb_idx) * 18384 num_segs; 18385 18386 for (i = 0; i < num_segs; i++) { 18387 addr = IGU_REG_PROD_CONS_MEMORY + 18388 (prod_offset + i) * 4; 18389 REG_WR(sc, addr, 0); 18390 } 18391 /* send consumer update with value 0 */ 18392 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18393 USTORM_ID, 0, IGU_INT_NOP, 1); 18394 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18395 } 18396 18397 /* default-status-blocks */ 18398 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18399 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 18400 18401 if (CHIP_IS_MODE_4_PORT(sc)) 18402 dsb_idx = SC_FUNC(sc); 18403 else 18404 dsb_idx = SC_VN(sc); 18405 18406 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 18407 IGU_BC_BASE_DSB_PROD + dsb_idx : 18408 IGU_NORM_BASE_DSB_PROD + dsb_idx); 18409 18410 /* 18411 * igu prods come in chunks of E1HVN_MAX (4) - 18412 * it does not matter what the current chip mode is 18413 */ 18414 for (i = 0; i < (num_segs * E1HVN_MAX); 18415 i += E1HVN_MAX) { 18416 addr = IGU_REG_PROD_CONS_MEMORY + 18417 (prod_offset + i)*4; 18418 REG_WR(sc, addr, 0); 18419 } 18420 /* send consumer update with 0 */ 18421 if (CHIP_INT_MODE_IS_BC(sc)) { 18422 bxe_ack_sb(sc, sc->igu_dsb_id, 18423 USTORM_ID, 0, IGU_INT_NOP, 1); 18424 bxe_ack_sb(sc, sc->igu_dsb_id, 18425 CSTORM_ID, 0, IGU_INT_NOP, 1); 18426 bxe_ack_sb(sc, sc->igu_dsb_id, 18427 XSTORM_ID, 0, IGU_INT_NOP, 1); 18428 bxe_ack_sb(sc, sc->igu_dsb_id, 18429 TSTORM_ID, 0, IGU_INT_NOP, 1); 18430 bxe_ack_sb(sc, sc->igu_dsb_id, 18431 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18432 } else { 18433 bxe_ack_sb(sc, sc->igu_dsb_id, 18434 USTORM_ID, 0, IGU_INT_NOP, 1); 18435 bxe_ack_sb(sc, sc->igu_dsb_id, 18436 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18437 } 18438 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 18439 18440 /* !!! 
these should become driver const once 18441 rf-tool supports split-68 const */ 18442 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 18443 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 18444 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 18445 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 18446 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 18447 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 18448 } 18449 } 18450 18451 /* Reset PCIE errors for debug */ 18452 REG_WR(sc, 0x2114, 0xffffffff); 18453 REG_WR(sc, 0x2120, 0xffffffff); 18454 18455 if (CHIP_IS_E1x(sc)) { 18456 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 18457 main_mem_base = HC_REG_MAIN_MEMORY + 18458 SC_PORT(sc) * (main_mem_size * 4); 18459 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 18460 main_mem_width = 8; 18461 18462 val = REG_RD(sc, main_mem_prty_clr); 18463 if (val) { 18464 BLOGD(sc, DBG_LOAD, 18465 "Parity errors in HC block during function init (0x%x)!\n", 18466 val); 18467 } 18468 18469 /* Clear "false" parity errors in MSI-X table */ 18470 for (i = main_mem_base; 18471 i < main_mem_base + main_mem_size * 4; 18472 i += main_mem_width) { 18473 bxe_read_dmae(sc, i, main_mem_width / 4); 18474 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 18475 i, main_mem_width / 4); 18476 } 18477 /* Clear HC parity attention */ 18478 REG_RD(sc, main_mem_prty_clr); 18479 } 18480 18481#if 1 18482 /* Enable STORMs SP logging */ 18483 REG_WR8(sc, BAR_USTRORM_INTMEM + 18484 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18485 REG_WR8(sc, BAR_TSTRORM_INTMEM + 18486 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18487 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18488 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18489 REG_WR8(sc, BAR_XSTRORM_INTMEM + 18490 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18491#endif 18492 18493 elink_phy_probe(&sc->link_params); 18494 18495 return (0); 18496} 18497 18498static void 18499bxe_link_reset(struct bxe_softc *sc) 18500{ 18501 if (!BXE_NOMCP(sc)) { 18502 bxe_acquire_phy_lock(sc); 18503 elink_lfa_reset(&sc->link_params, &sc->link_vars); 18504 bxe_release_phy_lock(sc); 18505 } else { 18506 if (!CHIP_REV_IS_SLOW(sc)) { 18507 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 18508 } 18509 } 18510} 18511 18512static void 18513bxe_reset_port(struct bxe_softc *sc) 18514{ 18515 int port = SC_PORT(sc); 18516 uint32_t val; 18517 18518 ELINK_DEBUG_P0(sc, "bxe_reset_port called\n"); 18519 /* reset physical Link */ 18520 bxe_link_reset(sc); 18521 18522 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18523 18524 /* Do not rcv packets to BRB */ 18525 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18526 /* Do not direct rcv packets that are not for MCP to the BRB */ 18527 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18528 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18529 18530 /* Configure AEU */ 18531 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18532 18533 DELAY(100000); 18534 18535 /* Check for BRB port occupancy */ 18536 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18537 if (val) { 18538 BLOGD(sc, DBG_LOAD, 18539 "BRB1 is not empty, %d blocks are occupied\n", val); 18540 } 18541 18542 /* TODO: Close Doorbell port? 
*/ 18543} 18544 18545static void 18546bxe_ilt_wr(struct bxe_softc *sc, 18547 uint32_t index, 18548 bus_addr_t addr) 18549{ 18550 int reg; 18551 uint32_t wb_write[2]; 18552 18553 if (CHIP_IS_E1(sc)) { 18554 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18555 } else { 18556 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18557 } 18558 18559 wb_write[0] = ONCHIP_ADDR1(addr); 18560 wb_write[1] = ONCHIP_ADDR2(addr); 18561 REG_WR_DMAE(sc, reg, wb_write, 2); 18562} 18563 18564static void 18565bxe_clear_func_ilt(struct bxe_softc *sc, 18566 uint32_t func) 18567{ 18568 uint32_t i, base = FUNC_ILT_BASE(func); 18569 for (i = base; i < base + ILT_PER_FUNC; i++) { 18570 bxe_ilt_wr(sc, i, 0); 18571 } 18572} 18573 18574static void 18575bxe_reset_func(struct bxe_softc *sc) 18576{ 18577 struct bxe_fastpath *fp; 18578 int port = SC_PORT(sc); 18579 int func = SC_FUNC(sc); 18580 int i; 18581 18582 /* Disable the function in the FW */ 18583 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18584 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18585 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18586 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18587 18588 /* FP SBs */ 18589 FOR_EACH_ETH_QUEUE(sc, i) { 18590 fp = &sc->fp[i]; 18591 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18592 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18593 SB_DISABLED); 18594 } 18595 18596 /* SP SB */ 18597 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18598 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18599 SB_DISABLED); 18600 18601 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18602 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18603 } 18604 18605 /* Configure IGU */ 18606 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18607 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18608 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18609 } else { 18610 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18611 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18612 } 18613 18614 if (CNIC_LOADED(sc)) { 18615 /* Disable Timer scan */ 18616 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18617 /* 18618 * Wait for at least 10ms and up to 2 seconds for the timers 18619 * scan to complete 18620 */ 18621 for (i = 0; i < 200; i++) { 18622 DELAY(10000); 18623 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18624 break; 18625 } 18626 } 18627 18628 /* Clear ILT */ 18629 bxe_clear_func_ilt(sc, func); 18630 18631 /* 18632 * Timers workaround bug for E2: if this is vnic-3, 18633 * we need to set the entire ilt range for these timers. 
18634 */ 18635 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18636 struct ilt_client_info ilt_cli; 18637 /* use dummy TM client */ 18638 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18639 ilt_cli.start = 0; 18640 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18641 ilt_cli.client_num = ILT_CLIENT_TM; 18642 18643 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18644 } 18645 18646 /* this assumes that reset_port() was called before reset_func() */ 18647 if (!CHIP_IS_E1x(sc)) { 18648 bxe_pf_disable(sc); 18649 } 18650 18651 sc->dmae_ready = 0; 18652} 18653 18654static int 18655bxe_gunzip_init(struct bxe_softc *sc) 18656{ 18657 return (0); 18658} 18659 18660static void 18661bxe_gunzip_end(struct bxe_softc *sc) 18662{ 18663 return; 18664} 18665 18666static int 18667bxe_init_firmware(struct bxe_softc *sc) 18668{ 18669 if (CHIP_IS_E1(sc)) { 18670 ecore_init_e1_firmware(sc); 18671 sc->iro_array = e1_iro_arr; 18672 } else if (CHIP_IS_E1H(sc)) { 18673 ecore_init_e1h_firmware(sc); 18674 sc->iro_array = e1h_iro_arr; 18675 } else if (!CHIP_IS_E1x(sc)) { 18676 ecore_init_e2_firmware(sc); 18677 sc->iro_array = e2_iro_arr; 18678 } else { 18679 BLOGE(sc, "Unsupported chip revision\n"); 18680 return (-1); 18681 } 18682 18683 return (0); 18684} 18685 18686static void 18687bxe_release_firmware(struct bxe_softc *sc) 18688{ 18689 /* Do nothing */ 18690 return; 18691} 18692 18693static int 18694ecore_gunzip(struct bxe_softc *sc, 18695 const uint8_t *zbuf, 18696 int len) 18697{ 18698 /* XXX : Implement... */ 18699 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18700 return (FALSE); 18701} 18702 18703static void 18704ecore_reg_wr_ind(struct bxe_softc *sc, 18705 uint32_t addr, 18706 uint32_t val) 18707{ 18708 bxe_reg_wr_ind(sc, addr, val); 18709} 18710 18711static void 18712ecore_write_dmae_phys_len(struct bxe_softc *sc, 18713 bus_addr_t phys_addr, 18714 uint32_t addr, 18715 uint32_t len) 18716{ 18717 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18718} 18719 18720void 18721ecore_storm_memset_struct(struct bxe_softc *sc, 18722 uint32_t addr, 18723 size_t size, 18724 uint32_t *data) 18725{ 18726 uint8_t i; 18727 for (i = 0; i < size/4; i++) { 18728 REG_WR(sc, addr + (i * 4), data[i]); 18729 } 18730} 18731 18732 18733/* 18734 * character device - ioctl interface definitions 18735 */ 18736 18737 18738#include "bxe_dump.h" 18739#include "bxe_ioctl.h" 18740#include <sys/conf.h> 18741 18742static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18743 struct thread *td); 18744 18745static struct cdevsw bxe_cdevsw = { 18746 .d_version = D_VERSION, 18747 .d_ioctl = bxe_eioctl, 18748 .d_name = "bxecnic", 18749}; 18750 18751#define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 
0 : (sc->pcie_func & 1)) 18752 18753 18754#define DUMP_ALL_PRESETS 0x1FFF 18755#define DUMP_MAX_PRESETS 13 18756#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) 18757#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) 18758#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) 18759#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) 18760#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) 18761 18762#define IS_REG_IN_PRESET(presets, idx) \ 18763 ((presets & (1 << (idx-1))) == (1 << (idx-1))) 18764 18765 18766static int 18767bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) 18768{ 18769 if (CHIP_IS_E1(sc)) 18770 return dump_num_registers[0][preset-1]; 18771 else if (CHIP_IS_E1H(sc)) 18772 return dump_num_registers[1][preset-1]; 18773 else if (CHIP_IS_E2(sc)) 18774 return dump_num_registers[2][preset-1]; 18775 else if (CHIP_IS_E3A0(sc)) 18776 return dump_num_registers[3][preset-1]; 18777 else if (CHIP_IS_E3B0(sc)) 18778 return dump_num_registers[4][preset-1]; 18779 else 18780 return 0; 18781} 18782 18783static int 18784bxe_get_total_regs_len32(struct bxe_softc *sc) 18785{ 18786 uint32_t preset_idx; 18787 int regdump_len32 = 0; 18788 18789 18790 /* Calculate the total preset regs length */ 18791 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18792 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); 18793 } 18794 18795 return regdump_len32; 18796} 18797 18798static const uint32_t * 18799__bxe_get_page_addr_ar(struct bxe_softc *sc) 18800{ 18801 if (CHIP_IS_E2(sc)) 18802 return page_vals_e2; 18803 else if (CHIP_IS_E3(sc)) 18804 return page_vals_e3; 18805 else 18806 return NULL; 18807} 18808 18809static uint32_t 18810__bxe_get_page_reg_num(struct bxe_softc *sc) 18811{ 18812 if (CHIP_IS_E2(sc)) 18813 return PAGE_MODE_VALUES_E2; 18814 else if (CHIP_IS_E3(sc)) 18815 return PAGE_MODE_VALUES_E3; 18816 else 18817 return 0; 18818} 18819 18820static const uint32_t * 18821__bxe_get_page_write_ar(struct bxe_softc *sc) 18822{ 18823 if (CHIP_IS_E2(sc)) 18824 return page_write_regs_e2; 18825 else if (CHIP_IS_E3(sc)) 18826 return page_write_regs_e3; 18827 else 18828 return NULL; 18829} 18830 18831static uint32_t 18832__bxe_get_page_write_num(struct bxe_softc *sc) 18833{ 18834 if (CHIP_IS_E2(sc)) 18835 return PAGE_WRITE_REGS_E2; 18836 else if (CHIP_IS_E3(sc)) 18837 return PAGE_WRITE_REGS_E3; 18838 else 18839 return 0; 18840} 18841 18842static const struct reg_addr * 18843__bxe_get_page_read_ar(struct bxe_softc *sc) 18844{ 18845 if (CHIP_IS_E2(sc)) 18846 return page_read_regs_e2; 18847 else if (CHIP_IS_E3(sc)) 18848 return page_read_regs_e3; 18849 else 18850 return NULL; 18851} 18852 18853static uint32_t 18854__bxe_get_page_read_num(struct bxe_softc *sc) 18855{ 18856 if (CHIP_IS_E2(sc)) 18857 return PAGE_READ_REGS_E2; 18858 else if (CHIP_IS_E3(sc)) 18859 return PAGE_READ_REGS_E3; 18860 else 18861 return 0; 18862} 18863 18864static bool 18865bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) 18866{ 18867 if (CHIP_IS_E1(sc)) 18868 return IS_E1_REG(reg_info->chips); 18869 else if (CHIP_IS_E1H(sc)) 18870 return IS_E1H_REG(reg_info->chips); 18871 else if (CHIP_IS_E2(sc)) 18872 return IS_E2_REG(reg_info->chips); 18873 else if (CHIP_IS_E3A0(sc)) 18874 return IS_E3A0_REG(reg_info->chips); 18875 else if (CHIP_IS_E3B0(sc)) 18876 return IS_E3B0_REG(reg_info->chips); 18877 else 18878 return 0; 18879} 18880 18881static bool 18882bxe_is_wreg_in_chip(struct bxe_softc *sc, const 
struct wreg_addr *wreg_info) 18883{ 18884 if (CHIP_IS_E1(sc)) 18885 return IS_E1_REG(wreg_info->chips); 18886 else if (CHIP_IS_E1H(sc)) 18887 return IS_E1H_REG(wreg_info->chips); 18888 else if (CHIP_IS_E2(sc)) 18889 return IS_E2_REG(wreg_info->chips); 18890 else if (CHIP_IS_E3A0(sc)) 18891 return IS_E3A0_REG(wreg_info->chips); 18892 else if (CHIP_IS_E3B0(sc)) 18893 return IS_E3B0_REG(wreg_info->chips); 18894 else 18895 return 0; 18896} 18897 18898/** 18899 * bxe_read_pages_regs - read "paged" registers 18900 * 18901 * @sc: driver handle 18902 * @p: output buffer 18903 * 18904 * Reads "paged" memories: memories that may only be read by first writing to a 18905 * specific address ("write address") and then reading from a specific address 18906 * ("read address"). There may be more than one write address per "page" and 18907 * more than one read address per write address. 18908 */ 18909static void 18910bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18911{ 18912 uint32_t i, j, k, n; 18913 18914 /* addresses of the paged registers */ 18915 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); 18916 /* number of paged registers */ 18917 int num_pages = __bxe_get_page_reg_num(sc); 18918 /* write addresses */ 18919 const uint32_t *write_addr = __bxe_get_page_write_ar(sc); 18920 /* number of write addresses */ 18921 int write_num = __bxe_get_page_write_num(sc); 18922 /* read addresses info */ 18923 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); 18924 /* number of read addresses */ 18925 int read_num = __bxe_get_page_read_num(sc); 18926 uint32_t addr, size; 18927 18928 for (i = 0; i < num_pages; i++) { 18929 for (j = 0; j < write_num; j++) { 18930 REG_WR(sc, write_addr[j], page_addr[i]); 18931 18932 for (k = 0; k < read_num; k++) { 18933 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { 18934 size = read_addr[k].size; 18935 for (n = 0; n < size; n++) { 18936 addr = read_addr[k].addr + n*4; 18937 *p++ = REG_RD(sc, addr); 18938 } 18939 } 18940 } 18941 } 18942 } 18943 return; 18944} 18945 18946 18947static int 18948bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18949{ 18950 uint32_t i, j, addr; 18951 const struct wreg_addr *wreg_addr_p = NULL; 18952 18953 if (CHIP_IS_E1(sc)) 18954 wreg_addr_p = &wreg_addr_e1; 18955 else if (CHIP_IS_E1H(sc)) 18956 wreg_addr_p = &wreg_addr_e1h; 18957 else if (CHIP_IS_E2(sc)) 18958 wreg_addr_p = &wreg_addr_e2; 18959 else if (CHIP_IS_E3A0(sc)) 18960 wreg_addr_p = &wreg_addr_e3; 18961 else if (CHIP_IS_E3B0(sc)) 18962 wreg_addr_p = &wreg_addr_e3b0; 18963 else 18964 return (-1); 18965 18966 /* Read the idle_chk registers */ 18967 for (i = 0; i < IDLE_REGS_COUNT; i++) { 18968 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 18969 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { 18970 for (j = 0; j < idle_reg_addrs[i].size; j++) 18971 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); 18972 } 18973 } 18974 18975 /* Read the regular registers */ 18976 for (i = 0; i < REGS_COUNT; i++) { 18977 if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) && 18978 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { 18979 for (j = 0; j < reg_addrs[i].size; j++) 18980 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); 18981 } 18982 } 18983 18984 /* Read the CAM registers */ 18985 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && 18986 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { 18987 for (i = 0; i < wreg_addr_p->size; i++) { 18988 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); 18989 18990 /* In case of wreg_addr register, read additional 18991 
registers from read_regs array 18992 */ 18993 for (j = 0; j < wreg_addr_p->read_regs_count; j++) { 18994 addr = *(wreg_addr_p->read_regs); 18995 *p++ = REG_RD(sc, addr + j*4); 18996 } 18997 } 18998 } 18999 19000 /* Paged registers are supported in E2 & E3 only */ 19001 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 19002 /* Read "paged" registers */ 19003 bxe_read_pages_regs(sc, p, preset); 19004 } 19005 19006 return 0; 19007} 19008 19009int 19010bxe_grc_dump(struct bxe_softc *sc) 19011{ 19012 int rval = 0; 19013 uint32_t preset_idx; 19014 uint8_t *buf; 19015 uint32_t size; 19016 struct dump_header *d_hdr; 19017 uint32_t i; 19018 uint32_t reg_val; 19019 uint32_t reg_addr; 19020 uint32_t cmd_offset; 19021 struct ecore_ilt *ilt = SC_ILT(sc); 19022 struct bxe_fastpath *fp; 19023 struct ilt_client_info *ilt_cli; 19024 int grc_dump_size; 19025 19026 19027 if (sc->grcdump_done || sc->grcdump_started) 19028 return (rval); 19029 19030 sc->grcdump_started = 1; 19031 BLOGI(sc, "Started collecting grcdump\n"); 19032 19033 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19034 sizeof(struct dump_header); 19035 19036 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); 19037 19038 if (sc->grc_dump == NULL) { 19039 BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); 19040 return(ENOMEM); 19041 } 19042 19043 19044 19045 /* Disable parity attentions as long as following dump may 19046 * cause false alarms by reading never written registers. We 19047 * will re-enable parity attentions right after the dump. 19048 */ 19049 19050 /* Disable parity on path 0 */ 19051 bxe_pretend_func(sc, 0); 19052 19053 ecore_disable_blocks_parity(sc); 19054 19055 /* Disable parity on path 1 */ 19056 bxe_pretend_func(sc, 1); 19057 ecore_disable_blocks_parity(sc); 19058 19059 /* Return to current function */ 19060 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 19061 19062 buf = sc->grc_dump; 19063 d_hdr = sc->grc_dump; 19064 19065 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; 19066 d_hdr->version = BNX2X_DUMP_VERSION; 19067 d_hdr->preset = DUMP_ALL_PRESETS; 19068 19069 if (CHIP_IS_E1(sc)) { 19070 d_hdr->dump_meta_data = DUMP_CHIP_E1; 19071 } else if (CHIP_IS_E1H(sc)) { 19072 d_hdr->dump_meta_data = DUMP_CHIP_E1H; 19073 } else if (CHIP_IS_E2(sc)) { 19074 d_hdr->dump_meta_data = DUMP_CHIP_E2 | 19075 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19076 } else if (CHIP_IS_E3A0(sc)) { 19077 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | 19078 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19079 } else if (CHIP_IS_E3B0(sc)) { 19080 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | 19081 (BXE_PATH(sc) ? 
DUMP_PATH_1 : DUMP_PATH_0); 19082 } 19083 19084 buf += sizeof(struct dump_header); 19085 19086 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 19087 19088 /* Skip presets with IOR */ 19089 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || 19090 (preset_idx == 11)) 19091 continue; 19092 19093 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); 19094 19095 if (rval) 19096 break; 19097 19098 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); 19099 19100 buf += size; 19101 } 19102 19103 bxe_pretend_func(sc, 0); 19104 ecore_clear_blocks_parity(sc); 19105 ecore_enable_blocks_parity(sc); 19106 19107 bxe_pretend_func(sc, 1); 19108 ecore_clear_blocks_parity(sc); 19109 ecore_enable_blocks_parity(sc); 19110 19111 /* Return to current function */ 19112 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 19113 19114 19115 19116 if(sc->state == BXE_STATE_OPEN) { 19117 if(sc->fw_stats_req != NULL) { 19118 BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", 19119 (uintmax_t)sc->fw_stats_req_mapping, 19120 (uintmax_t)sc->fw_stats_data_mapping, 19121 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); 19122 } 19123 if(sc->def_sb != NULL) { 19124 BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", 19125 (void *)sc->def_sb_dma.paddr, sc->def_sb, 19126 sizeof(struct host_sp_status_block)); 19127 } 19128 if(sc->eq_dma.vaddr != NULL) { 19129 BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", 19130 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); 19131 } 19132 if(sc->sp_dma.vaddr != NULL) { 19133 BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", 19134 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, 19135 sizeof(struct bxe_slowpath)); 19136 } 19137 if(sc->spq_dma.vaddr != NULL) { 19138 BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", 19139 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); 19140 } 19141 if(sc->gz_buf_dma.vaddr != NULL) { 19142 BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", 19143 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, 19144 FW_BUF_SIZE); 19145 } 19146 for (i = 0; i < sc->num_queues; i++) { 19147 fp = &sc->fp[i]; 19148 if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && 19149 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL && 19150 fp->rx_sge_dma.vaddr != NULL) { 19151 19152 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 19153 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, 19154 sizeof(union bxe_host_hc_status_block)); 19155 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19156 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, 19157 (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); 19158 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19159 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, 19160 (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); 19161 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 19162 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, 19163 (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); 19164 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 19165 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, 19166 (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); 19167 } 19168 } 19169 if(ilt != NULL ) { 19170 ilt_cli = &ilt->clients[1]; 19171 if(ilt->lines != NULL) { 19172 for (i = ilt_cli->start; i <= ilt_cli->end; i++) { 19173 BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", 19174 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), 19175 ((struct 
bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); 19176 } 19177 } 19178 } 19179 19180 19181 cmd_offset = DMAE_REG_CMD_MEM; 19182 for (i = 0; i < 224; i++) { 19183 reg_addr = (cmd_offset +(i * 4)); 19184 reg_val = REG_RD(sc, reg_addr); 19185 BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, 19186 reg_addr, reg_val); 19187 } 19188 } 19189 19190 BLOGI(sc, "Collection of grcdump done\n"); 19191 sc->grcdump_done = 1; 19192 return(rval); 19193} 19194 19195static int 19196bxe_add_cdev(struct bxe_softc *sc) 19197{ 19198 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); 19199 19200 if (sc->eeprom == NULL) { 19201 BLOGW(sc, "Unable to allocate eeprom buffer\n"); 19202 return (-1); 19203 } 19204 19205 sc->ioctl_dev = make_dev(&bxe_cdevsw, 19206 sc->ifp->if_dunit, 19207 UID_ROOT, 19208 GID_WHEEL, 19209 0600, 19210 "%s", 19211 if_name(sc->ifp)); 19212 19213 if (sc->ioctl_dev == NULL) { 19214 free(sc->eeprom, M_DEVBUF); 19215 sc->eeprom = NULL; 19216 return (-1); 19217 } 19218 19219 sc->ioctl_dev->si_drv1 = sc; 19220 19221 return (0); 19222} 19223 19224static void 19225bxe_del_cdev(struct bxe_softc *sc) 19226{ 19227 if (sc->ioctl_dev != NULL) 19228 destroy_dev(sc->ioctl_dev); 19229 19230 if (sc->eeprom != NULL) { 19231 free(sc->eeprom, M_DEVBUF); 19232 sc->eeprom = NULL; 19233 } 19234 sc->ioctl_dev = NULL; 19235 19236 return; 19237} 19238 19239static bool bxe_is_nvram_accessible(struct bxe_softc *sc) 19240{ 19241 19242 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) 19243 return FALSE; 19244 19245 return TRUE; 19246} 19247 19248 19249static int 19250bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 19251{ 19252 int rval = 0; 19253 19254 if(!bxe_is_nvram_accessible(sc)) { 19255 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 19256 return (-EAGAIN); 19257 } 19258 rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); 19259 19260 19261 return (rval); 19262} 19263 19264static int 19265bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 19266{ 19267 int rval = 0; 19268 19269 if(!bxe_is_nvram_accessible(sc)) { 19270 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 19271 return (-EAGAIN); 19272 } 19273 rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); 19274 19275 return (rval); 19276} 19277 19278static int 19279bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) 19280{ 19281 int rval = 0; 19282 19283 switch (eeprom->eeprom_cmd) { 19284 19285 case BXE_EEPROM_CMD_SET_EEPROM: 19286 19287 rval = copyin(eeprom->eeprom_data, sc->eeprom, 19288 eeprom->eeprom_data_len); 19289 19290 if (rval) 19291 break; 19292 19293 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 19294 eeprom->eeprom_data_len); 19295 break; 19296 19297 case BXE_EEPROM_CMD_GET_EEPROM: 19298 19299 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 19300 eeprom->eeprom_data_len); 19301 19302 if (rval) { 19303 break; 19304 } 19305 19306 rval = copyout(sc->eeprom, eeprom->eeprom_data, 19307 eeprom->eeprom_data_len); 19308 break; 19309 19310 default: 19311 rval = EINVAL; 19312 break; 19313 } 19314 19315 if (rval) { 19316 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); 19317 } 19318 19319 return (rval); 19320} 19321 19322static int 19323bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) 19324{ 19325 uint32_t ext_phy_config; 19326 int port = SC_PORT(sc); 19327 int cfg_idx = bxe_get_link_cfg_idx(sc); 19328 19329 dev_p->supported = 
sc->port.supported[cfg_idx] | 19330 (sc->port.supported[cfg_idx ^ 1] & 19331 (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); 19332 dev_p->advertising = sc->port.advertising[cfg_idx]; 19333 if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == 19334 ELINK_ETH_PHY_SFP_1G_FIBER) { 19335 dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full); 19336 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); 19337 } 19338 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && 19339 !(sc->flags & BXE_MF_FUNC_DIS)) { 19340 dev_p->duplex = sc->link_vars.duplex; 19341 if (IS_MF(sc) && !BXE_NOMCP(sc)) 19342 dev_p->speed = bxe_get_mf_speed(sc); 19343 else 19344 dev_p->speed = sc->link_vars.line_speed; 19345 } else { 19346 dev_p->duplex = DUPLEX_UNKNOWN; 19347 dev_p->speed = SPEED_UNKNOWN; 19348 } 19349 19350 dev_p->port = bxe_media_detect(sc); 19351 19352 ext_phy_config = SHMEM_RD(sc, 19353 dev_info.port_hw_config[port].external_phy_config); 19354 if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == 19355 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 19356 dev_p->phy_address = sc->port.phy_addr; 19357 else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != 19358 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 19359 ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != 19360 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 19361 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); 19362 else 19363 dev_p->phy_address = 0; 19364 19365 if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) 19366 dev_p->autoneg = AUTONEG_ENABLE; 19367 else 19368 dev_p->autoneg = AUTONEG_DISABLE; 19369 19370 19371 return 0; 19372} 19373 19374static int 19375bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 19376 struct thread *td) 19377{ 19378 struct bxe_softc *sc; 19379 int rval = 0; 19380 device_t pci_dev; 19381 bxe_grcdump_t *dump = NULL; 19382 int grc_dump_size; 19383 bxe_drvinfo_t *drv_infop = NULL; 19384 bxe_dev_setting_t *dev_p; 19385 bxe_dev_setting_t dev_set; 19386 bxe_get_regs_t *reg_p; 19387 bxe_reg_rdw_t *reg_rdw_p; 19388 bxe_pcicfg_rdw_t *cfg_rdw_p; 19389 bxe_perm_mac_addr_t *mac_addr_p; 19390 19391 19392 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) 19393 return ENXIO; 19394 19395 pci_dev = sc->dev; 19396 19397 dump = (bxe_grcdump_t *)data; 19398 19399 switch(cmd) { 19400 19401 case BXE_GRC_DUMP_SIZE: 19402 dump->pci_func = sc->pcie_func; 19403 dump->grcdump_size = 19404 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19405 sizeof(struct dump_header); 19406 break; 19407 19408 case BXE_GRC_DUMP: 19409 19410 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19411 sizeof(struct dump_header); 19412 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || 19413 (dump->grcdump_size < grc_dump_size)) { 19414 rval = EINVAL; 19415 break; 19416 } 19417 19418 if((sc->trigger_grcdump) && (!sc->grcdump_done) && 19419 (!sc->grcdump_started)) { 19420 rval = bxe_grc_dump(sc); 19421 } 19422 19423 if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && 19424 (sc->grc_dump != NULL)) { 19425 dump->grcdump_dwords = grc_dump_size >> 2; 19426 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); 19427 free(sc->grc_dump, M_DEVBUF); 19428 sc->grc_dump = NULL; 19429 sc->grcdump_started = 0; 19430 sc->grcdump_done = 0; 19431 } 19432 19433 break; 19434 19435 case BXE_DRV_INFO: 19436 drv_infop = (bxe_drvinfo_t *)data; 19437 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); 19438 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", 
19439 BXE_DRIVER_VERSION); 19440 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", 19441 sc->devinfo.bc_ver_str); 19442 snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, 19443 "%s", sc->fw_ver_str); 19444 drv_infop->eeprom_dump_len = sc->devinfo.flash_size; 19445 drv_infop->reg_dump_len = 19446 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) 19447 + sizeof(struct dump_header); 19448 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", 19449 sc->pcie_bus, sc->pcie_device, sc->pcie_func); 19450 break; 19451 19452 case BXE_DEV_SETTING: 19453 dev_p = (bxe_dev_setting_t *)data; 19454 bxe_get_settings(sc, &dev_set); 19455 dev_p->supported = dev_set.supported; 19456 dev_p->advertising = dev_set.advertising; 19457 dev_p->speed = dev_set.speed; 19458 dev_p->duplex = dev_set.duplex; 19459 dev_p->port = dev_set.port; 19460 dev_p->phy_address = dev_set.phy_address; 19461 dev_p->autoneg = dev_set.autoneg; 19462 19463 break; 19464 19465 case BXE_GET_REGS: 19466 19467 reg_p = (bxe_get_regs_t *)data; 19468 grc_dump_size = reg_p->reg_buf_len; 19469 19470 if((!sc->grcdump_done) && (!sc->grcdump_started)) { 19471 bxe_grc_dump(sc); 19472 } 19473 if((sc->grcdump_done) && (sc->grcdump_started) && 19474 (sc->grc_dump != NULL)) { 19475 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); 19476 free(sc->grc_dump, M_DEVBUF); 19477 sc->grc_dump = NULL; 19478 sc->grcdump_started = 0; 19479 sc->grcdump_done = 0; 19480 } 19481 19482 break; 19483 19484 case BXE_RDW_REG: 19485 reg_rdw_p = (bxe_reg_rdw_t *)data; 19486 if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && 19487 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) 19488 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); 19489 19490 if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && 19491 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) 19492 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); 19493 19494 break; 19495 19496 case BXE_RDW_PCICFG: 19497 cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; 19498 if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { 19499 19500 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, 19501 cfg_rdw_p->cfg_width); 19502 19503 } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { 19504 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, 19505 cfg_rdw_p->cfg_width); 19506 } else { 19507 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); 19508 } 19509 break; 19510 19511 case BXE_MAC_ADDR: 19512 mac_addr_p = (bxe_perm_mac_addr_t *)data; 19513 snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", 19514 sc->mac_addr_str); 19515 break; 19516 19517 case BXE_EEPROM: 19518 rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); 19519 break; 19520 19521 19522 default: 19523 break; 19524 } 19525 19526 return (rval); 19527} 19528
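/*
 * A minimal, hypothetical userland sketch of the GRC dump flow handled by
 * bxe_eioctl() above, assuming the bxe_grcdump_t layout from bxe_ioctl.h and
 * a device node named after the interface (see bxe_add_cdev()):
 *
 *   bxe_grcdump_t dump = {0};
 *   int fd = open("/dev/bxe0", O_RDWR);
 *   ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);   // learn the required buffer size
 *   dump.grcdump = malloc(dump.grcdump_size);
 *   ioctl(fd, BXE_GRC_DUMP, &dump);        // copy the collected dump out
 *
 * Note that the BXE_GRC_DUMP case returns EINVAL unless sc->trigger_grcdump
 * has been set beforehand (collection is armed elsewhere in the driver).
 */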