bxe.c revision 295823
/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/bxe/bxe.c 295823 2016-02-19 21:32:49Z davidcs $");

#define BXE_DRIVER_VERSION "1.78.79"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define it as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 VF 10GbE"
    },
#endif
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
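/*
 * Illustrative sketch (not from the original source): bxe_probe() walks
 * the bxe_devs table above until the all-zero/NULL sentinel, matching the
 * candidate device's PCI IDs, roughly:
 *
 *   struct bxe_device_type *t;
 *   for (t = bxe_devs; t->bxe_name != NULL; t++) {
 *       if ((pci_get_vendor(dev) == t->bxe_vid) &&
 *           (pci_get_device(dev) == t->bxe_did)) {
 *           // found: format t->bxe_name into a BXE_DEVDESC_MAX buffer
 *       }
 *   }
 *
 * (the member names bxe_vid/bxe_did/bxe_name stand in for the fields of
 * struct bxe_device_type, which is declared in bxe.h)
 */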
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
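/*
 * Usage note (illustrative, not from the original source): because these
 * OIDs are created with CTLFLAG_RDTUN/CTLFLAG_TUN, they can be seeded as
 * loader tunables before the module attaches, e.g. in /boot/loader.conf:
 *
 *   hw.bxe.debug="0x1"        # the values shown are examples only,
 *   hw.bxe.interrupt_mode="2" # not recommendations
 *   hw.bxe.queue_count="8"
 */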
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes" },
#if 0
    { STATS_OFFSET32(recoverable_error),
      4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
      4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi" },
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls" },
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts" },
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts" },
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl" },
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors" },
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors" },
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp" },
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached" },
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts" },
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors" },
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp" },
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp" },
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso" },
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits" },
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures" },
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full" },
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth" },
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure" },
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth" },
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std" },
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso" },
#if 0
    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6" },
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp" },
#endif
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf" },
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred" },
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff" },
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts" },
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures" },
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed" },
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx" },
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx" },
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge" },
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa" }
};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames" },
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes" },
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls" },
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts" },
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts" },
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts" },
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl" },
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts" },
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors" },
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp" },
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached" },
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts" },
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits" },
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures" },
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full" },
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth" },
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure" },
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth" },
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std" },
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso" },
#if 0
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, "tx_unsupported_tso_request_ipv6" },
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, "tx_unsupported_tso_request_not_tcp" },
#endif
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf" },
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred" },
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff" },
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts" },
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx" },
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx" },
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge" },
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa" }
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
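/*
 * Illustrative sketch (not from the original source): STATS_OFFSET32()
 * turns a member offset into a dword index, so a stats reader can treat
 * the block as a uint32_t array. 64-bit counters occupy two consecutive
 * dwords, "_hi" first, e.g. (assuming the softc embeds the block as
 * sc->eth_stats):
 *
 *   uint32_t *vals = (uint32_t *)&sc->eth_stats;
 *   uint32_t  idx  = STATS_OFFSET32(total_bytes_received_hi);
 *   uint64_t  rx_bytes = ((uint64_t)vals[idx] << 32) | vals[idx + 1];
 */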
"mbuf_alloc_tx"}, 666 { Q_STATS_OFFSET32(mbuf_alloc_rx), 667 4, "mbuf_alloc_rx"}, 668 { Q_STATS_OFFSET32(mbuf_alloc_sge), 669 4, "mbuf_alloc_sge"}, 670 { Q_STATS_OFFSET32(mbuf_alloc_tpa), 671 4, "mbuf_alloc_tpa"} 672}; 673 674#define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) 675#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) 676 677 678static void bxe_cmng_fns_init(struct bxe_softc *sc, 679 uint8_t read_cfg, 680 uint8_t cmng_type); 681static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); 682static void storm_memset_cmng(struct bxe_softc *sc, 683 struct cmng_init *cmng, 684 uint8_t port); 685static void bxe_set_reset_global(struct bxe_softc *sc); 686static void bxe_set_reset_in_progress(struct bxe_softc *sc); 687static uint8_t bxe_reset_is_done(struct bxe_softc *sc, 688 int engine); 689static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); 690static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, 691 uint8_t *global, 692 uint8_t print); 693static void bxe_int_disable(struct bxe_softc *sc); 694static int bxe_release_leader_lock(struct bxe_softc *sc); 695static void bxe_pf_disable(struct bxe_softc *sc); 696static void bxe_free_fp_buffers(struct bxe_softc *sc); 697static inline void bxe_update_rx_prod(struct bxe_softc *sc, 698 struct bxe_fastpath *fp, 699 uint16_t rx_bd_prod, 700 uint16_t rx_cq_prod, 701 uint16_t rx_sge_prod); 702static void bxe_link_report_locked(struct bxe_softc *sc); 703static void bxe_link_report(struct bxe_softc *sc); 704static void bxe_link_status_update(struct bxe_softc *sc); 705static void bxe_periodic_callout_func(void *xsc); 706static void bxe_periodic_start(struct bxe_softc *sc); 707static void bxe_periodic_stop(struct bxe_softc *sc); 708static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 709 uint16_t prev_index, 710 uint16_t index); 711static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 712 int queue); 713static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 714 uint16_t index); 715static uint8_t bxe_txeof(struct bxe_softc *sc, 716 struct bxe_fastpath *fp); 717static void bxe_task_fp(struct bxe_fastpath *fp); 718static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, 719 struct mbuf *m, 720 uint8_t contents); 721static int bxe_alloc_mem(struct bxe_softc *sc); 722static void bxe_free_mem(struct bxe_softc *sc); 723static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); 724static void bxe_free_fw_stats_mem(struct bxe_softc *sc); 725static int bxe_interrupt_attach(struct bxe_softc *sc); 726static void bxe_interrupt_detach(struct bxe_softc *sc); 727static void bxe_set_rx_mode(struct bxe_softc *sc); 728static int bxe_init_locked(struct bxe_softc *sc); 729static int bxe_stop_locked(struct bxe_softc *sc); 730static __noinline int bxe_nic_load(struct bxe_softc *sc, 731 int load_mode); 732static __noinline int bxe_nic_unload(struct bxe_softc *sc, 733 uint32_t unload_mode, 734 uint8_t keep_link); 735 736static void bxe_handle_sp_tq(void *context, int pending); 737static void bxe_handle_fp_tq(void *context, int pending); 738 739static int bxe_add_cdev(struct bxe_softc *sc); 740static void bxe_del_cdev(struct bxe_softc *sc); 741static int bxe_grc_dump(struct bxe_softc *sc); 742 743/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ 744uint32_t 745calc_crc32(uint8_t *crc32_packet, 746 uint32_t crc32_length, 747 uint32_t crc32_seed, 748 uint8_t complement) 749{ 750 uint32_t byte = 0; 751 uint32_t bit = 0; 752 uint8_t msb = 0; 753 uint32_t temp = 0; 754 uint32_t shft = 0; 755 uint8_t current_byte = 0; 756 uint32_t 
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    uint32_t byte         = 0;
    uint32_t bit          = 0;
    uint8_t  msb          = 0;
    uint32_t temp         = 0;
    uint32_t shft         = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
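/*
 * Illustrative usage sketch (not from the original source): calc_crc32()
 * silently returns the seed unless the length is a non-zero multiple of
 * 8 bytes, so callers pad their buffers accordingly, e.g.:
 *
 *   uint8_t  buf[8] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01, 0x00, 0x00 };
 *   uint32_t crc    = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */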
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When a callback is supplied, bus_dmamap_load itself returns 0, so any
 * mapping failure is recorded here (paddr and nseg are zeroed) for the
 * caller to detect.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
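/*
 * Illustrative usage sketch (not from the original source):
 *
 *   struct bxe_dma dma;
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0)
 *       return (ENOMEM);    // tag/memory/map were already cleaned up
 *   ...
 *   bxe_dma_free(sc, &dma); // no-op (beyond the memset) if size is 0
 */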
/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

#if 0
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }
}
#endif
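/*
 * Illustrative sketch (not from the original source): this is a classic
 * address/data window in PCI config space -- the GRC address is written
 * to PCICFG_GRC_ADDRESS and the payload moves through PCICFG_GRC_DATA,
 * so a read during early init looks like:
 *
 *   uint32_t val = bxe_reg_rd_ind(sc, MISC_REG_CHIP_NUM); // register name
 *                                                         // for illustration
 */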
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}

/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * were to attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order,
             * but ethtool sees it as an array of bytes;
             * converting to big-endian does the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
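/*
 * Illustrative usage sketch (not from the original source): both offset
 * and length must be dword-aligned, e.g. reading 16 bytes at 0x400:
 *
 *   uint8_t buf[16];
 *   if (bxe_nvram_read(sc, 0x400, buf, sizeof(buf)) != 0)
 *       BLOGE(sc, "nvram read failed\n");
 */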
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes;
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags       = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_command *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_command *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
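/*
 * Illustrative sketch (not from the original source): a caller prepares a
 * command, fills in addresses/length, then issues it synchronously; here
 * grc_addr, paddr and len32 stand in for caller state (a GRC -> PCI copy):
 *
 *   struct dmae_command dmae;
 *   bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
 *   dmae.src_addr_lo = (grc_addr >> 2); // GRC addrs have dword resolution
 *   dmae.src_addr_hi = 0;
 *   dmae.dst_addr_lo = U64_LO(paddr);
 *   dmae.dst_addr_hi = U64_HI(paddr);
 *   dmae.len         = len32;
 *   rc = bxe_issue_dmae_with_comp(sc, &dmae);
 */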
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len    -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
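/*
 * Illustrative note (not from the original source): len is counted in
 * dwords while offset is in bytes, hence the (dmae_wr_max * 4) advance.
 * If, say, the per-command maximum were 0x400 dwords, a 0x500-dword
 * request would issue one 0x400-dword DMAE at byte offset 0 and a final
 * 0x100-dword DMAE at byte offset 0x1000.
 */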
0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 1860 uint32_t addr = 1861 (BAR_CSTRORM_INTMEM + 1862 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); 1863 uint8_t flags; 1864 1865 /* clear and set */ 1866 flags = REG_RD8(sc, addr); 1867 flags &= ~HC_INDEX_DATA_HC_ENABLED; 1868 flags |= enable_flag; 1869 REG_WR8(sc, addr, flags); 1870 1871 BLOGD(sc, DBG_LOAD, 1872 "port %d fw_sb_id %d sb_index %d disable %d\n", 1873 port, fw_sb_id, sb_index, disable); 1874} 1875 1876void 1877bxe_update_coalesce_sb_index(struct bxe_softc *sc, 1878 uint8_t fw_sb_id, 1879 uint8_t sb_index, 1880 uint8_t disable, 1881 uint16_t usec) 1882{ 1883 int port = SC_PORT(sc); 1884 uint8_t ticks = (usec / 4); /* XXX ??? */ 1885 1886 bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); 1887 1888 disable = (disable) ? 1 : ((usec) ? 0 : 1); 1889 bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); 1890} 1891 1892void 1893elink_cb_udelay(struct bxe_softc *sc, 1894 uint32_t usecs) 1895{ 1896 DELAY(usecs); 1897} 1898 1899uint32_t 1900elink_cb_reg_read(struct bxe_softc *sc, 1901 uint32_t reg_addr) 1902{ 1903 return (REG_RD(sc, reg_addr)); 1904} 1905 1906void 1907elink_cb_reg_write(struct bxe_softc *sc, 1908 uint32_t reg_addr, 1909 uint32_t val) 1910{ 1911 REG_WR(sc, reg_addr, val); 1912} 1913 1914void 1915elink_cb_reg_wb_write(struct bxe_softc *sc, 1916 uint32_t offset, 1917 uint32_t *wb_write, 1918 uint16_t len) 1919{ 1920 REG_WR_DMAE(sc, offset, wb_write, len); 1921} 1922 1923void 1924elink_cb_reg_wb_read(struct bxe_softc *sc, 1925 uint32_t offset, 1926 uint32_t *wb_write, 1927 uint16_t len) 1928{ 1929 REG_RD_DMAE(sc, offset, wb_write, len); 1930} 1931 1932uint8_t 1933elink_cb_path_id(struct bxe_softc *sc) 1934{ 1935 return (SC_PATH(sc)); 1936} 1937 1938void 1939elink_cb_event_log(struct bxe_softc *sc, 1940 const elink_log_id_t elink_log_id, 1941 ...) 
1942{ 1943 /* XXX */ 1944#if 0 1945 //va_list ap; 1946 va_start(ap, elink_log_id); 1947 _XXX_(sc, lm_log_id, ap); 1948 va_end(ap); 1949#endif 1950 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1951} 1952 1953static int 1954bxe_set_spio(struct bxe_softc *sc, 1955 int spio, 1956 uint32_t mode) 1957{ 1958 uint32_t spio_reg; 1959 1960 /* Only 2 SPIOs are configurable */ 1961 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1962 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1963 return (-1); 1964 } 1965 1966 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1967 1968 /* read SPIO and mask except the float bits */ 1969 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1970 1971 switch (mode) { 1972 case MISC_SPIO_OUTPUT_LOW: 1973 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1974 /* clear FLOAT and set CLR */ 1975 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1976 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1977 break; 1978 1979 case MISC_SPIO_OUTPUT_HIGH: 1980 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1981 /* clear FLOAT and set SET */ 1982 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1983 spio_reg |= (spio << MISC_SPIO_SET_POS); 1984 break; 1985 1986 case MISC_SPIO_INPUT_HI_Z: 1987 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1988 /* set FLOAT */ 1989 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1990 break; 1991 1992 default: 1993 break; 1994 } 1995 1996 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1997 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1998 1999 return (0); 2000} 2001 2002static int 2003bxe_gpio_read(struct bxe_softc *sc, 2004 int gpio_num, 2005 uint8_t port) 2006{ 2007 /* The GPIO should be swapped if swap register is set and active */ 2008 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2009 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2010 int gpio_shift = (gpio_num + 2011 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2012 uint32_t gpio_mask = (1 << gpio_shift); 2013 uint32_t gpio_reg; 2014 2015 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2016 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2017 return (-1); 2018 } 2019 2020 /* read GPIO value */ 2021 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2022 2023 /* get the requested pin value */ 2024 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2025} 2026 2027static int 2028bxe_gpio_write(struct bxe_softc *sc, 2029 int gpio_num, 2030 uint32_t mode, 2031 uint8_t port) 2032{ 2033 /* The GPIO should be swapped if swap register is set and active */ 2034 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2035 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2036 int gpio_shift = (gpio_num + 2037 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2038 uint32_t gpio_mask = (1 << gpio_shift); 2039 uint32_t gpio_reg; 2040 2041 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2042 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2043 return (-1); 2044 } 2045 2046 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2047 2048 /* read GPIO and mask except the float bits */ 2049 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2050 2051 switch (mode) { 2052 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2053 BLOGD(sc, DBG_PHY, 2054 "Set GPIO %d (shift %d) -> output low\n", 2055 gpio_num, gpio_shift); 2056 /* clear FLOAT and set CLR */ 2057 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2058 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2059 break; 2060 2061 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2062 BLOGD(sc, DBG_PHY, 2063 "Set GPIO %d (shift %d) -> output high\n", 2064 gpio_num, gpio_shift); 2065 /* clear FLOAT and set SET */ 2066 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2067 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2068 break; 2069 2070 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2071 BLOGD(sc, DBG_PHY, 2072 "Set GPIO %d (shift %d) -> input\n", 2073 gpio_num, gpio_shift); 2074 /* set FLOAT */ 2075 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2076 break; 2077 2078 default: 2079 break; 2080 } 2081 2082 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2083 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2084 2085 return (0); 2086} 2087 2088static int 2089bxe_gpio_mult_write(struct bxe_softc *sc, 2090 uint8_t pins, 2091 uint32_t mode) 2092{ 2093 uint32_t gpio_reg; 2094 2095 /* any port swapping should be handled by caller */ 2096 2097 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2098 2099 /* read GPIO and mask except the float bits */ 2100 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2101 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2102 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2103 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2104 2105 switch (mode) { 2106 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2107 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2108 /* set CLR */ 2109 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2110 break; 2111 2112 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2113 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2114 /* set SET */ 2115 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2116 break; 2117 2118 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2119 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2120 /* set FLOAT */ 2121 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2122 break; 2123 2124 default: 2125 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2126 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2127 return (-1); 2128 } 2129 2130 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2131 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2132 2133 return (0); 2134} 2135 2136static int 2137bxe_gpio_int_write(struct bxe_softc *sc, 2138 int gpio_num, 2139 uint32_t mode, 2140 uint8_t port) 2141{ 2142 /* The GPIO should be swapped if swap register is set and active */ 2143 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2144 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2145 int gpio_shift = (gpio_num + 2146 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2147 uint32_t gpio_mask = (1 << gpio_shift); 2148 uint32_t gpio_reg; 2149 2150 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2151 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2152 return (-1); 2153 } 2154 2155 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2156 2157 /* read GPIO int */ 2158 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2159 2160 switch (mode) { 2161 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2162 BLOGD(sc, DBG_PHY, 2163 "Clear GPIO INT %d (shift %d) -> output low\n", 2164 gpio_num, gpio_shift); 2165 /* clear SET and set CLR */ 2166 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2167 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2168 break; 2169 2170 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2171 BLOGD(sc, DBG_PHY, 2172 "Set GPIO INT %d (shift %d) -> output high\n", 2173 gpio_num, gpio_shift); 2174 /* clear CLR and set SET */ 2175 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2176 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2177 break; 2178 2179 default: 2180 break; 2181 } 2182 2183 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2184 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2185 2186 return (0); 2187} 2188 2189uint32_t 2190elink_cb_gpio_read(struct bxe_softc *sc, 2191 uint16_t gpio_num, 2192 uint8_t port) 2193{ 2194 return (bxe_gpio_read(sc, gpio_num, port)); 2195} 2196 2197uint8_t 2198elink_cb_gpio_write(struct bxe_softc *sc, 2199 uint16_t gpio_num, 2200 uint8_t mode, /* 0=low 1=high */ 2201 uint8_t port) 2202{ 2203 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2204} 2205 2206uint8_t 2207elink_cb_gpio_mult_write(struct bxe_softc *sc, 2208 uint8_t pins, 2209 uint8_t mode) /* 0=low 1=high */ 2210{ 2211 return (bxe_gpio_mult_write(sc, pins, mode)); 2212} 2213 2214uint8_t 2215elink_cb_gpio_int_write(struct bxe_softc *sc, 2216 uint16_t gpio_num, 2217 uint8_t mode, /* 0=low 1=high */ 2218 uint8_t port) 2219{ 2220 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2221} 2222 2223void 2224elink_cb_notify_link_changed(struct bxe_softc *sc) 2225{ 2226 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2227 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2228} 2229 2230/* send the MCP a request, block until there is a reply */ 2231uint32_t 2232elink_cb_fw_command(struct bxe_softc *sc, 2233 uint32_t command, 2234 uint32_t param) 2235{ 2236 int mb_idx = SC_FW_MB_IDX(sc); 2237 uint32_t seq; 2238 uint32_t rc = 0; 2239 uint32_t cnt = 1; 2240 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2241 2242 BXE_FWMB_LOCK(sc); 2243 2244 seq = ++sc->fw_seq; 2245 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2246 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2247 2248 BLOGD(sc, DBG_PHY, 2249 "wrote command 0x%08x to FW MB param 0x%08x\n", 2250 (command | seq), param); 2251 2252 /* Let the FW do its magic. Give it up to 5 seconds... */ 2253 do { 2254 DELAY(delay * 1000); 2255 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2256 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2257 2258 BLOGD(sc, DBG_PHY, 2259 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2260 cnt*delay, rc, seq); 2261 2262 /* is this a reply to our command? */ 2263 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2264 rc &= FW_MSG_CODE_MASK; 2265 } else { 2266 /* Ruh-roh!
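 The mailbox handshake above works as follows: the driver bumps sc->fw_seq, writes (command | seq) into drv_mb_header, and the MCP echoes the same sequence number back in fw_mb_header once it has consumed the command. Reaching this branch means the echoed sequence never matched within the roughly 5 second polling window. A minimal caller sketch (illustrative only; the command code shown is just one of the DRV_MSG_CODE_* values used elsewhere in this file):

     uint32_t code = elink_cb_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
     if (code == 0) {
         // no (valid) reply from the MCP; the caller must handle the fallback
     }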
*/ 2267 BLOGE(sc, "FW failed to respond!\n"); 2268 // XXX bxe_fw_dump(sc); 2269 rc = 0; 2270 } 2271 2272 BXE_FWMB_UNLOCK(sc); 2273 return (rc); 2274} 2275 2276static uint32_t 2277bxe_fw_command(struct bxe_softc *sc, 2278 uint32_t command, 2279 uint32_t param) 2280{ 2281 return (elink_cb_fw_command(sc, command, param)); 2282} 2283 2284static void 2285__storm_memset_dma_mapping(struct bxe_softc *sc, 2286 uint32_t addr, 2287 bus_addr_t mapping) 2288{ 2289 REG_WR(sc, addr, U64_LO(mapping)); 2290 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2291} 2292 2293static void 2294storm_memset_spq_addr(struct bxe_softc *sc, 2295 bus_addr_t mapping, 2296 uint16_t abs_fid) 2297{ 2298 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2299 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2300 __storm_memset_dma_mapping(sc, addr, mapping); 2301} 2302 2303static void 2304storm_memset_vf_to_pf(struct bxe_softc *sc, 2305 uint16_t abs_fid, 2306 uint16_t pf_id) 2307{ 2308 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2309 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2310 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2311 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2312} 2313 2314static void 2315storm_memset_func_en(struct bxe_softc *sc, 2316 uint16_t abs_fid, 2317 uint8_t enable) 2318{ 2319 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2320 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2321 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2322 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2323} 2324 2325static void 2326storm_memset_eq_data(struct bxe_softc *sc, 2327 struct event_ring_data *eq_data, 2328 uint16_t pfid) 2329{ 2330 uint32_t addr; 2331 size_t size; 2332 2333 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2334 size = sizeof(struct event_ring_data); 2335 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2336} 2337 2338static void 2339storm_memset_eq_prod(struct bxe_softc *sc, 2340 uint16_t eq_prod, 2341 uint16_t pfid) 2342{ 2343 uint32_t addr = (BAR_CSTRORM_INTMEM + 2344 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2345 REG_WR16(sc, addr, eq_prod); 2346} 2347 2348/* 2349 * Post a slowpath command. 2350 * 2351 * A slowpath command is used to propagate a configuration change through 2352 * the controller in a controlled manner, allowing each STORM processor and 2353 * other H/W blocks to phase in the change. The commands sent on the 2354 * slowpath are referred to as ramrods. Depending on the ramrod used the 2355 * completion of the ramrod will occur in different ways. Here's a 2356 * breakdown of ramrods and how they complete: 2357 * 2358 * RAMROD_CMD_ID_ETH_PORT_SETUP 2359 * Used to set up the leading connection on a port. Completes on the 2360 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2361 * 2362 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2363 * Used to set up an additional connection on a port. Completes on the 2364 * RCQ of the multi-queue/RSS connection being initialized. 2365 * 2366 * RAMROD_CMD_ID_ETH_STAT_QUERY 2367 * Used to force the storm processors to update the statistics database 2368 * in host memory. This ramrod is sent on the leading connection CID and 2369 * completes as an index increment of the CSTORM on the default status 2370 * block.
2371 * 2372 * RAMROD_CMD_ID_ETH_UPDATE 2373 * Used to update the state of the leading connection, usually to update 2374 * the RSS indirection table. Completes on the RCQ of the leading 2375 * connection. (Not currently used under FreeBSD until OS support becomes 2376 * available.) 2377 * 2378 * RAMROD_CMD_ID_ETH_HALT 2379 * Used when tearing down a connection prior to driver unload. Completes 2380 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2381 * use this on the leading connection. 2382 * 2383 * RAMROD_CMD_ID_ETH_SET_MAC 2384 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2385 * the RCQ of the leading connection. 2386 * 2387 * RAMROD_CMD_ID_ETH_CFC_DEL 2388 * Used when tearing down a connection prior to driver unload. Completes 2389 * on the RCQ of the leading connection (since the current connection 2390 * has been completely removed from controller memory). 2391 * 2392 * RAMROD_CMD_ID_ETH_PORT_DEL 2393 * Used to tear down the leading connection prior to driver unload, 2394 * typically fp[0]. Completes as an index increment of the CSTORM on the 2395 * default status block. 2396 * 2397 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2398 * Used for connection offload. Completes on the RCQ of the multi-queue 2399 * RSS connection that is being offloaded. (Not currently used under 2400 * FreeBSD.) 2401 * 2402 * There can only be one command pending per function. 2403 * 2404 * Returns: 2405 * 0 = Success, !0 = Failure. 2406 */ 2407 2408/* must be called under the spq lock */ 2409static inline 2410struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2411{ 2412 struct eth_spe *next_spe = sc->spq_prod_bd; 2413 2414 if (sc->spq_prod_bd == sc->spq_last_bd) { 2415 /* wrap back to the first eth_spq */ 2416 sc->spq_prod_bd = sc->spq; 2417 sc->spq_prod_idx = 0; 2418 } else { 2419 sc->spq_prod_bd++; 2420 sc->spq_prod_idx++; 2421 } 2422 2423 return (next_spe); 2424} 2425 2426/* must be called under the spq lock */ 2427static inline 2428void bxe_sp_prod_update(struct bxe_softc *sc) 2429{ 2430 int func = SC_FUNC(sc); 2431 2432 /* 2433 * Make sure that BD data is updated before writing the producer. 2434 * BD data is written to the memory, the producer is read from the 2435 * memory, thus we need a full memory barrier to ensure the ordering. 2436 */ 2437 mb(); 2438 2439 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2440 sc->spq_prod_idx); 2441 2442 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2443 BUS_SPACE_BARRIER_WRITE); 2444} 2445 2446/** 2447 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2448 * 2449 * @cmd: command to check 2450 * @cmd_type: command type 2451 */ 2452static inline 2453int bxe_is_contextless_ramrod(int cmd, 2454 int cmd_type) 2455{ 2456 if ((cmd_type == NONE_CONNECTION_TYPE) || 2457 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2458 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2459 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2460 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2461 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2462 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2463 return (TRUE); 2464 } else { 2465 return (FALSE); 2466 } 2467} 2468 2469/** 2470 * bxe_sp_post - place a single command on an SP ring 2471 * 2472 * @sc: driver handle 2473 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2474 * @cid: SW CID the command is related to 2475 * @data_hi: command private data address (high 32 bits) 2476 * @data_lo: command private data address (low 32 bits) 2477 * @cmd_type: command type (e.g. NONE, ETH) 2478 * 2479 * SP data is handled as if it's always an address pair, thus data fields are 2480 * not swapped to little endian in upper functions. Instead this function swaps 2481 * data as if it's two uint32 fields. 2482 */ 2483int 2484bxe_sp_post(struct bxe_softc *sc, 2485 int command, 2486 int cid, 2487 uint32_t data_hi, 2488 uint32_t data_lo, 2489 int cmd_type) 2490{ 2491 struct eth_spe *spe; 2492 uint16_t type; 2493 int common; 2494 2495 common = bxe_is_contextless_ramrod(command, cmd_type); 2496 2497 BXE_SP_LOCK(sc); 2498 2499 if (common) { 2500 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2501 BLOGE(sc, "EQ ring is full!\n"); 2502 BXE_SP_UNLOCK(sc); 2503 return (-1); 2504 } 2505 } else { 2506 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2507 BLOGE(sc, "SPQ ring is full!\n"); 2508 BXE_SP_UNLOCK(sc); 2509 return (-1); 2510 } 2511 } 2512 2513 spe = bxe_sp_get_next(sc); 2514 2515 /* CID needs port number to be encoded int it */ 2516 spe->hdr.conn_and_cmd_data = 2517 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2518 2519 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 2520 2521 /* TBD: Check if it works for VFs */ 2522 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 2523 SPE_HDR_FUNCTION_ID); 2524 2525 spe->hdr.type = htole16(type); 2526 2527 spe->data.update_data_addr.hi = htole32(data_hi); 2528 spe->data.update_data_addr.lo = htole32(data_lo); 2529 2530 /* 2531 * It's ok if the actual decrement is issued towards the memory 2532 * somewhere between the lock and unlock. Thus no more explict 2533 * memory barrier is needed. 2534 */ 2535 if (common) { 2536 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2537 } else { 2538 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2539 } 2540 2541 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2542 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2543 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2544 BLOGD(sc, DBG_SP, 2545 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2546 sc->spq_prod_idx, 2547 (uint32_t)U64_HI(sc->spq_dma.paddr), 2548 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2549 command, 2550 common, 2551 HW_CID(sc, cid), 2552 data_hi, 2553 data_lo, 2554 type, 2555 atomic_load_acq_long(&sc->cq_spq_left), 2556 atomic_load_acq_long(&sc->eq_spq_left)); 2557 2558 bxe_sp_prod_update(sc); 2559 2560 BXE_SP_UNLOCK(sc); 2561 return (0); 2562} 2563 2564/** 2565 * bxe_debug_print_ind_table - prints the indirection table configuration. 
2566 * 2567 * @sc: driver hanlde 2568 * @p: pointer to rss configuration 2569 */ 2570#if 0 2571static void 2572bxe_debug_print_ind_table(struct bxe_softc *sc, 2573 struct ecore_config_rss_params *p) 2574{ 2575 int i; 2576 2577 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); 2578 BLOGD(sc, DBG_LOAD, " 0x0000: "); 2579 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2580 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); 2581 2582 /* Print 4 bytes in a line */ 2583 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 2584 (((i + 1) & 0x3) == 0)) { 2585 BLOGD(sc, DBG_LOAD, "\n"); 2586 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); 2587 } 2588 } 2589 2590 BLOGD(sc, DBG_LOAD, "\n"); 2591} 2592#endif 2593 2594/* 2595 * FreeBSD Device probe function. 2596 * 2597 * Compares the device found to the driver's list of supported devices and 2598 * reports back to the bsd loader whether this is the right driver for the device. 2599 * This is the driver entry function called from the "kldload" command. 2600 * 2601 * Returns: 2602 * BUS_PROBE_DEFAULT on success, positive value on failure. 2603 */ 2604static int 2605bxe_probe(device_t dev) 2606{ 2607 struct bxe_softc *sc; 2608 struct bxe_device_type *t; 2609 char *descbuf; 2610 uint16_t did, sdid, svid, vid; 2611 2612 /* Find our device structure */ 2613 sc = device_get_softc(dev); 2614 sc->dev = dev; 2615 t = bxe_devs; 2616 2617 /* Get the data for the device to be probed. */ 2618 vid = pci_get_vendor(dev); 2619 did = pci_get_device(dev); 2620 svid = pci_get_subvendor(dev); 2621 sdid = pci_get_subdevice(dev); 2622 2623 BLOGD(sc, DBG_LOAD, 2624 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2625 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2626 2627 /* Look through the list of known devices for a match. */ 2628 while (t->bxe_name != NULL) { 2629 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2630 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2631 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2632 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2633 if (descbuf == NULL) 2634 return (ENOMEM); 2635 2636 /* Print out the device identity. 
*/ 2637 snprintf(descbuf, BXE_DEVDESC_MAX, 2638 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2639 (((pci_read_config(dev, PCIR_REVID, 4) & 2640 0xf0) >> 4) + 'A'), 2641 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2642 BXE_DRIVER_VERSION); 2643 2644 device_set_desc_copy(dev, descbuf); 2645 free(descbuf, M_TEMP); 2646 return (BUS_PROBE_DEFAULT); 2647 } 2648 t++; 2649 } 2650 2651 return (ENXIO); 2652} 2653 2654static void 2655bxe_init_mutexes(struct bxe_softc *sc) 2656{ 2657#ifdef BXE_CORE_LOCK_SX 2658 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2659 "bxe%d_core_lock", sc->unit); 2660 sx_init(&sc->core_sx, sc->core_sx_name); 2661#else 2662 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2663 "bxe%d_core_lock", sc->unit); 2664 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2665#endif 2666 2667 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2668 "bxe%d_sp_lock", sc->unit); 2669 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2670 2671 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2672 "bxe%d_dmae_lock", sc->unit); 2673 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2674 2675 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2676 "bxe%d_phy_lock", sc->unit); 2677 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2678 2679 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2680 "bxe%d_fwmb_lock", sc->unit); 2681 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2682 2683 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2684 "bxe%d_print_lock", sc->unit); 2685 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2686 2687 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2688 "bxe%d_stats_lock", sc->unit); 2689 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2690 2691 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2692 "bxe%d_mcast_lock", sc->unit); 2693 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2694} 2695 2696static void 2697bxe_release_mutexes(struct bxe_softc *sc) 2698{ 2699#ifdef BXE_CORE_LOCK_SX 2700 sx_destroy(&sc->core_sx); 2701#else 2702 if (mtx_initialized(&sc->core_mtx)) { 2703 mtx_destroy(&sc->core_mtx); 2704 } 2705#endif 2706 2707 if (mtx_initialized(&sc->sp_mtx)) { 2708 mtx_destroy(&sc->sp_mtx); 2709 } 2710 2711 if (mtx_initialized(&sc->dmae_mtx)) { 2712 mtx_destroy(&sc->dmae_mtx); 2713 } 2714 2715 if (mtx_initialized(&sc->port.phy_mtx)) { 2716 mtx_destroy(&sc->port.phy_mtx); 2717 } 2718 2719 if (mtx_initialized(&sc->fwmb_mtx)) { 2720 mtx_destroy(&sc->fwmb_mtx); 2721 } 2722 2723 if (mtx_initialized(&sc->print_mtx)) { 2724 mtx_destroy(&sc->print_mtx); 2725 } 2726 2727 if (mtx_initialized(&sc->stats_mtx)) { 2728 mtx_destroy(&sc->stats_mtx); 2729 } 2730 2731 if (mtx_initialized(&sc->mcast_mtx)) { 2732 mtx_destroy(&sc->mcast_mtx); 2733 } 2734} 2735 2736static void 2737bxe_tx_disable(struct bxe_softc* sc) 2738{ 2739 if_t ifp = sc->ifp; 2740 2741 /* tell the stack the driver is stopped and TX queue is full */ 2742 if (ifp != NULL) { 2743 if_setdrvflags(ifp, 0); 2744 } 2745} 2746 2747static void 2748bxe_drv_pulse(struct bxe_softc *sc) 2749{ 2750 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2751 sc->fw_drv_pulse_wr_seq); 2752} 2753 2754static inline uint16_t 2755bxe_tx_avail(struct bxe_softc *sc, 2756 struct bxe_fastpath *fp) 2757{ 2758 int16_t used; 2759 uint16_t prod; 2760 uint16_t cons; 2761 2762 prod = fp->tx_bd_prod; 2763 cons = fp->tx_bd_cons; 2764 2765 used = SUB_S16(prod, cons); 2766 2767#if 0 2768 KASSERT((used 
< 0), ("used tx bds < 0")); 2769 KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size")); 2770 KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL), 2771 ("invalid number of tx bds used")); 2772#endif 2773 2774 return (int16_t)(sc->tx_ring_size) - used; 2775} 2776 2777static inline int 2778bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2779{ 2780 uint16_t hw_cons; 2781 2782 mb(); /* status block fields can change */ 2783 hw_cons = le16toh(*fp->tx_cons_sb); 2784 return (hw_cons != fp->tx_pkt_cons); 2785} 2786 2787static inline uint8_t 2788bxe_has_tx_work(struct bxe_fastpath *fp) 2789{ 2790 /* expand this for multi-cos if ever supported */ 2791 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2792} 2793 2794static inline int 2795bxe_has_rx_work(struct bxe_fastpath *fp) 2796{ 2797 uint16_t rx_cq_cons_sb; 2798 2799 mb(); /* status block fields can change */ 2800 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2801 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2802 rx_cq_cons_sb++; 2803 return (fp->rx_cq_cons != rx_cq_cons_sb); 2804} 2805 2806static void 2807bxe_sp_event(struct bxe_softc *sc, 2808 struct bxe_fastpath *fp, 2809 union eth_rx_cqe *rr_cqe) 2810{ 2811 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2812 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2813 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2814 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2815 2816 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2817 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2818 2819#if 0 2820 /* 2821 * If cid is within VF range, replace the slowpath object with the 2822 * one corresponding to this VF 2823 */ 2824 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { 2825 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); 2826 } 2827#endif 2828 2829 switch (command) { 2830 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2831 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2832 drv_cmd = ECORE_Q_CMD_UPDATE; 2833 break; 2834 2835 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2836 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2837 drv_cmd = ECORE_Q_CMD_SETUP; 2838 break; 2839 2840 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2841 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2842 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2843 break; 2844 2845 case (RAMROD_CMD_ID_ETH_HALT): 2846 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2847 drv_cmd = ECORE_Q_CMD_HALT; 2848 break; 2849 2850 case (RAMROD_CMD_ID_ETH_TERMINATE): 2851 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); 2852 drv_cmd = ECORE_Q_CMD_TERMINATE; 2853 break; 2854 2855 case (RAMROD_CMD_ID_ETH_EMPTY): 2856 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2857 drv_cmd = ECORE_Q_CMD_EMPTY; 2858 break; 2859 2860 default: 2861 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2862 command, fp->index); 2863 return; 2864 } 2865 2866 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2867 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2868 /* 2869 * q_obj->complete_cmd() failure means that this was 2870 * an unexpected completion. 2871 * 2872 * In this case we don't want to increase the sc->spq_left 2873 * because apparently we haven't sent this command in the first 2874 * place.
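 Note the credit accounting on the success path below: bxe_sp_post() consumed one unit of sc->cq_spq_left (or sc->eq_spq_left for a contextless ramrod) when the command was posted, and the atomic_add_acq_long() that follows returns that credit. Roughly (illustrative):
      bxe_sp_post(...)            // cq_spq_left: N -> N-1
      ... ramrod completes ...
      bxe_sp_event(...)           // cq_spq_left: N-1 -> N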
2875 */ 2876 // bxe_panic(sc, ("Unexpected SP completion\n")); 2877 return; 2878 } 2879 2880#if 0 2881 /* SRIOV: reschedule any 'in_progress' operations */ 2882 bxe_iov_sp_event(sc, cid, TRUE); 2883#endif 2884 2885 atomic_add_acq_long(&sc->cq_spq_left, 1); 2886 2887 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2888 atomic_load_acq_long(&sc->cq_spq_left)); 2889 2890#if 0 2891 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2892 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2893 /* 2894 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2895 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2896 * prevent case that both bits are cleared. At the end of load/unload 2897 * driver checks that sp_state is cleared and this order prevents 2898 * races. 2899 */ 2900 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2901 wmb(); 2902 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2903 2904 /* schedule the sp task as MCP ack is required */ 2905 bxe_schedule_sp_task(sc); 2906 } 2907#endif 2908} 2909 2910/* 2911 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2912 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2913 * the current aggregation queue as in-progress. 2914 */ 2915static void 2916bxe_tpa_start(struct bxe_softc *sc, 2917 struct bxe_fastpath *fp, 2918 uint16_t queue, 2919 uint16_t cons, 2920 uint16_t prod, 2921 struct eth_fast_path_rx_cqe *cqe) 2922{ 2923 struct bxe_sw_rx_bd tmp_bd; 2924 struct bxe_sw_rx_bd *rx_buf; 2925 struct eth_rx_bd *rx_bd; 2926 int max_agg_queues; 2927 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2928 uint16_t index; 2929 2930 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2931 "cons=%d prod=%d\n", 2932 fp->index, queue, cons, prod); 2933 2934 max_agg_queues = MAX_AGG_QS(sc); 2935 2936 KASSERT((queue < max_agg_queues), 2937 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2938 fp->index, queue, max_agg_queues)); 2939 2940 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2941 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2942 fp->index, queue)); 2943 2944 /* copy the existing mbuf and mapping from the TPA pool */ 2945 tmp_bd = tpa_info->bd; 2946 2947 if (tmp_bd.m == NULL) { 2948 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2949 fp->index, queue); 2950 /* XXX Error handling? */ 2951 return; 2952 } 2953 2954 /* change the TPA queue to the start state */ 2955 tpa_info->state = BXE_TPA_STATE_START; 2956 tpa_info->placement_offset = cqe->placement_offset; 2957 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2958 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2959 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2960 2961 fp->rx_tpa_queue_used |= (1 << queue); 2962 2963 /* 2964 * If all the buffer descriptors are filled with mbufs then fill in 2965 * the current consumer index with a new BD. Else if a maximum Rx 2966 * buffer limit is imposed then fill in the next producer index. 2967 */ 2968 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2969 prod : cons; 2970 2971 /* move the received mbuf and mapping to TPA pool */ 2972 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2973 2974 /* release any existing RX BD mbuf mappings */ 2975 if (cons != index) { 2976 rx_buf = &fp->rx_mbuf_chain[cons]; 2977 2978 if (rx_buf->m_map != NULL) { 2979 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2980 BUS_DMASYNC_POSTREAD); 2981 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2982 } 2983 2984 /* 2985 * We get here when the maximum number of rx buffers is less than 2986 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2987 * it out here without concern of a memory leak. 2988 */ 2989 fp->rx_mbuf_chain[cons].m = NULL; 2990 } 2991 2992 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2993 fp->rx_mbuf_chain[index] = tmp_bd; 2994 2995 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2996 rx_bd = &fp->rx_chain[index]; 2997 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2998 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2999} 3000 3001/* 3002 * When a TPA aggregation is completed, loop through the individual mbufs 3003 * of the aggregation, combining them into a single mbuf which will be sent 3004 * up the stack. Refill all freed SGEs with mbufs as we go along. 3005 */ 3006static int 3007bxe_fill_frag_mbuf(struct bxe_softc *sc, 3008 struct bxe_fastpath *fp, 3009 struct bxe_sw_tpa_info *tpa_info, 3010 uint16_t queue, 3011 uint16_t pages, 3012 struct mbuf *m, 3013 struct eth_end_agg_rx_cqe *cqe, 3014 uint16_t cqe_idx) 3015{ 3016 struct mbuf *m_frag; 3017 uint32_t frag_len, frag_size, i; 3018 uint16_t sge_idx; 3019 int rc = 0; 3020 int j; 3021 3022 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3023 3024 BLOGD(sc, DBG_LRO, 3025 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3026 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3027 3028 /* make sure the aggregated frame is not too big to handle */ 3029 if (pages > 8 * PAGES_PER_SGE) { 3030 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3031 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3032 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3033 tpa_info->len_on_bd, frag_size); 3034 bxe_panic(sc, ("sge page count error\n")); 3035 return (EINVAL); 3036 } 3037 3038 /* 3039 * Scan through the scatter gather list pulling individual mbufs into a 3040 * single mbuf for the host stack. 3041 */ 3042 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3043 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3044 3045 /* 3046 * Firmware gives the indices of the SGE as if the ring is an array 3047 * (meaning that the "next" element will consume 2 indices). 3048 */ 3049 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3050 3051 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3052 "sge_idx=%d frag_size=%d frag_len=%d\n", 3053 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3054 3055 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3056 3057 /* allocate a new mbuf for the SGE */ 3058 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3059 if (rc) { 3060 /* Leave all remaining SGEs in the ring! 
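 Bailing out here with the old mbufs still attached keeps the SGE ring consistent; the caller just drops the aggregated frame. For a feel of the sizing math in this loop (illustrative numbers, assuming 4KB SGE pages with PAGES_PER_SGE == 1): pkt_len = 9014 and len_on_bd = 1448 give frag_size = 7566, so the first SGE contributes 4096 bytes and the second the remaining 3470.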
*/ 3061 return (rc); 3062 } 3063 3064 /* update the fragment length */ 3065 m_frag->m_len = frag_len; 3066 3067 /* concatenate the fragment to the head mbuf */ 3068 m_cat(m, m_frag); 3069 fp->eth_q_stats.mbuf_alloc_sge--; 3070 3071 /* update the TPA mbuf size and remaining fragment size */ 3072 m->m_pkthdr.len += frag_len; 3073 frag_size -= frag_len; 3074 } 3075 3076 BLOGD(sc, DBG_LRO, 3077 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 3078 fp->index, queue, frag_size); 3079 3080 return (rc); 3081} 3082 3083static inline void 3084bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 3085{ 3086 int i, j; 3087 3088 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 3089 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 3090 3091 for (j = 0; j < 2; j++) { 3092 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 3093 idx--; 3094 } 3095 } 3096} 3097 3098static inline void 3099bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 3100{ 3101 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 3102 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 3103 3104 /* 3105 * Clear the last two indices in the page. These are the indices that 3106 * correspond to the "next" element, hence will never be indicated and 3107 * should be removed from the calculations. 3108 */ 3109 bxe_clear_sge_mask_next_elems(fp); 3110} 3111 3112static inline void 3113bxe_update_last_max_sge(struct bxe_fastpath *fp, 3114 uint16_t idx) 3115{ 3116 uint16_t last_max = fp->last_max_sge; 3117 3118 if (SUB_S16(idx, last_max) > 0) { 3119 fp->last_max_sge = idx; 3120 } 3121} 3122 3123static inline void 3124bxe_update_sge_prod(struct bxe_softc *sc, 3125 struct bxe_fastpath *fp, 3126 uint16_t sge_len, 3127 union eth_sgl_or_raw_data *cqe) 3128{ 3129 uint16_t last_max, last_elem, first_elem; 3130 uint16_t delta = 0; 3131 uint16_t i; 3132 3133 if (!sge_len) { 3134 return; 3135 } 3136 3137 /* first mark all used pages */ 3138 for (i = 0; i < sge_len; i++) { 3139 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 3140 RX_SGE(le16toh(cqe->sgl[i]))); 3141 } 3142 3143 BLOGD(sc, DBG_LRO, 3144 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 3145 fp->index, sge_len - 1, 3146 le16toh(cqe->sgl[sge_len - 1])); 3147 3148 /* assume that the last SGE index is the biggest */ 3149 bxe_update_last_max_sge(fp, 3150 le16toh(cqe->sgl[sge_len - 1])); 3151 3152 last_max = RX_SGE(fp->last_max_sge); 3153 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 3154 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 3155 3156 /* if ring is not full */ 3157 if (last_elem + 1 != first_elem) { 3158 last_elem++; 3159 } 3160 3161 /* now update the prod */ 3162 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 3163 if (__predict_true(fp->sge_mask[i])) { 3164 break; 3165 } 3166 3167 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3168 delta += BIT_VEC64_ELEM_SZ; 3169 } 3170 3171 if (delta > 0) { 3172 fp->rx_sge_prod += delta; 3173 /* clear page-end entries */ 3174 bxe_clear_sge_mask_next_elems(fp); 3175 } 3176 3177 BLOGD(sc, DBG_LRO, 3178 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3179 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3180} 3181 3182/* 3183 * The aggregation on the current TPA queue has completed. Pull the individual 3184 * mbuf fragments together into a single mbuf, perform all necessary checksum 3185 * calculations, and send the resulting mbuf to the stack.
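 The function allocates a replacement TPA-pool mbuf before it modifies the completed one, so an allocation failure degrades to a dropped frame (rx_soft_errors is bumped) rather than leaving the pool short. A sketch of that flow (illustrative, simplified from the code below):
     rc = bxe_alloc_rx_tpa_mbuf(fp, queue);   // refill the pool first
     if (rc) ... drop and exit ...            // old mbuf stays in the pool
     m_adj(m, placement_offset);              // only now touch the frame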
3186 */ 3187static void 3188bxe_tpa_stop(struct bxe_softc *sc, 3189 struct bxe_fastpath *fp, 3190 struct bxe_sw_tpa_info *tpa_info, 3191 uint16_t queue, 3192 uint16_t pages, 3193 struct eth_end_agg_rx_cqe *cqe, 3194 uint16_t cqe_idx) 3195{ 3196 if_t ifp = sc->ifp; 3197 struct mbuf *m; 3198 int rc = 0; 3199 3200 BLOGD(sc, DBG_LRO, 3201 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3202 fp->index, queue, tpa_info->placement_offset, 3203 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3204 3205 m = tpa_info->bd.m; 3206 3207 /* allocate a replacement before modifying existing mbuf */ 3208 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3209 if (rc) { 3210 /* drop the frame and log an error */ 3211 fp->eth_q_stats.rx_soft_errors++; 3212 goto bxe_tpa_stop_exit; 3213 } 3214 3215 /* we have a replacement, fixup the current mbuf */ 3216 m_adj(m, tpa_info->placement_offset); 3217 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3218 3219 /* mark the checksums valid (taken care of by the firmware) */ 3220 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3221 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3222 m->m_pkthdr.csum_data = 0xffff; 3223 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3224 CSUM_IP_VALID | 3225 CSUM_DATA_VALID | 3226 CSUM_PSEUDO_HDR); 3227 3228 /* aggregate all of the SGEs into a single mbuf */ 3229 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3230 if (rc) { 3231 /* drop the packet and log an error */ 3232 fp->eth_q_stats.rx_soft_errors++; 3233 m_freem(m); 3234 } else { 3235 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) { 3236 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3237 m->m_flags |= M_VLANTAG; 3238 } 3239 3240 /* assign packet to this interface interface */ 3241 if_setrcvif(m, ifp); 3242 3243#if __FreeBSD_version >= 800000 3244 /* specify what RSS queue was used for this flow */ 3245 m->m_pkthdr.flowid = fp->index; 3246 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3247#endif 3248 3249 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3250 fp->eth_q_stats.rx_tpa_pkts++; 3251 3252 /* pass the frame to the stack */ 3253 if_input(ifp, m); 3254 } 3255 3256 /* we passed an mbuf up the stack or dropped the frame */ 3257 fp->eth_q_stats.mbuf_alloc_tpa--; 3258 3259bxe_tpa_stop_exit: 3260 3261 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3262 fp->rx_tpa_queue_used &= ~(1 << queue); 3263} 3264 3265static uint8_t 3266bxe_service_rxsgl( 3267 struct bxe_fastpath *fp, 3268 uint16_t len, 3269 uint16_t lenonbd, 3270 struct mbuf *m, 3271 struct eth_fast_path_rx_cqe *cqe_fp) 3272{ 3273 struct mbuf *m_frag; 3274 uint16_t frags, frag_len; 3275 uint16_t sge_idx = 0; 3276 uint16_t j; 3277 uint8_t i, rc = 0; 3278 uint32_t frag_size; 3279 3280 /* adjust the mbuf */ 3281 m->m_len = lenonbd; 3282 3283 frag_size = len - lenonbd; 3284 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3285 3286 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { 3287 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); 3288 3289 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3290 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); 3291 m_frag->m_len = frag_len; 3292 3293 /* allocate a new mbuf for the SGE */ 3294 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3295 if (rc) { 3296 /* Leave all remaining SGEs in the ring! 
*/ 3297 return (rc); 3298 } 3299 fp->eth_q_stats.mbuf_alloc_sge--; 3300 3301 /* concatenate the fragment to the head mbuf */ 3302 m_cat(m, m_frag); 3303 3304 frag_size -= frag_len; 3305 } 3306 3307 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3308 3309 return rc; 3310} 3311 3312static uint8_t 3313bxe_rxeof(struct bxe_softc *sc, 3314 struct bxe_fastpath *fp) 3315{ 3316 if_t ifp = sc->ifp; 3317 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3318 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3319 int rx_pkts = 0; 3320 int rc = 0; 3321 3322 BXE_FP_RX_LOCK(fp); 3323 3324 /* CQ "next element" is of the size of the regular element */ 3325 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3326 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3327 hw_cq_cons++; 3328 } 3329 3330 bd_cons = fp->rx_bd_cons; 3331 bd_prod = fp->rx_bd_prod; 3332 bd_prod_fw = bd_prod; 3333 sw_cq_cons = fp->rx_cq_cons; 3334 sw_cq_prod = fp->rx_cq_prod; 3335 3336 /* 3337 * Memory barrier necessary as speculative reads of the rx 3338 * buffer can be ahead of the index in the status block 3339 */ 3340 rmb(); 3341 3342 BLOGD(sc, DBG_RX, 3343 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3344 fp->index, hw_cq_cons, sw_cq_cons); 3345 3346 while (sw_cq_cons != hw_cq_cons) { 3347 struct bxe_sw_rx_bd *rx_buf = NULL; 3348 union eth_rx_cqe *cqe; 3349 struct eth_fast_path_rx_cqe *cqe_fp; 3350 uint8_t cqe_fp_flags; 3351 enum eth_rx_cqe_type cqe_fp_type; 3352 uint16_t len, lenonbd, pad; 3353 struct mbuf *m = NULL; 3354 3355 comp_ring_cons = RCQ(sw_cq_cons); 3356 bd_prod = RX_BD(bd_prod); 3357 bd_cons = RX_BD(bd_cons); 3358 3359 cqe = &fp->rcq_chain[comp_ring_cons]; 3360 cqe_fp = &cqe->fast_path_cqe; 3361 cqe_fp_flags = cqe_fp->type_error_flags; 3362 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3363 3364 BLOGD(sc, DBG_RX, 3365 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3366 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3367 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3368 fp->index, 3369 hw_cq_cons, 3370 sw_cq_cons, 3371 bd_prod, 3372 bd_cons, 3373 CQE_TYPE(cqe_fp_flags), 3374 cqe_fp_flags, 3375 cqe_fp->status_flags, 3376 le32toh(cqe_fp->rss_hash_result), 3377 le16toh(cqe_fp->vlan_tag), 3378 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3379 le16toh(cqe_fp->len_on_bd)); 3380 3381 /* is this a slowpath msg? */ 3382 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3383 bxe_sp_event(sc, fp, cqe); 3384 goto next_cqe; 3385 } 3386 3387 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3388 3389 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3390 struct bxe_sw_tpa_info *tpa_info; 3391 uint16_t frag_size, pages; 3392 uint8_t queue; 3393 3394#if 0 3395 /* sanity check */ 3396 if (!fp->tpa_enable && 3397 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3398 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3399 CQE_TYPE(cqe_fp_type)); 3400 } 3401#endif 3402 3403 if (CQE_TYPE_START(cqe_fp_type)) { 3404 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3405 bd_cons, bd_prod, cqe_fp); 3406 m = NULL; /* packet not ready yet */ 3407 goto next_rx; 3408 } 3409 3410 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3411 ("CQE type is not STOP! 
(0x%x)\n", cqe_fp_type)); 3412 3413 queue = cqe->end_agg_cqe.queue_index; 3414 tpa_info = &fp->rx_tpa_info[queue]; 3415 3416 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3417 fp->index, queue); 3418 3419 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3420 tpa_info->len_on_bd); 3421 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3422 3423 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3424 &cqe->end_agg_cqe, comp_ring_cons); 3425 3426 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3427 3428 goto next_cqe; 3429 } 3430 3431 /* non TPA */ 3432 3433 /* is this an error packet? */ 3434 if (__predict_false(cqe_fp_flags & 3435 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3436 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3437 fp->eth_q_stats.rx_soft_errors++; 3438 goto next_rx; 3439 } 3440 3441 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3442 lenonbd = le16toh(cqe_fp->len_on_bd); 3443 pad = cqe_fp->placement_offset; 3444 3445 m = rx_buf->m; 3446 3447 if (__predict_false(m == NULL)) { 3448 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3449 bd_cons, fp->index); 3450 goto next_rx; 3451 } 3452 3453 /* XXX double copy if packet length under a threshold */ 3454 3455 /* 3456 * If all the buffer descriptors are filled with mbufs then fill in 3457 * the current consumer index with a new BD. Else if a maximum Rx 3458 * buffer limit is imposed then fill in the next producer index. 3459 */ 3460 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3461 (sc->max_rx_bufs != RX_BD_USABLE) ? 3462 bd_prod : bd_cons); 3463 if (rc != 0) { 3464 3465 /* we simply reuse the received mbuf and don't post it to the stack */ 3466 m = NULL; 3467 3468 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3469 fp->index, rc); 3470 fp->eth_q_stats.rx_soft_errors++; 3471 3472 if (sc->max_rx_bufs != RX_BD_USABLE) { 3473 /* copy this consumer index to the producer index */ 3474 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3475 sizeof(struct bxe_sw_rx_bd)); 3476 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3477 } 3478 3479 goto next_rx; 3480 } 3481 3482 /* current mbuf was detached from the bd */ 3483 fp->eth_q_stats.mbuf_alloc_rx--; 3484 3485 /* we allocated a replacement mbuf, fixup the current one */ 3486 m_adj(m, pad); 3487 m->m_pkthdr.len = m->m_len = len; 3488 3489 if ((len > 60) && (len > lenonbd)) { 3490 fp->eth_q_stats.rx_bxe_service_rxsgl++; 3491 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); 3492 if (rc) 3493 break; 3494 fp->eth_q_stats.rx_jumbo_sge_pkts++; 3495 } else if (lenonbd < len) { 3496 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; 3497 } 3498 3499 /* assign the packet to this interface */ 3500 if_setrcvif(m, ifp); 3501 3502 /* assume no hardware checksum has completed */ 3503 m->m_pkthdr.csum_flags = 0; 3504 3505 /* validate checksum if offload enabled */ 3506 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 3507 /* check for a valid IP frame */ 3508 if (!(cqe->fast_path_cqe.status_flags & 3509 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3510 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3511 if (__predict_false(cqe_fp_flags & 3512 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3513 fp->eth_q_stats.rx_hw_csum_errors++; 3514 } else { 3515 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3516 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3517 } 3518 } 3519 3520 /* check for a valid TCP/UDP frame */ 3521 if (!(cqe->fast_path_cqe.status_flags & 3522 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3523 if (__predict_false(cqe_fp_flags & 3524
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3525 fp->eth_q_stats.rx_hw_csum_errors++; 3526 } else { 3527 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3528 m->m_pkthdr.csum_data = 0xFFFF; 3529 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3530 CSUM_PSEUDO_HDR); 3531 } 3532 } 3533 } 3534 3535 /* if there is a VLAN tag then flag that info */ 3536 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3537 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3538 m->m_flags |= M_VLANTAG; 3539 } 3540 3541#if __FreeBSD_version >= 800000 3542 /* specify what RSS queue was used for this flow */ 3543 m->m_pkthdr.flowid = fp->index; 3544 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3545#endif 3546 3547next_rx: 3548 3549 bd_cons = RX_BD_NEXT(bd_cons); 3550 bd_prod = RX_BD_NEXT(bd_prod); 3551 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3552 3553 /* pass the frame to the stack */ 3554 if (__predict_true(m != NULL)) { 3555 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3556 rx_pkts++; 3557 if_input(ifp, m); 3558 } 3559 3560next_cqe: 3561 3562 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3563 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3564 3565 /* limit spinning on the queue */ 3566 if (rc != 0) 3567 break; 3568 3569 if (rx_pkts == sc->rx_budget) { 3570 fp->eth_q_stats.rx_budget_reached++; 3571 break; 3572 } 3573 } /* while work to do */ 3574 3575 fp->rx_bd_cons = bd_cons; 3576 fp->rx_bd_prod = bd_prod_fw; 3577 fp->rx_cq_cons = sw_cq_cons; 3578 fp->rx_cq_prod = sw_cq_prod; 3579 3580 /* Update producers */ 3581 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3582 3583 fp->eth_q_stats.rx_pkts += rx_pkts; 3584 fp->eth_q_stats.rx_calls++; 3585 3586 BXE_FP_RX_UNLOCK(fp); 3587 3588 return (sw_cq_cons != hw_cq_cons); 3589} 3590 3591static uint16_t 3592bxe_free_tx_pkt(struct bxe_softc *sc, 3593 struct bxe_fastpath *fp, 3594 uint16_t idx) 3595{ 3596 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3597 struct eth_tx_start_bd *tx_start_bd; 3598 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3599 uint16_t new_cons; 3600 int nbd; 3601 3602 /* unmap the mbuf from non-paged memory */ 3603 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3604 3605 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3606 nbd = le16toh(tx_start_bd->nbd) - 1; 3607 3608#if 0 3609 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3610 bxe_panic(sc, ("BAD nbd!\n")); 3611 } 3612#endif 3613 3614 new_cons = (tx_buf->first_bd + nbd); 3615 3616#if 0 3617 struct eth_tx_bd *tx_data_bd; 3618 3619 /* 3620 * The following code doesn't do anything but is left here 3621 * for clarity on what the new value of new_cons skipped. 
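 Purely as arithmetic (illustrative): if tx_buf->first_bd == 100 and the start BD reports nbd == 5, then after the decrement above nbd == 4 and new_cons == 104; the skipped BDs are the parse BD, an optional TSO split header BD, and the data BDs enumerated below.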
3622 */ 3623 3624 /* get the next bd */ 3625 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3626 3627 /* skip the parse bd */ 3628 --nbd; 3629 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3630 3631 /* skip the TSO split header bd since they have no mapping */ 3632 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3633 --nbd; 3634 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3635 } 3636 3637 /* now free frags */ 3638 while (nbd > 0) { 3639 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3640 if (--nbd) { 3641 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3642 } 3643 } 3644#endif 3645 3646 /* free the mbuf */ 3647 if (__predict_true(tx_buf->m != NULL)) { 3648 m_freem(tx_buf->m); 3649 fp->eth_q_stats.mbuf_alloc_tx--; 3650 } else { 3651 fp->eth_q_stats.tx_chain_lost_mbuf++; 3652 } 3653 3654 tx_buf->m = NULL; 3655 tx_buf->first_bd = 0; 3656 3657 return (new_cons); 3658} 3659 3660/* transmit timeout watchdog */ 3661static int 3662bxe_watchdog(struct bxe_softc *sc, 3663 struct bxe_fastpath *fp) 3664{ 3665 BXE_FP_TX_LOCK(fp); 3666 3667 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3668 BXE_FP_TX_UNLOCK(fp); 3669 return (0); 3670 } 3671 3672 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3673 3674 BXE_FP_TX_UNLOCK(fp); 3675 3676 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3677 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3678 3679 return (-1); 3680} 3681 3682/* processes transmit completions */ 3683static uint8_t 3684bxe_txeof(struct bxe_softc *sc, 3685 struct bxe_fastpath *fp) 3686{ 3687 if_t ifp = sc->ifp; 3688 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3689 uint16_t tx_bd_avail; 3690 3691 BXE_FP_TX_LOCK_ASSERT(fp); 3692 3693 bd_cons = fp->tx_bd_cons; 3694 hw_cons = le16toh(*fp->tx_cons_sb); 3695 sw_cons = fp->tx_pkt_cons; 3696 3697 while (sw_cons != hw_cons) { 3698 pkt_cons = TX_BD(sw_cons); 3699 3700 BLOGD(sc, DBG_TX, 3701 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3702 fp->index, hw_cons, sw_cons, pkt_cons); 3703 3704 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3705 3706 sw_cons++; 3707 } 3708 3709 fp->tx_pkt_cons = sw_cons; 3710 fp->tx_bd_cons = bd_cons; 3711 3712 BLOGD(sc, DBG_TX, 3713 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3714 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3715 3716 mb(); 3717 3718 tx_bd_avail = bxe_tx_avail(sc, fp); 3719 3720 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3721 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 3722 } else { 3723 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 3724 } 3725 3726 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3727 /* reset the watchdog timer if there are pending transmits */ 3728 fp->watchdog_timer = BXE_TX_TIMEOUT; 3729 return (TRUE); 3730 } else { 3731 /* clear watchdog when there are no pending transmits */ 3732 fp->watchdog_timer = 0; 3733 return (FALSE); 3734 } 3735} 3736 3737static void 3738bxe_drain_tx_queues(struct bxe_softc *sc) 3739{ 3740 struct bxe_fastpath *fp; 3741 int i, count; 3742 3743 /* wait until all TX fastpath tasks have completed */ 3744 for (i = 0; i < sc->num_queues; i++) { 3745 fp = &sc->fp[i]; 3746 3747 count = 1000; 3748 3749 while (bxe_has_tx_work(fp)) { 3750 3751 BXE_FP_TX_LOCK(fp); 3752 bxe_txeof(sc, fp); 3753 BXE_FP_TX_UNLOCK(fp); 3754 3755 if (count == 0) { 3756 BLOGE(sc, "Timeout waiting for fp[%d] " 3757 "transmits to complete!\n", i); 3758 bxe_panic(sc, ("tx drain failure\n")); 3759 return; 3760 } 3761 3762 count--; 3763 DELAY(1000); 3764 rmb(); 3765 } 3766 } 3767 3768 return; 3769} 3770 3771static int 3772bxe_del_all_macs(struct bxe_softc *sc, 3773 struct 
ecore_vlan_mac_obj *mac_obj, 3774 int mac_type, 3775 uint8_t wait_for_comp) 3776{ 3777 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3778 int rc; 3779 3780 /* wait for completion if requested */ 3781 if (wait_for_comp) { 3782 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3783 } 3784 3785 /* Set the MAC type of addresses we want to clear */ 3786 bxe_set_bit(mac_type, &vlan_mac_flags); 3787 3788 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3789 if (rc < 0) { 3790 BLOGE(sc, "Failed to delete MACs (%d)\n", rc); 3791 } 3792 3793 return (rc); 3794} 3795 3796static int 3797bxe_fill_accept_flags(struct bxe_softc *sc, 3798 uint32_t rx_mode, 3799 unsigned long *rx_accept_flags, 3800 unsigned long *tx_accept_flags) 3801{ 3802 /* Clear the flags first */ 3803 *rx_accept_flags = 0; 3804 *tx_accept_flags = 0; 3805 3806 switch (rx_mode) { 3807 case BXE_RX_MODE_NONE: 3808 /* 3809 * 'drop all' supersedes any accept flags that may have been 3810 * passed to the function. 3811 */ 3812 break; 3813 3814 case BXE_RX_MODE_NORMAL: 3815 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3816 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3817 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3818 3819 /* internal switching mode */ 3820 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3821 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3822 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3823 3824 break; 3825 3826 case BXE_RX_MODE_ALLMULTI: 3827 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3828 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3829 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3830 3831 /* internal switching mode */ 3832 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3833 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3834 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3835 3836 break; 3837 3838 case BXE_RX_MODE_PROMISC: 3839 /* 3840 * According to the definition of SI mode, an iface in promisc mode 3841 * should receive matched and unmatched (in resolution of port) 3842 * unicast packets.
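 For reference, the complete mode-to-flags mapping implemented by this switch is (RX side): NONE -> no accept bits; NORMAL -> UNICAST, MULTICAST, BROADCAST; ALLMULTI -> UNICAST, ALL_MULTICAST, BROADCAST; PROMISC -> UNMATCHED, UNICAST, ALL_MULTICAST, BROADCAST; ACCEPT_ANY_VLAN is then added to both the RX and TX sides for every mode except NONE.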
3843 */ 3844 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3845 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3846 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3847 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3848 3849 /* internal switching mode */ 3850 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3851 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3852 3853 if (IS_MF_SI(sc)) { 3854 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3855 } else { 3856 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3857 } 3858 3859 break; 3860 3861 default: 3862 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3863 return (-1); 3864 } 3865 3866 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3867 if (rx_mode != BXE_RX_MODE_NONE) { 3868 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3869 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3870 } 3871 3872 return (0); 3873} 3874 3875static int 3876bxe_set_q_rx_mode(struct bxe_softc *sc, 3877 uint8_t cl_id, 3878 unsigned long rx_mode_flags, 3879 unsigned long rx_accept_flags, 3880 unsigned long tx_accept_flags, 3881 unsigned long ramrod_flags) 3882{ 3883 struct ecore_rx_mode_ramrod_params ramrod_param; 3884 int rc; 3885 3886 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3887 3888 /* Prepare ramrod parameters */ 3889 ramrod_param.cid = 0; 3890 ramrod_param.cl_id = cl_id; 3891 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3892 ramrod_param.func_id = SC_FUNC(sc); 3893 3894 ramrod_param.pstate = &sc->sp_state; 3895 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3896 3897 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3898 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3899 3900 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3901 3902 ramrod_param.ramrod_flags = ramrod_flags; 3903 ramrod_param.rx_mode_flags = rx_mode_flags; 3904 3905 ramrod_param.rx_accept_flags = rx_accept_flags; 3906 ramrod_param.tx_accept_flags = tx_accept_flags; 3907 3908 rc = ecore_config_rx_mode(sc, &ramrod_param); 3909 if (rc < 0) { 3910 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3911 return (rc); 3912 } 3913 3914 return (0); 3915} 3916 3917static int 3918bxe_set_storm_rx_mode(struct bxe_softc *sc) 3919{ 3920 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3921 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3922 int rc; 3923 3924 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3925 &tx_accept_flags); 3926 if (rc) { 3927 return (rc); 3928 } 3929 3930 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3931 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3932 3933 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3934 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3935 rx_accept_flags, tx_accept_flags, 3936 ramrod_flags)); 3937} 3938 3939/* returns the "mcp load_code" according to global load_count array */ 3940static int 3941bxe_nic_load_no_mcp(struct bxe_softc *sc) 3942{ 3943 int path = SC_PATH(sc); 3944 int port = SC_PORT(sc); 3945 3946 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3947 path, load_count[path][0], load_count[path][1], 3948 load_count[path][2]); 3949 load_count[path][0]++; 3950 load_count[path][1 + port]++; 3951 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3952 path, load_count[path][0], load_count[path][1], 3953 load_count[path][2]); 3954 if (load_count[path][0] == 1) { 3955 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3956 } else if (load_count[path][1 + port] == 1) { 3957 return 
(FW_MSG_CODE_DRV_LOAD_PORT); 3958 } else { 3959 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3960 } 3961} 3962 3963/* returns the "mcp load_code" according to global load_count array */ 3964static int 3965bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3966{ 3967 int port = SC_PORT(sc); 3968 int path = SC_PATH(sc); 3969 3970 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3971 path, load_count[path][0], load_count[path][1], 3972 load_count[path][2]); 3973 load_count[path][0]--; 3974 load_count[path][1 + port]--; 3975 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3976 path, load_count[path][0], load_count[path][1], 3977 load_count[path][2]); 3978 if (load_count[path][0] == 0) { 3979 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3980 } else if (load_count[path][1 + port] == 0) { 3981 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3982 } else { 3983 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3984 } 3985} 3986 3987/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3988static uint32_t 3989bxe_send_unload_req(struct bxe_softc *sc, 3990 int unload_mode) 3991{ 3992 uint32_t reset_code = 0; 3993#if 0 3994 int port = SC_PORT(sc); 3995 int path = SC_PATH(sc); 3996#endif 3997 3998 /* Select the UNLOAD request mode */ 3999 if (unload_mode == UNLOAD_NORMAL) { 4000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4001 } 4002#if 0 4003 else if (sc->flags & BXE_NO_WOL_FLAG) { 4004 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 4005 } else if (sc->wol) { 4006 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 4007 uint8_t *mac_addr = sc->dev->dev_addr; 4008 uint32_t val; 4009 uint16_t pmc; 4010 4011 /* 4012 * The mac address is written to entries 1-4 to 4013 * preserve entry 0 which is used by the PMF 4014 */ 4015 uint8_t entry = (SC_VN(sc) + 1)*8; 4016 4017 val = (mac_addr[0] << 8) | mac_addr[1]; 4018 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); 4019 4020 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4021 (mac_addr[4] << 8) | mac_addr[5]; 4022 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 4023 4024 /* Enable the PME and clear the status */ 4025 pmc = pci_read_config(sc->dev, 4026 (sc->devinfo.pcie_pm_cap_reg + 4027 PCIR_POWER_STATUS), 4028 2); 4029 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; 4030 pci_write_config(sc->dev, 4031 (sc->devinfo.pcie_pm_cap_reg + 4032 PCIR_POWER_STATUS), 4033 pmc, 4); 4034 4035 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 4036 } 4037#endif 4038 else { 4039 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4040 } 4041 4042 /* Send the request to the MCP */ 4043 if (!BXE_NOMCP(sc)) { 4044 reset_code = bxe_fw_command(sc, reset_code, 0); 4045 } else { 4046 reset_code = bxe_nic_unload_no_mcp(sc); 4047 } 4048 4049 return (reset_code); 4050} 4051 4052/* send UNLOAD_DONE command to the MCP */ 4053static void 4054bxe_send_unload_done(struct bxe_softc *sc, 4055 uint8_t keep_link) 4056{ 4057 uint32_t reset_param = 4058 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 4059 4060 /* Report UNLOAD_DONE to MCP */ 4061 if (!BXE_NOMCP(sc)) { 4062 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 4063 } 4064} 4065 4066static int 4067bxe_func_wait_started(struct bxe_softc *sc) 4068{ 4069 int tout = 50; 4070 4071 if (!sc->port.pmf) { 4072 return (0); 4073 } 4074 4075 /* 4076 * (assumption: No Attention from MCP at this stage) 4077 * PMF probably in the middle of TX disable/enable transaction 4078 * 1. Sync IRS for default SB 4079 * 2. Sync SP queue - this guarantees us that attention handling started 4080 * 3. 
Wait until the TX disable/enable transaction completes 4081 * 4082 * 1+2 guarantee that if a DCBX attention was scheduled it has already changed 4083 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have already 4084 * received the completion for the transaction, the state is TX_STOPPED. 4085 * State will return to STARTED after completion of the TX_STOPPED-->STARTED 4086 * transaction. 4087 */ 4088 4089 /* XXX make sure default SB ISR is done */ 4090 /* need a way to synchronize an irq (intr_mtx?) */ 4091 4092 /* XXX flush any work queues */ 4093 4094 while (ecore_func_get_state(sc, &sc->func_obj) != 4095 ECORE_F_STATE_STARTED && tout--) { 4096 DELAY(20000); 4097 } 4098 4099 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 4100 /* 4101 * Failed to complete the transaction in a "good way". 4102 * Force both transactions with the CLR bit. 4103 */ 4104 struct ecore_func_state_params func_params = { NULL }; 4105 4106 BLOGE(sc, "Unexpected function state! " 4107 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); 4108 4109 func_params.f_obj = &sc->func_obj; 4110 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4111 4112 /* STARTED-->TX_STOPPED */ 4113 func_params.cmd = ECORE_F_CMD_TX_STOP; 4114 ecore_func_state_change(sc, &func_params); 4115 4116 /* TX_STOPPED-->STARTED */ 4117 func_params.cmd = ECORE_F_CMD_TX_START; 4118 return (ecore_func_state_change(sc, &func_params)); 4119 } 4120 4121 return (0); 4122} 4123 4124static int 4125bxe_stop_queue(struct bxe_softc *sc, 4126 int index) 4127{ 4128 struct bxe_fastpath *fp = &sc->fp[index]; 4129 struct ecore_queue_state_params q_params = { NULL }; 4130 int rc; 4131 4132 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); 4133 4134 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 4135 /* We want to wait for completion in this context */ 4136 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 4137 4138 /* Stop the primary connection: */ 4139 4140 /* ...halt the connection */ 4141 q_params.cmd = ECORE_Q_CMD_HALT; 4142 rc = ecore_queue_state_change(sc, &q_params); 4143 if (rc) { 4144 return (rc); 4145 } 4146 4147 /* ...terminate the connection */ 4148 q_params.cmd = ECORE_Q_CMD_TERMINATE; 4149 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); 4150 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 4151 rc = ecore_queue_state_change(sc, &q_params); 4152 if (rc) { 4153 return (rc); 4154 } 4155 4156 /* ...delete cfc entry */ 4157 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 4158 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 4159 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 4160 return (ecore_queue_state_change(sc, &q_params)); 4161} 4162 4163/* wait for the outstanding SP commands */ 4164static inline uint8_t 4165bxe_wait_sp_comp(struct bxe_softc *sc, 4166 unsigned long mask) 4167{ 4168 unsigned long tmp; 4169 int tout = 5000; /* wait for 5 secs tops */ 4170 4171 while (tout--) { 4172 mb(); 4173 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 4174 return (TRUE); 4175 } 4176 4177 DELAY(1000); 4178 } 4179 4180 mb(); 4181 4182 tmp = atomic_load_acq_long(&sc->sp_state); 4183 if (tmp & mask) { 4184 BLOGE(sc, "Filtering completion timed out: " 4185 "sp_state 0x%lx, mask 0x%lx\n", 4186 tmp, mask); 4187 return (FALSE); 4188 } 4189 4190 return (TRUE); /* mask bits cleared between the final poll and this re-check */ 4191} 4192 4193static int 4194bxe_func_stop(struct bxe_softc *sc) 4195{ 4196 struct ecore_func_state_params func_params = { NULL }; 4197 int rc; 4198 4199 /* prepare parameters for function state transitions */ 4200
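 /*
  * A minimal sketch of the ecore function-state pattern used by this
  * routine and by bxe_reset_hw()/bxe_func_wait_started() (all names as
  * used elsewhere in this file, from ecore_sp.h):
  *
  *   struct ecore_func_state_params p = { NULL };
  *   bxe_set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); // block until done
  *   p.f_obj = &sc->func_obj;                        // function state object
  *   p.cmd   = ECORE_F_CMD_STOP;                     // or HW_RESET, TX_STOP, ...
  *   rc = ecore_func_state_change(sc, &p);           // issue the ramrod
  */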
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4201 func_params.f_obj = &sc->func_obj; 4202 func_params.cmd = ECORE_F_CMD_STOP; 4203 4204 /* 4205 * Try to stop the function the 'good way'. If it fails (in case 4206 * of a parity error during bxe_chip_cleanup()) and we are 4207 * not in a debug mode, perform a state transaction in order to 4208 * enable further HW_RESET transaction. 4209 */ 4210 rc = ecore_func_state_change(sc, &func_params); 4211 if (rc) { 4212 BLOGE(sc, "FUNC_STOP ramrod failed. " 4213 "Running a dry transaction\n"); 4214 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4215 return (ecore_func_state_change(sc, &func_params)); 4216 } 4217 4218 return (0); 4219} 4220 4221static int 4222bxe_reset_hw(struct bxe_softc *sc, 4223 uint32_t load_code) 4224{ 4225 struct ecore_func_state_params func_params = { NULL }; 4226 4227 /* Prepare parameters for function state transitions */ 4228 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4229 4230 func_params.f_obj = &sc->func_obj; 4231 func_params.cmd = ECORE_F_CMD_HW_RESET; 4232 4233 func_params.params.hw_init.load_phase = load_code; 4234 4235 return (ecore_func_state_change(sc, &func_params)); 4236} 4237 4238static void 4239bxe_int_disable_sync(struct bxe_softc *sc, 4240 int disable_hw) 4241{ 4242 if (disable_hw) { 4243 /* prevent the HW from sending interrupts */ 4244 bxe_int_disable(sc); 4245 } 4246 4247 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4248 /* make sure all ISRs are done */ 4249 4250 /* XXX make sure sp_task is not running */ 4251 /* cancel and flush work queues */ 4252} 4253 4254static void 4255bxe_chip_cleanup(struct bxe_softc *sc, 4256 uint32_t unload_mode, 4257 uint8_t keep_link) 4258{ 4259 int port = SC_PORT(sc); 4260 struct ecore_mcast_ramrod_params rparam = { NULL }; 4261 uint32_t reset_code; 4262 int i, rc = 0; 4263 4264 bxe_drain_tx_queues(sc); 4265 4266 /* give HW time to discard old tx messages */ 4267 DELAY(1000); 4268 4269 /* Clean all ETH MACs */ 4270 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4271 if (rc < 0) { 4272 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4273 } 4274 4275 /* Clean up UC list */ 4276 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4277 if (rc < 0) { 4278 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4279 } 4280 4281 /* Disable LLH */ 4282 if (!CHIP_IS_E1(sc)) { 4283 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4284 } 4285 4286 /* Set "drop all" to stop Rx */ 4287 4288 /* 4289 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4290 * a race between the completion code and this code. 4291 */ 4292 BXE_MCAST_LOCK(sc); 4293 4294 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4295 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4296 } else { 4297 bxe_set_storm_rx_mode(sc); 4298 } 4299 4300 /* Clean up multicast configuration */ 4301 rparam.mcast_obj = &sc->mcast_obj; 4302 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4303 if (rc < 0) { 4304 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4305 } 4306 4307 BXE_MCAST_UNLOCK(sc); 4308 4309 // XXX bxe_iov_chip_cleanup(sc); 4310 4311 /* 4312 * Send the UNLOAD_REQUEST to the MCP. This will return if 4313 * this function should perform FUNCTION, PORT, or COMMON HW 4314 * reset. 
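 * For example (mirroring bxe_nic_unload_no_mcp() above): the last
 * function on the path gets UNLOAD_COMMON, the last function on its
 * port gets UNLOAD_PORT, and any other function gets UNLOAD_FUNCTION,
 * so only the final driver instance tears down the shared blocks.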
4315 */ 4316 reset_code = bxe_send_unload_req(sc, unload_mode); 4317 4318 /* 4319 * (assumption: No Attention from MCP at this stage) 4320 * PMF probably in the middle of TX disable/enable transaction 4321 */ 4322 rc = bxe_func_wait_started(sc); 4323 if (rc) { 4324 BLOGE(sc, "bxe_func_wait_started failed\n"); 4325 } 4326 4327 /* 4328 * Close multi and leading connections 4329 * Completions for ramrods are collected in a synchronous way 4330 */ 4331 for (i = 0; i < sc->num_queues; i++) { 4332 if (bxe_stop_queue(sc, i)) { 4333 goto unload_error; 4334 } 4335 } 4336 4337 /* 4338 * If the SP settings didn't get completed so far, something 4339 * very wrong has happened. 4340 */ 4341 if (!bxe_wait_sp_comp(sc, ~0x0UL)) { 4342 BLOGE(sc, "Common slow path ramrods got stuck!\n"); 4343 } 4344 4345unload_error: 4346 4347 rc = bxe_func_stop(sc); 4348 if (rc) { 4349 BLOGE(sc, "Function stop failed!\n"); 4350 } 4351 4352 /* disable HW interrupts */ 4353 bxe_int_disable_sync(sc, TRUE); 4354 4355 /* detach interrupts */ 4356 bxe_interrupt_detach(sc); 4357 4358 /* Reset the chip */ 4359 rc = bxe_reset_hw(sc, reset_code); 4360 if (rc) { 4361 BLOGE(sc, "Hardware reset failed\n"); 4362 } 4363 4364 /* Report UNLOAD_DONE to MCP */ 4365 bxe_send_unload_done(sc, keep_link); 4366} 4367 4368static void 4369bxe_disable_close_the_gate(struct bxe_softc *sc) 4370{ 4371 uint32_t val; 4372 int port = SC_PORT(sc); 4373 4374 BLOGD(sc, DBG_LOAD, 4375 "Disabling 'close the gates'\n"); 4376 4377 if (CHIP_IS_E1(sc)) { 4378 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4379 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4380 val = REG_RD(sc, addr); 4381 val &= ~(0x300); 4382 REG_WR(sc, addr, val); 4383 } else { 4384 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 4385 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 4386 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 4387 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 4388 } 4389} 4390 4391/* 4392 * Cleans the objects that have internal lists, without sending 4393 * ramrods. Should be run when interrupts are disabled. 4394 */ 4395static void 4396bxe_squeeze_objects(struct bxe_softc *sc) 4397{ 4398 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 4399 struct ecore_mcast_ramrod_params rparam = { NULL }; 4400 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 4401 int rc; 4402 4403 /* Cleanup MACs' object first... */ 4404 4405 /* Wait for completion of the request */ 4406 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 4407 /* Perform a dry cleanup */ 4408 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 4409 4410 /* Clean ETH primary MAC */ 4411 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); 4412 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 4413 &ramrod_flags); 4414 if (rc != 0) { 4415 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); 4416 } 4417 4418 /* Cleanup UC list */ 4419 vlan_mac_flags = 0; 4420 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); 4421 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, 4422 &ramrod_flags); 4423 if (rc != 0) { 4424 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); 4425 } 4426 4427 /* Now clean mcast object... */ 4428 4429 rparam.mcast_obj = &sc->mcast_obj; 4430 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 4431 4432 /* Add a DEL command...
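 * (ECORE_MCAST_CMD_DEL clears the whole multicast configuration with a
 * single ramrod; the ECORE_MCAST_CMD_CONT loop that follows then drains
 * any commands still pending on the object.)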
*/ 4433 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4434 if (rc < 0) { 4435 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4436 } 4437 4438 /* now wait until all pending commands are cleared */ 4439 4440 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4441 while (rc != 0) { 4442 if (rc < 0) { 4443 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); 4444 return; 4445 } 4446 4447 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4448 } 4449} 4450 4451/* stop the controller */ 4452static __noinline int 4453bxe_nic_unload(struct bxe_softc *sc, 4454 uint32_t unload_mode, 4455 uint8_t keep_link) 4456{ 4457 uint8_t global = FALSE; 4458 uint32_t val; 4459 4460 BXE_CORE_LOCK_ASSERT(sc); 4461 4462 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); 4463 4464 /* mark driver as unloaded in shmem2 */ 4465 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 4466 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 4467 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 4468 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 4469 } 4470 4471 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && 4472 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { 4473 /* 4474 * We can get here if the driver has been unloaded 4475 * during parity error recovery and is either waiting for a 4476 * leader to complete or for other functions to unload and 4477 * then ifconfig down has been issued. In this case we want to 4478 * unload and let the other functions complete the recovery 4479 * process. 4480 */ 4481 sc->recovery_state = BXE_RECOVERY_DONE; 4482 sc->is_leader = 0; 4483 bxe_release_leader_lock(sc); 4484 mb(); 4485 4486 BLOGD(sc, DBG_LOAD, "Releasing leadership...\n"); 4487 BLOGE(sc, "Can't unload in closed or error state\n"); 4488 return (-1); 4489 } 4490 4491 /* 4492 * Nothing to do during unload if the previous bxe_nic_load() 4493 * did not complete successfully - all resources are released. 4494 */ 4495 if ((sc->state == BXE_STATE_CLOSED) || 4496 (sc->state == BXE_STATE_ERROR)) { 4497 return (0); 4498 } 4499 4500 sc->state = BXE_STATE_CLOSING_WAITING_HALT; 4501 mb(); 4502 4503 /* stop tx */ 4504 bxe_tx_disable(sc); 4505 4506 sc->rx_mode = BXE_RX_MODE_NONE; 4507 /* XXX set rx mode ??? */ 4508 4509 if (IS_PF(sc) && !sc->grcdump_done) { 4510 /* set ALWAYS_ALIVE bit in shmem */ 4511 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 4512 4513 bxe_drv_pulse(sc); 4514 4515 bxe_stats_handle(sc, STATS_EVENT_STOP); 4516 bxe_save_statistics(sc); 4517 } 4518 4519 /* wait till consumers catch up with producers in all queues */ 4520 bxe_drain_tx_queues(sc); 4521 4522 /* if VF, indicate to the PF that this function is going down (the PF will 4523 * delete sp elements and clear initializations) 4524 */ 4525 if (IS_VF(sc)) { 4526 ; /* bxe_vfpf_close_vf(sc); */ 4527 } else if (unload_mode != UNLOAD_RECOVERY) { 4528 /* if this is a normal/close unload need to clean up chip */ 4529 if (!sc->grcdump_done) 4530 bxe_chip_cleanup(sc, unload_mode, keep_link); 4531 } else { 4532 /* Send the UNLOAD_REQUEST to the MCP */ 4533 bxe_send_unload_req(sc, unload_mode); 4534 4535 /* 4536 * Prevent transactions to the host from the functions on the 4537 * engine that doesn't reset global blocks in case of a global 4538 * attention, once global blocks are reset and gates are opened 4539 * (the engine whose leader will perform the recovery 4540 * last).
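 * (This is why bxe_pf_disable() is called below on non-E1x chips: it
 * keeps this function from mastering transactions while the other
 * engine's leader resets the global blocks.)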
4541 */ 4542 if (!CHIP_IS_E1x(sc)) { 4543 bxe_pf_disable(sc); 4544 } 4545 4546 /* disable HW interrupts */ 4547 bxe_int_disable_sync(sc, TRUE); 4548 4549 /* detach interrupts */ 4550 bxe_interrupt_detach(sc); 4551 4552 /* Report UNLOAD_DONE to MCP */ 4553 bxe_send_unload_done(sc, FALSE); 4554 } 4555 4556 /* 4557 * At this stage no more interrupts will arrive so we may safely clean 4558 * the queue'able objects here in case they failed to get cleaned so far. 4559 */ 4560 if (IS_PF(sc)) { 4561 bxe_squeeze_objects(sc); 4562 } 4563 4564 /* There should be no more pending SP commands at this stage */ 4565 sc->sp_state = 0; 4566 4567 sc->port.pmf = 0; 4568 4569 bxe_free_fp_buffers(sc); 4570 4571 if (IS_PF(sc)) { 4572 bxe_free_mem(sc); 4573 } 4574 4575 bxe_free_fw_stats_mem(sc); 4576 4577 sc->state = BXE_STATE_CLOSED; 4578 4579 /* 4580 * Check if there are pending parity attentions. If there are - set 4581 * RECOVERY_IN_PROGRESS. 4582 */ 4583 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4584 bxe_set_reset_in_progress(sc); 4585 4586 /* Set RESET_IS_GLOBAL if needed */ 4587 if (global) { 4588 bxe_set_reset_global(sc); 4589 } 4590 } 4591 4592 /* 4593 * The last driver must disable a "close the gate" if there is no 4594 * parity attention or "process kill" pending. 4595 */ 4596 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4597 bxe_reset_is_done(sc, SC_PATH(sc))) { 4598 bxe_disable_close_the_gate(sc); 4599 } 4600 4601 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4602 4603 return (0); 4604} 4605 4606/* 4607 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4608 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4609 */ 4610static int 4611bxe_ifmedia_update(struct ifnet *ifp) 4612{ 4613 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); 4614 struct ifmedia *ifm; 4615 4616 ifm = &sc->ifmedia; 4617 4618 /* We only support Ethernet media type. */ 4619 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4620 return (EINVAL); 4621 } 4622 4623 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4624 case IFM_AUTO: 4625 break; 4626 case IFM_10G_CX4: 4627 case IFM_10G_SR: 4628 case IFM_10G_T: 4629 case IFM_10G_TWINAX: 4630 default: 4631 /* We don't support changing the media type. */ 4632 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4633 IFM_SUBTYPE(ifm->ifm_media)); 4634 return (EINVAL); 4635 } 4636 4637 return (0); 4638} 4639 4640/* 4641 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4642 */ 4643static void 4644bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4645{ 4646 struct bxe_softc *sc = if_getsoftc(ifp); 4647 4648 /* Report link down if the driver isn't running. */ 4649 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 4650 ifmr->ifm_active |= IFM_NONE; 4651 return; 4652 } 4653 4654 /* Setup the default interface info. 
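 * (IFM_AVALID marks ifm_status as valid for ifconfig(8); IFM_ACTIVE is
 * only ORed in below once the PHY reports link up.)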
*/ 4655 ifmr->ifm_status = IFM_AVALID; 4656 ifmr->ifm_active = IFM_ETHER; 4657 4658 if (sc->link_vars.link_up) { 4659 ifmr->ifm_status |= IFM_ACTIVE; 4660 } else { 4661 ifmr->ifm_active |= IFM_NONE; 4662 return; 4663 } 4664 4665 ifmr->ifm_active |= sc->media; 4666 4667 if (sc->link_vars.duplex == DUPLEX_FULL) { 4668 ifmr->ifm_active |= IFM_FDX; 4669 } else { 4670 ifmr->ifm_active |= IFM_HDX; 4671 } 4672} 4673 4674static int 4675bxe_ioctl_nvram(struct bxe_softc *sc, 4676 uint32_t priv_op, 4677 struct ifreq *ifr) 4678{ 4679 struct bxe_nvram_data nvdata_base; 4680 struct bxe_nvram_data *nvdata; 4681 int len; 4682 int error = 0; 4683 4684 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4685 4686 len = (sizeof(struct bxe_nvram_data) + 4687 nvdata_base.len - 4688 sizeof(uint32_t)); 4689 4690 if (len > sizeof(struct bxe_nvram_data)) { 4691 if ((nvdata = (struct bxe_nvram_data *) 4692 malloc(len, M_DEVBUF, 4693 (M_NOWAIT | M_ZERO))) == NULL) { 4694 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4695 return (1); 4696 } 4697 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4698 } else { 4699 nvdata = &nvdata_base; 4700 } 4701 4702 if (priv_op == BXE_IOC_RD_NVRAM) { 4703 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4704 nvdata->offset, nvdata->len); 4705 error = bxe_nvram_read(sc, 4706 nvdata->offset, 4707 (uint8_t *)nvdata->value, 4708 nvdata->len); 4709 copyout(nvdata, ifr->ifr_data, len); 4710 } else { /* BXE_IOC_WR_NVRAM */ 4711 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4712 nvdata->offset, nvdata->len); 4713 copyin(ifr->ifr_data, nvdata, len); 4714 error = bxe_nvram_write(sc, 4715 nvdata->offset, 4716 (uint8_t *)nvdata->value, 4717 nvdata->len); 4718 } 4719 4720 if (len > sizeof(struct bxe_nvram_data)) { 4721 free(nvdata, M_DEVBUF); 4722 } 4723 4724 return (error); 4725} 4726 4727static int 4728bxe_ioctl_stats_show(struct bxe_softc *sc, 4729 uint32_t priv_op, 4730 struct ifreq *ifr) 4731{ 4732 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4733 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4734 caddr_t p_tmp; 4735 uint32_t *offset; 4736 int i; 4737 4738 switch (priv_op) 4739 { 4740 case BXE_IOC_STATS_SHOW_NUM: 4741 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4742 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4743 BXE_NUM_ETH_STATS; 4744 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4745 STAT_NAME_LEN; 4746 return (0); 4747 4748 case BXE_IOC_STATS_SHOW_STR: 4749 memset(ifr->ifr_data, 0, str_size); 4750 p_tmp = ifr->ifr_data; 4751 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4752 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4753 p_tmp += STAT_NAME_LEN; 4754 } 4755 return (0); 4756 4757 case BXE_IOC_STATS_SHOW_CNT: 4758 memset(ifr->ifr_data, 0, stats_size); 4759 p_tmp = ifr->ifr_data; 4760 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4761 offset = ((uint32_t *)&sc->eth_stats + 4762 bxe_eth_stats_arr[i].offset); 4763 switch (bxe_eth_stats_arr[i].size) { 4764 case 4: 4765 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4766 break; 4767 case 8: 4768 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4769 break; 4770 default: 4771 *((uint64_t *)p_tmp) = 0; 4772 } 4773 p_tmp += sizeof(uint64_t); 4774 } 4775 return (0); 4776 4777 default: 4778 return (-1); 4779 } 4780} 4781 4782static void 4783bxe_handle_chip_tq(void *context, 4784 int pending) 4785{ 4786 struct bxe_softc *sc = (struct bxe_softc *)context; 4787 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4788 4789 switch (work) 4790 { 4791 4792 case 
CHIP_TQ_REINIT: 4793 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 4794 /* restart the interface */ 4795 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4796 bxe_periodic_stop(sc); 4797 BXE_CORE_LOCK(sc); 4798 bxe_stop_locked(sc); 4799 bxe_init_locked(sc); 4800 BXE_CORE_UNLOCK(sc); 4801 } 4802 break; 4803 4804 default: 4805 break; 4806 } 4807} 4808 4809/* 4810 * Handles any IOCTL calls from the operating system. 4811 * 4812 * Returns: 4813 * 0 = Success, >0 Failure 4814 */ 4815static int 4816bxe_ioctl(if_t ifp, 4817 u_long command, 4818 caddr_t data) 4819{ 4820 struct bxe_softc *sc = if_getsoftc(ifp); 4821 struct ifreq *ifr = (struct ifreq *)data; 4822 struct bxe_nvram_data *nvdata; 4823 uint32_t priv_op; 4824 int mask = 0; 4825 int reinit = 0; 4826 int error = 0; 4827 4828 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4829 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4830 4831 switch (command) 4832 { 4833 case SIOCSIFMTU: 4834 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4835 ifr->ifr_mtu); 4836 4837 if (sc->mtu == ifr->ifr_mtu) { 4838 /* nothing to change */ 4839 break; 4840 } 4841 4842 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4843 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4844 ifr->ifr_mtu, mtu_min, mtu_max); 4845 error = EINVAL; 4846 break; 4847 } 4848 4849 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4850 (unsigned long)ifr->ifr_mtu); 4851 /* 4852 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4853 (unsigned long)ifr->ifr_mtu); 4854 XXX - Not sure why it needs to be atomic 4855 */ 4856 if_setmtu(ifp, ifr->ifr_mtu); 4857 reinit = 1; 4858 break; 4859 4860 case SIOCSIFFLAGS: 4861 /* toggle the interface state up or down */ 4862 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4863 4864 BXE_CORE_LOCK(sc); 4865 /* check if the interface is up */ 4866 if (if_getflags(ifp) & IFF_UP) { 4867 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4868 /* set the receive mode flags */ 4869 bxe_set_rx_mode(sc); 4870 } else { 4871 bxe_init_locked(sc); 4872 } 4873 } else { 4874 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4875 bxe_periodic_stop(sc); 4876 bxe_stop_locked(sc); 4877 } 4878 } 4879 BXE_CORE_UNLOCK(sc); 4880 4881 break; 4882 4883 case SIOCADDMULTI: 4884 case SIOCDELMULTI: 4885 /* add/delete multicast addresses */ 4886 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4887 4888 /* check if the interface is up */ 4889 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4890 /* set the receive mode flags */ 4891 BXE_CORE_LOCK(sc); 4892 bxe_set_rx_mode(sc); 4893 BXE_CORE_UNLOCK(sc); 4894 } 4895 4896 break; 4897 4898 case SIOCSIFCAP: 4899 /* find out which capabilities have changed */ 4900 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); 4901 4902 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4903 mask); 4904 4905 /* toggle the LRO capabilities enable flag */ 4906 if (mask & IFCAP_LRO) { 4907 if_togglecapenable(ifp, IFCAP_LRO); 4908 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4909 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); 4910 reinit = 1; 4911 } 4912 4913 /* toggle the TXCSUM checksum capabilities enable flag */ 4914 if (mask & IFCAP_TXCSUM) { 4915 if_togglecapenable(ifp, IFCAP_TXCSUM); 4916 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4917 (if_getcapenable(ifp) & IFCAP_TXCSUM) ?
"ON" : "OFF"); 4918 if (if_getcapenable(ifp) & IFCAP_TXCSUM) { 4919 if_sethwassistbits(ifp, (CSUM_IP | 4920 CSUM_TCP | 4921 CSUM_UDP | 4922 CSUM_TSO | 4923 CSUM_TCP_IPV6 | 4924 CSUM_UDP_IPV6), 0); 4925 } else { 4926 if_clearhwassist(ifp); /* XXX */ 4927 } 4928 } 4929 4930 /* toggle the RXCSUM checksum capabilities enable flag */ 4931 if (mask & IFCAP_RXCSUM) { 4932 if_togglecapenable(ifp, IFCAP_RXCSUM); 4933 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4934 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); 4935 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 4936 if_sethwassistbits(ifp, (CSUM_IP | 4937 CSUM_TCP | 4938 CSUM_UDP | 4939 CSUM_TSO | 4940 CSUM_TCP_IPV6 | 4941 CSUM_UDP_IPV6), 0); 4942 } else { 4943 if_clearhwassist(ifp); /* XXX */ 4944 } 4945 } 4946 4947 /* toggle TSO4 capabilities enabled flag */ 4948 if (mask & IFCAP_TSO4) { 4949 if_togglecapenable(ifp, IFCAP_TSO4); 4950 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4951 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); 4952 } 4953 4954 /* toggle TSO6 capabilities enabled flag */ 4955 if (mask & IFCAP_TSO6) { 4956 if_togglecapenable(ifp, IFCAP_TSO6); 4957 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4958 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); 4959 } 4960 4961 /* toggle VLAN_HWTSO capabilities enabled flag */ 4962 if (mask & IFCAP_VLAN_HWTSO) { 4963 4964 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 4965 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4966 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4967 } 4968 4969 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4970 if (mask & IFCAP_VLAN_HWCSUM) { 4971 /* XXX investigate this... */ 4972 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4973 error = EINVAL; 4974 } 4975 4976 /* toggle VLAN_MTU capabilities enable flag */ 4977 if (mask & IFCAP_VLAN_MTU) { 4978 /* XXX investigate this... */ 4979 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4980 error = EINVAL; 4981 } 4982 4983 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4984 if (mask & IFCAP_VLAN_HWTAGGING) { 4985 /* XXX investigate this... */ 4986 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4987 error = EINVAL; 4988 } 4989 4990 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4991 if (mask & IFCAP_VLAN_HWFILTER) { 4992 /* XXX investigate this... */ 4993 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4994 error = EINVAL; 4995 } 4996 4997 /* XXX not yet... 
4998 * IFCAP_WOL_MAGIC 4999 */ 5000 5001 break; 5002 5003 case SIOCSIFMEDIA: 5004 case SIOCGIFMEDIA: 5005 /* set/get interface media */ 5006 BLOGD(sc, DBG_IOCTL, 5007 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 5008 (command & 0xff)); 5009 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 5010 break; 5011 5012 case SIOCGPRIVATE_0: 5013 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); 5014 5015 switch (priv_op) 5016 { 5017 case BXE_IOC_RD_NVRAM: 5018 case BXE_IOC_WR_NVRAM: 5019 nvdata = (struct bxe_nvram_data *)ifr->ifr_data; 5020 BLOGD(sc, DBG_IOCTL, 5021 "Received Private NVRAM ioctl addr=0x%x size=%u\n", 5022 nvdata->offset, nvdata->len); 5023 error = bxe_ioctl_nvram(sc, priv_op, ifr); 5024 break; 5025 5026 case BXE_IOC_STATS_SHOW_NUM: 5027 case BXE_IOC_STATS_SHOW_STR: 5028 case BXE_IOC_STATS_SHOW_CNT: 5029 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", 5030 priv_op); 5031 error = bxe_ioctl_stats_show(sc, priv_op, ifr); 5032 break; 5033 5034 default: 5035 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); 5036 error = EINVAL; 5037 break; 5038 } 5039 5040 break; 5041 5042 default: 5043 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 5044 (command & 0xff)); 5045 error = ether_ioctl(ifp, command, data); 5046 break; 5047 } 5048 5049 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 5050 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 5051 "Re-initializing hardware from IOCTL change\n"); 5052 bxe_periodic_stop(sc); 5053 BXE_CORE_LOCK(sc); 5054 bxe_stop_locked(sc); 5055 bxe_init_locked(sc); 5056 BXE_CORE_UNLOCK(sc); 5057 } 5058 5059 return (error); 5060} 5061 5062static __noinline void 5063bxe_dump_mbuf(struct bxe_softc *sc, 5064 struct mbuf *m, 5065 uint8_t contents) 5066{ 5067 char * type; 5068 int i = 0; 5069 5070 if (!(sc->debug & DBG_MBUF)) { 5071 return; 5072 } 5073 5074 if (m == NULL) { 5075 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 5076 return; 5077 } 5078 5079 while (m) { 5080 BLOGD(sc, DBG_MBUF, 5081 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 5082 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 5083 5084 if (m->m_flags & M_PKTHDR) { 5085 BLOGD(sc, DBG_MBUF, 5086 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 5087 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 5088 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 5089 } 5090 5091 if (m->m_flags & M_EXT) { 5092 switch (m->m_ext.ext_type) { 5093 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 5094 case EXT_SFBUF: type = "EXT_SFBUF"; break; 5095 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 5096 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 5097 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 5098 case EXT_PACKET: type = "EXT_PACKET"; break; 5099 case EXT_MBUF: type = "EXT_MBUF"; break; 5100 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 5101 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 5102 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 5103 case EXT_EXTREF: type = "EXT_EXTREF"; break; 5104 default: type = "UNKNOWN"; break; 5105 } 5106 5107 BLOGD(sc, DBG_MBUF, 5108 "%02d: - m_ext: %p ext_size=%d type=%s\n", 5109 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 5110 } 5111 5112 if (contents) { 5113 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 5114 } 5115 5116 m = m->m_next; 5117 i++; 5118 } 5119} 5120 5121/* 5122 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 5123 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 
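 * Worked example (hypothetical sizes): with an MSS of 1460 and 13
 * segments, every 10-segment data window must sum to at least 1460
 * bytes. Thirteen 150-byte data segments pass (10 * 150 = 1500 >= 1460);
 * 100-byte segments fail (10 * 100 = 1000 < 1460) and the mbuf must be
 * defragmented before it can be sent.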
5124 * The window: 3 bds are excluded = 1 for the headers BD + 2 for the parse BD and last BD. 5125 * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10. 5126 * Returns: 0 if OK to send, 1 if packet needs further defragmentation 5127 */ 5128static int 5129bxe_chktso_window(struct bxe_softc *sc, 5130 int nsegs, 5131 bus_dma_segment_t *segs, 5132 struct mbuf *m) 5133{ 5134 uint32_t num_wnds, wnd_size, wnd_sum; 5135 int32_t frag_idx, wnd_idx; 5136 unsigned short lso_mss; 5137 int defrag; 5138 5139 defrag = 0; 5140 wnd_sum = 0; 5141 wnd_size = 10; 5142 num_wnds = nsegs - wnd_size; 5143 lso_mss = htole16(m->m_pkthdr.tso_segsz); 5144 5145 /* 5146 * The total header lengths (Eth+IP+TCP) are in the first FreeBSD mbuf, so 5147 * calculate the first window sum of data while skipping the first segment, 5148 * assuming it is the header. 5149 */ 5150 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 5151 wnd_sum += htole16(segs[frag_idx].ds_len); 5152 } 5153 5154 /* check the first 10 bd window size */ 5155 if (wnd_sum < lso_mss) { 5156 return (1); 5157 } 5158 5159 /* run through the windows */ 5160 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 5161 /* subtract the first mbuf->m_len of the last wndw(-header) */ 5162 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 5163 /* add the next mbuf len to the len of our new window */ 5164 wnd_sum += htole16(segs[frag_idx].ds_len); 5165 if (wnd_sum < lso_mss) { 5166 return (1); 5167 } 5168 } 5169 5170 return (0); 5171} 5172 5173static uint8_t 5174bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 5175 struct mbuf *m, 5176 uint32_t *parsing_data) 5177{ 5178 struct ether_vlan_header *eh = NULL; 5179 struct ip *ip4 = NULL; 5180 struct ip6_hdr *ip6 = NULL; 5181 caddr_t ip = NULL; 5182 struct tcphdr *th = NULL; 5183 int e_hlen, ip_hlen, l4_off; 5184 uint16_t proto; 5185 5186 if (m->m_pkthdr.csum_flags == CSUM_IP) { 5187 /* no L4 checksum offload needed */ 5188 return (0); 5189 } 5190 5191 /* get the Ethernet header */ 5192 eh = mtod(m, struct ether_vlan_header *); 5193 5194 /* handle VLAN encapsulation if present */ 5195 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5196 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5197 proto = ntohs(eh->evl_proto); 5198 } else { 5199 e_hlen = ETHER_HDR_LEN; 5200 proto = ntohs(eh->evl_encap_proto); 5201 } 5202 5203 switch (proto) { 5204 case ETHERTYPE_IP: 5205 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5206 ip4 = (m->m_len < sizeof(struct ip)) ? 5207 (struct ip *)m->m_next->m_data : 5208 (struct ip *)(m->m_data + e_hlen); 5209 /* ip_hl is number of 32-bit words */ 5210 ip_hlen = (ip4->ip_hl << 2); 5211 ip = (caddr_t)ip4; 5212 break; 5213 case ETHERTYPE_IPV6: 5214 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5215 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5216 (struct ip6_hdr *)m->m_next->m_data : 5217 (struct ip6_hdr *)(m->m_data + e_hlen); 5218 /* XXX cannot support offload with IPv6 extensions */ 5219 ip_hlen = sizeof(struct ip6_hdr); 5220 ip = (caddr_t)ip6; 5221 break; 5222 default: 5223 /* We can't offload in this case... */ 5224 /* XXX error stat ???
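 * (no eth_q_stats counter records this unsupported-protocol
 * fall-through today; a counter could be added here)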
*/ 5225 return (0); 5226 } 5227 5228 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5229 l4_off = (e_hlen + ip_hlen); 5230 5231 *parsing_data |= 5232 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5233 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5234 5235 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5236 CSUM_TSO | 5237 CSUM_TCP_IPV6)) { 5238 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5239 th = (struct tcphdr *)(ip + ip_hlen); 5240 /* th_off is number of 32-bit words */ 5241 *parsing_data |= ((th->th_off << 5242 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5243 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5244 return (l4_off + (th->th_off << 2)); /* entire header length */ 5245 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5246 CSUM_UDP_IPV6)) { 5247 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5248 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5249 } else { 5250 /* XXX error stat ??? */ 5251 return (0); 5252 } 5253} 5254 5255static uint8_t 5256bxe_set_pbd_csum(struct bxe_fastpath *fp, 5257 struct mbuf *m, 5258 struct eth_tx_parse_bd_e1x *pbd) 5259{ 5260 struct ether_vlan_header *eh = NULL; 5261 struct ip *ip4 = NULL; 5262 struct ip6_hdr *ip6 = NULL; 5263 caddr_t ip = NULL; 5264 struct tcphdr *th = NULL; 5265 struct udphdr *uh = NULL; 5266 int e_hlen, ip_hlen; 5267 uint16_t proto; 5268 uint8_t hlen; 5269 uint16_t tmp_csum; 5270 uint32_t *tmp_uh; 5271 5272 /* get the Ethernet header */ 5273 eh = mtod(m, struct ether_vlan_header *); 5274 5275 /* handle VLAN encapsulation if present */ 5276 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5277 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5278 proto = ntohs(eh->evl_proto); 5279 } else { 5280 e_hlen = ETHER_HDR_LEN; 5281 proto = ntohs(eh->evl_encap_proto); 5282 } 5283 5284 switch (proto) { 5285 case ETHERTYPE_IP: 5286 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5287 ip4 = (m->m_len < sizeof(struct ip)) ? 5288 (struct ip *)m->m_next->m_data : 5289 (struct ip *)(m->m_data + e_hlen); 5290 /* ip_hl is number of 32-bit words */ 5291 ip_hlen = (ip4->ip_hl << 1); 5292 ip = (caddr_t)ip4; 5293 break; 5294 case ETHERTYPE_IPV6: 5295 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5296 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5297 (struct ip6_hdr *)m->m_next->m_data : 5298 (struct ip6_hdr *)(m->m_data + e_hlen); 5299 /* XXX cannot support offload with IPv6 extensions */ 5300 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5301 ip = (caddr_t)ip6; 5302 break; 5303 default: 5304 /* We can't offload in this case... */ 5305 /* XXX error stat ??? 
*/ 5306 return (0); 5307 } 5308 5309 hlen = (e_hlen >> 1); 5310 5311 /* note that rest of global_data is indirectly zeroed here */ 5312 if (m->m_flags & M_VLANTAG) { 5313 pbd->global_data = 5314 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5315 } else { 5316 pbd->global_data = htole16(hlen); 5317 } 5318 5319 pbd->ip_hlen_w = ip_hlen; 5320 5321 hlen += pbd->ip_hlen_w; 5322 5323 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5324 5325 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5326 CSUM_TSO | 5327 CSUM_TCP_IPV6)) { 5328 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5329 /* th_off is number of 32-bit words */ 5330 hlen += (uint16_t)(th->th_off << 1); 5331 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5332 CSUM_UDP_IPV6)) { 5333 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5334 hlen += (sizeof(struct udphdr) / 2); 5335 } else { 5336 /* valid case as only CSUM_IP was set */ 5337 return (0); 5338 } 5339 5340 pbd->total_hlen_w = htole16(hlen); 5341 5342 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5343 CSUM_TSO | 5344 CSUM_TCP_IPV6)) { 5345 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5346 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5347 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5348 CSUM_UDP_IPV6)) { 5349 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5350 5351 /* 5352 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5353 * checksums and does not know anything about the UDP header and where 5354 * the checksum field is located. It only knows about TCP. Therefore 5355 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5356 * offload. Since the checksum field offset for TCP is 16 bytes and 5357 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5358 * bytes less than the start of the UDP header. This allows the 5359 * hardware to write the checksum in the correct spot. But the 5360 * hardware will compute a checksum which includes the last 10 bytes 5361 * of the IP header. To correct this we tweak the stack computed 5362 * pseudo checksum by folding in the calculation of the inverse 5363 * checksum for those final 10 bytes of the IP header. This allows 5364 * the correct checksum to be computed by the hardware. 5365 */ 5366 5367 /* set pointer 10 bytes before UDP header */ 5368 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5369 5370 /* calculate a pseudo header checksum over the first 10 bytes */ 5371 tmp_csum = in_pseudo(*tmp_uh, 5372 *(tmp_uh + 1), 5373 *(uint16_t *)(tmp_uh + 2)); 5374 5375 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5376 } 5377 5378 return (hlen * 2); /* entire header length, number of bytes */ 5379} 5380 5381static void 5382bxe_set_pbd_lso_e2(struct mbuf *m, 5383 uint32_t *parsing_data) 5384{ 5385 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5386 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5387 ETH_TX_PARSE_BD_E2_LSO_MSS); 5388 5389 /* XXX test for IPv6 with extension header... */ 5390#if 0 5391 struct ip6_hdr *ip6; 5392 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5393 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5394#endif 5395} 5396 5397static void 5398bxe_set_pbd_lso(struct mbuf *m, 5399 struct eth_tx_parse_bd_e1x *pbd) 5400{ 5401 struct ether_vlan_header *eh = NULL; 5402 struct ip *ip = NULL; 5403 struct tcphdr *th = NULL; 5404 int e_hlen; 5405 5406 /* get the Ethernet header */ 5407 eh = mtod(m, struct ether_vlan_header *); 5408 5409 /* handle VLAN encapsulation if present */ 5410 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5411 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; 5412 5413 /* get the IP and TCP header, with LSO entire header in first mbuf */ 5414 /* XXX assuming IPv4 */ 5415 ip = (struct ip *)(m->m_data + e_hlen); 5416 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 5417 5418 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); 5419 pbd->tcp_send_seq = ntohl(th->th_seq); 5420 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); 5421 5422#if 1 5423 /* XXX IPv4 */ 5424 pbd->ip_id = ntohs(ip->ip_id); 5425 pbd->tcp_pseudo_csum = 5426 ntohs(in_pseudo(ip->ip_src.s_addr, 5427 ip->ip_dst.s_addr, 5428 htons(IPPROTO_TCP))); 5429#else 5430 /* XXX IPv6 */ 5431 pbd->tcp_pseudo_csum = 5432 ntohs(in_pseudo(&ip6->ip6_src, 5433 &ip6->ip6_dst, 5434 htons(IPPROTO_TCP))); 5435#endif 5436 5437 pbd->global_data |= 5438 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 5439} 5440 5441/* 5442 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory 5443 * visible to the controller. 5444 * 5445 * If an mbuf is submitted to this routine and cannot be given to the 5446 * controller (e.g. it has too many fragments) then the function may free 5447 * the mbuf and return to the caller. 5448 * 5449 * Returns: 5450 * 0 = Success, !0 = Failure 5451 * Note the side effect that an mbuf may be freed if it causes a problem. 5452 */ 5453static int 5454bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) 5455{ 5456 bus_dma_segment_t segs[32]; 5457 struct mbuf *m0; 5458 struct bxe_sw_tx_bd *tx_buf; 5459 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 5460 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 5461 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ 5462 struct eth_tx_bd *tx_data_bd; 5463 struct eth_tx_bd *tx_total_pkt_size_bd; 5464 struct eth_tx_start_bd *tx_start_bd; 5465 uint16_t bd_prod, pkt_prod, total_pkt_size; 5466 uint8_t mac_type; 5467 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; 5468 struct bxe_softc *sc; 5469 uint16_t tx_bd_avail; 5470 struct ether_vlan_header *eh; 5471 uint32_t pbd_e2_parsing_data = 0; 5472 uint8_t hlen = 0; 5473 int tmp_bd; 5474 int i; 5475 5476 sc = fp->sc; 5477 5478 M_ASSERTPKTHDR(*m_head); 5479 5480 m0 = *m_head; 5481 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; 5482 tx_start_bd = NULL; 5483 tx_data_bd = NULL; 5484 tx_total_pkt_size_bd = NULL; 5485 5486 /* get the H/W pointer for packets and BDs */ 5487 pkt_prod = fp->tx_pkt_prod; 5488 bd_prod = fp->tx_bd_prod; 5489 5490 mac_type = UNICAST_ADDRESS; 5491 5492 /* map the mbuf into the next open DMAable memory */ 5493 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; 5494 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5495 tx_buf->m_map, m0, 5496 segs, &nsegs, BUS_DMA_NOWAIT); 5497 5498 /* mapping errors */ 5499 if (__predict_false(error != 0)) { 5500 fp->eth_q_stats.tx_dma_mapping_failure++; 5501 if (error == ENOMEM) { 5502 /* resource issue, try again later */ 5503 rc = ENOMEM; 5504 } else if (error == EFBIG) { 5505 /* possibly recoverable with defragmentation */ 5506 fp->eth_q_stats.mbuf_defrag_attempts++; 5507 m0 = m_defrag(*m_head, M_NOWAIT); 5508 if (m0 == NULL) { 5509 fp->eth_q_stats.mbuf_defrag_failures++; 5510 rc = ENOBUFS; 5511 } else { 5512 /* defrag successful, try mapping again */ 5513 *m_head = m0; 5514 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5515 tx_buf->m_map, m0, 5516 segs, &nsegs, BUS_DMA_NOWAIT); 5517 if (error) { 5518 fp->eth_q_stats.tx_dma_mapping_failure++; 5519 rc = error; 5520 } 5521 } 5522 } else { 5523 /* unknown, unrecoverable mapping error */ 5524 BLOGE(sc,
"Unknown TX mapping error rc=%d\n", error); 5525 bxe_dump_mbuf(sc, m0, FALSE); 5526 rc = error; 5527 } 5528 5529 goto bxe_tx_encap_continue; 5530 } 5531 5532 tx_bd_avail = bxe_tx_avail(sc, fp); 5533 5534 /* make sure there is enough room in the send queue */ 5535 if (__predict_false(tx_bd_avail < (nsegs + 2))) { 5536 /* Recoverable, try again later. */ 5537 fp->eth_q_stats.tx_hw_queue_full++; 5538 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5539 rc = ENOMEM; 5540 goto bxe_tx_encap_continue; 5541 } 5542 5543 /* capture the current H/W TX chain high watermark */ 5544 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < 5545 (TX_BD_USABLE - tx_bd_avail))) { 5546 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); 5547 } 5548 5549 /* make sure it fits in the packet window */ 5550 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5551 /* 5552 * The mbuf may be to big for the controller to handle. If the frame 5553 * is a TSO frame we'll need to do an additional check. 5554 */ 5555 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5556 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { 5557 goto bxe_tx_encap_continue; /* OK to send */ 5558 } else { 5559 fp->eth_q_stats.tx_window_violation_tso++; 5560 } 5561 } else { 5562 fp->eth_q_stats.tx_window_violation_std++; 5563 } 5564 5565 /* lets try to defragment this mbuf and remap it */ 5566 fp->eth_q_stats.mbuf_defrag_attempts++; 5567 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5568 5569 m0 = m_defrag(*m_head, M_NOWAIT); 5570 if (m0 == NULL) { 5571 fp->eth_q_stats.mbuf_defrag_failures++; 5572 /* Ugh, just drop the frame... :( */ 5573 rc = ENOBUFS; 5574 } else { 5575 /* defrag successful, try mapping again */ 5576 *m_head = m0; 5577 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5578 tx_buf->m_map, m0, 5579 segs, &nsegs, BUS_DMA_NOWAIT); 5580 if (error) { 5581 fp->eth_q_stats.tx_dma_mapping_failure++; 5582 /* No sense in trying to defrag/copy chain, drop it. 
:( */ 5583 rc = error; 5584 } 5585 else { 5586 /* if the chain is still too long then drop it */ 5587 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5588 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5589 rc = ENODEV; 5590 } 5591 } 5592 } 5593 } 5594 5595bxe_tx_encap_continue: 5596 5597 /* Check for errors */ 5598 if (rc) { 5599 if (rc == ENOMEM) { 5600 /* recoverable try again later */ 5601 } else { 5602 fp->eth_q_stats.tx_soft_errors++; 5603 fp->eth_q_stats.mbuf_alloc_tx--; 5604 m_freem(*m_head); 5605 *m_head = NULL; 5606 } 5607 5608 return (rc); 5609 } 5610 5611 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5612 if (m0->m_flags & M_BCAST) { 5613 mac_type = BROADCAST_ADDRESS; 5614 } else if (m0->m_flags & M_MCAST) { 5615 mac_type = MULTICAST_ADDRESS; 5616 } 5617 5618 /* store the mbuf into the mbuf ring */ 5619 tx_buf->m = m0; 5620 tx_buf->first_bd = fp->tx_bd_prod; 5621 tx_buf->flags = 0; 5622 5623 /* prepare the first transmit (start) BD for the mbuf */ 5624 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5625 5626 BLOGD(sc, DBG_TX, 5627 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5628 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5629 5630 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5631 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5632 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5633 total_pkt_size += tx_start_bd->nbytes; 5634 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5635 5636 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5637 5638 /* all frames have at least Start BD + Parsing BD */ 5639 nbds = nsegs + 1; 5640 tx_start_bd->nbd = htole16(nbds); 5641 5642 if (m0->m_flags & M_VLANTAG) { 5643 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5644 tx_start_bd->bd_flags.as_bitfield |= 5645 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5646 } else { 5647 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5648 if (IS_VF(sc)) { 5649 /* map ethernet header to find type and header length */ 5650 eh = mtod(m0, struct ether_vlan_header *); 5651 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5652 } else { 5653 /* used by FW for packet accounting */ 5654 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5655#if 0 5656 /* 5657 * If NPAR-SD is active then FW should do the tagging regardless 5658 * of value of priority. Otherwise, if priority indicates this is 5659 * a control packet we need to indicate to FW to avoid tagging. 5660 */ 5661 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5662 SET_FLAG(tx_start_bd->general_data, 5663 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5664 } 5665#endif 5666 } 5667 } 5668 5669 /* 5670 * add a parsing BD from the chain. 
The parsing BD is always added 5671 * though it is only used for TSO and chksum 5672 */ 5673 bd_prod = TX_BD_NEXT(bd_prod); 5674 5675 if (m0->m_pkthdr.csum_flags) { 5676 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5677 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5678 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5679 } 5680 5681 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5682 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5683 ETH_TX_BD_FLAGS_L4_CSUM); 5684 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5685 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5686 ETH_TX_BD_FLAGS_IS_UDP | 5687 ETH_TX_BD_FLAGS_L4_CSUM); 5688 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5689 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5690 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5691 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5692 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5693 ETH_TX_BD_FLAGS_IS_UDP); 5694 } 5695 } 5696 5697 if (!CHIP_IS_E1x(sc)) { 5698 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5699 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5700 5701 if (m0->m_pkthdr.csum_flags) { 5702 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5703 } 5704 5705#if 0 5706 /* 5707 * Add the MACs to the parsing BD if the module param was 5708 * explicitly set, if this is a vf, or in switch independent 5709 * mode. 5710 */ 5711 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { 5712 eh = mtod(m0, struct ether_vlan_header *); 5713 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 5714 &pbd_e2->data.mac_addr.src_mid, 5715 &pbd_e2->data.mac_addr.src_lo, 5716 eh->evl_shost); 5717 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 5718 &pbd_e2->data.mac_addr.dst_mid, 5719 &pbd_e2->data.mac_addr.dst_lo, 5720 eh->evl_dhost); 5721 } 5722#endif 5723 5724 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5725 mac_type); 5726 } else { 5727 uint16_t global_data = 0; 5728 5729 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5730 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5731 5732 if (m0->m_pkthdr.csum_flags) { 5733 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5734 } 5735 5736 SET_FLAG(global_data, 5737 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5738 pbd_e1x->global_data |= htole16(global_data); 5739 } 5740 5741 /* setup the parsing BD with TSO specific info */ 5742 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5743 fp->eth_q_stats.tx_ofld_frames_lso++; 5744 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5745 5746 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5747 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5748 5749 /* split the first BD into header/data making the fw job easy */ 5750 nbds++; 5751 tx_start_bd->nbd = htole16(nbds); 5752 tx_start_bd->nbytes = htole16(hlen); 5753 5754 bd_prod = TX_BD_NEXT(bd_prod); 5755 5756 /* new transmit BD after the tx_parse_bd */ 5757 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5758 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5759 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5760 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5761 if (tx_total_pkt_size_bd == NULL) { 5762 tx_total_pkt_size_bd = tx_data_bd; 5763 } 5764 5765 BLOGD(sc, DBG_TX, 5766 "TSO split header size is %d (%x:%x) nbds %d\n", 5767 le16toh(tx_start_bd->nbytes), 5768 le32toh(tx_start_bd->addr_hi), 5769 le32toh(tx_start_bd->addr_lo), 5770 nbds); 5771 } 5772 5773 if (!CHIP_IS_E1x(sc)) { 5774 
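 /*
  * On E2+ chips the LSO parameters are packed into parsing_data; e.g.
  * with tso_segsz = 1460 (a hypothetical MSS) the field becomes
  * (1460 << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS.
  * E1x chips carry the same information in the discrete
  * eth_tx_parse_bd_e1x fields (lso_mss, tcp_send_seq, ip_id, ...) instead.
  */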
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5775 } else { 5776 bxe_set_pbd_lso(m0, pbd_e1x); 5777 } 5778 } 5779 5780 if (pbd_e2_parsing_data) { 5781 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5782 } 5783 5784 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5785 for (i = 1; i < nsegs ; i++) { 5786 bd_prod = TX_BD_NEXT(bd_prod); 5787 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5788 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5789 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5790 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5791 if (tx_total_pkt_size_bd == NULL) { 5792 tx_total_pkt_size_bd = tx_data_bd; 5793 } 5794 total_pkt_size += tx_data_bd->nbytes; 5795 } 5796 5797 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5798 5799 if (tx_total_pkt_size_bd != NULL) { 5800 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5801 } 5802 5803 if (__predict_false(sc->debug & DBG_TX)) { 5804 tmp_bd = tx_buf->first_bd; 5805 for (i = 0; i < nbds; i++) 5806 { 5807 if (i == 0) { 5808 BLOGD(sc, DBG_TX, 5809 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5810 "bd_flags=0x%x hdr_nbds=%d\n", 5811 tx_start_bd, 5812 tmp_bd, 5813 le16toh(tx_start_bd->nbd), 5814 le16toh(tx_start_bd->vlan_or_ethertype), 5815 tx_start_bd->bd_flags.as_bitfield, 5816 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5817 } else if (i == 1) { 5818 if (pbd_e1x) { 5819 BLOGD(sc, DBG_TX, 5820 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5821 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5822 "tcp_seq=%u total_hlen_w=%u\n", 5823 pbd_e1x, 5824 tmp_bd, 5825 pbd_e1x->global_data, 5826 pbd_e1x->ip_hlen_w, 5827 pbd_e1x->ip_id, 5828 pbd_e1x->lso_mss, 5829 pbd_e1x->tcp_flags, 5830 pbd_e1x->tcp_pseudo_csum, 5831 pbd_e1x->tcp_send_seq, 5832 le16toh(pbd_e1x->total_hlen_w)); 5833 } else { /* if (pbd_e2) */ 5834 BLOGD(sc, DBG_TX, 5835 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5836 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5837 pbd_e2, 5838 tmp_bd, 5839 pbd_e2->data.mac_addr.dst_hi, 5840 pbd_e2->data.mac_addr.dst_mid, 5841 pbd_e2->data.mac_addr.dst_lo, 5842 pbd_e2->data.mac_addr.src_hi, 5843 pbd_e2->data.mac_addr.src_mid, 5844 pbd_e2->data.mac_addr.src_lo, 5845 pbd_e2->parsing_data); 5846 } 5847 } 5848 5849 if (i != 1) { /* skip parse db as it doesn't hold data */ 5850 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5851 BLOGD(sc, DBG_TX, 5852 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5853 tx_data_bd, 5854 tmp_bd, 5855 le16toh(tx_data_bd->nbytes), 5856 le32toh(tx_data_bd->addr_hi), 5857 le32toh(tx_data_bd->addr_lo)); 5858 } 5859 5860 tmp_bd = TX_BD_NEXT(tmp_bd); 5861 } 5862 } 5863 5864 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5865 5866 /* update TX BD producer index value for next TX */ 5867 bd_prod = TX_BD_NEXT(bd_prod); 5868 5869 /* 5870 * If the chain of tx_bd's describing this frame is adjacent to or spans 5871 * an eth_tx_next_bd element then we need to increment the nbds value. 5872 */ 5873 if (TX_BD_IDX(bd_prod) < nbds) { 5874 nbds++; 5875 } 5876 5877 /* don't allow reordering of writes for nbd and packets */ 5878 mb(); 5879 5880 fp->tx_db.data.prod += nbds; 5881 5882 /* producer points to the next free tx_bd at this point */ 5883 fp->tx_pkt_prod++; 5884 fp->tx_bd_prod = bd_prod; 5885 5886 DOORBELL(sc, fp->index, fp->tx_db.raw); 5887 5888 fp->eth_q_stats.tx_pkts++; 5889 5890 /* Prevent speculative reads from getting ahead of the status block. 
*/ 5891 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5892 0, 0, BUS_SPACE_BARRIER_READ); 5893 5894 /* Prevent speculative reads from getting ahead of the doorbell. */ 5895 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5896 0, 0, BUS_SPACE_BARRIER_READ); 5897 5898 return (0); 5899} 5900 5901static void 5902bxe_tx_start_locked(struct bxe_softc *sc, 5903 if_t ifp, 5904 struct bxe_fastpath *fp) 5905{ 5906 struct mbuf *m = NULL; 5907 int tx_count = 0; 5908 uint16_t tx_bd_avail; 5909 5910 BXE_FP_TX_LOCK_ASSERT(fp); 5911 5912 /* keep adding entries while there are frames to send */ 5913 while (!if_sendq_empty(ifp)) { 5914 5915 /* 5916 * check for any frames to send 5917 * dequeue can still be NULL even if queue is not empty 5918 */ 5919 m = if_dequeue(ifp); 5920 if (__predict_false(m == NULL)) { 5921 break; 5922 } 5923 5924 /* the mbuf now belongs to us */ 5925 fp->eth_q_stats.mbuf_alloc_tx++; 5926 5927 /* 5928 * Put the frame into the transmit ring. If we don't have room, 5929 * place the mbuf back at the head of the TX queue, set the 5930 * OACTIVE flag, and wait for the NIC to drain the chain. 5931 */ 5932 if (__predict_false(bxe_tx_encap(fp, &m))) { 5933 fp->eth_q_stats.tx_encap_failures++; 5934 if (m != NULL) { 5935 /* mark the TX queue as full and return the frame */ 5936 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5937 if_sendq_prepend(ifp, m); 5938 fp->eth_q_stats.mbuf_alloc_tx--; 5939 fp->eth_q_stats.tx_queue_xoff++; 5940 } 5941 5942 /* stop looking for more work */ 5943 break; 5944 } 5945 5946 /* the frame was enqueued successfully */ 5947 tx_count++; 5948 5949 /* send a copy of the frame to any BPF listeners. */ 5950 if_etherbpfmtap(ifp, m); 5951 5952 tx_bd_avail = bxe_tx_avail(sc, fp); 5953 5954 /* handle any completions if we're running low */ 5955 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5956 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5957 bxe_txeof(sc, fp); 5958 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5959 break; 5960 } 5961 } 5962 } 5963 5964 /* all TX packets were dequeued and/or the tx ring is full */ 5965 if (tx_count > 0) { 5966 /* reset the TX watchdog timeout timer */ 5967 fp->watchdog_timer = BXE_TX_TIMEOUT; 5968 } 5969} 5970 5971/* Legacy (non-RSS) dispatch routine */ 5972static void 5973bxe_tx_start(if_t ifp) 5974{ 5975 struct bxe_softc *sc; 5976 struct bxe_fastpath *fp; 5977 5978 sc = if_getsoftc(ifp); 5979 5980 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5981 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5982 return; 5983 } 5984 5985 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5986 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5987 return; 5988 } 5989 5990 if (!sc->link_vars.link_up) { 5991 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5992 return; 5993 } 5994 5995 fp = &sc->fp[0]; 5996 5997 BXE_FP_TX_LOCK(fp); 5998 bxe_tx_start_locked(sc, ifp, fp); 5999 BXE_FP_TX_UNLOCK(fp); 6000} 6001 6002#if __FreeBSD_version >= 800000 6003 6004static int 6005bxe_tx_mq_start_locked(struct bxe_softc *sc, 6006 if_t ifp, 6007 struct bxe_fastpath *fp, 6008 struct mbuf *m) 6009{ 6010 struct buf_ring *tx_br = fp->tx_br; 6011 struct mbuf *next; 6012 int depth, rc, tx_count; 6013 uint16_t tx_bd_avail; 6014 6015 rc = tx_count = 0; 6016 6017 BXE_FP_TX_LOCK_ASSERT(fp); 6018 6019 if (!tx_br) { 6020 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 6021 return (EINVAL); 6022 } 6023 6024 if (!sc->link_vars.link_up || 6025 (ifp->if_drv_flags & 6026 (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) 
!= IFF_DRV_RUNNING) { 6027 rc = drbr_enqueue_drv(ifp, tx_br, m); 6028 goto bxe_tx_mq_start_locked_exit; 6029 } 6030 6031 /* fetch the depth of the driver queue */ 6032 depth = drbr_inuse_drv(ifp, tx_br); 6033 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 6034 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 6035 } 6036 6037 if (m == NULL) { 6038 /* no new work, check for pending frames */ 6039 next = drbr_dequeue_drv(ifp, tx_br); 6040 } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { 6041 /* have both new and pending work, maintain packet order */ 6042 rc = drbr_enqueue_drv(ifp, tx_br, m); 6043 if (rc != 0) { 6044 fp->eth_q_stats.tx_soft_errors++; 6045 goto bxe_tx_mq_start_locked_exit; 6046 } 6047 next = drbr_dequeue_drv(ifp, tx_br); 6048 } else { 6049 /* new work only and nothing pending */ 6050 next = m; 6051 } 6052 6053 /* keep adding entries while there are frames to send */ 6054 while (next != NULL) { 6055 6056 /* the mbuf now belongs to us */ 6057 fp->eth_q_stats.mbuf_alloc_tx++; 6058 6059 /* 6060 * Put the frame into the transmit ring. If we don't have room, 6061 * place the mbuf back at the head of the TX queue, set the 6062 * OACTIVE flag, and wait for the NIC to drain the chain. 6063 */ 6064 rc = bxe_tx_encap(fp, &next); 6065 if (__predict_false(rc != 0)) { 6066 fp->eth_q_stats.tx_encap_failures++; 6067 if (next != NULL) { 6068 /* mark the TX queue as full and save the frame */ 6069 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 6070 /* XXX this may reorder the frame */ 6071 rc = drbr_enqueue_drv(ifp, tx_br, next); 6072 fp->eth_q_stats.mbuf_alloc_tx--; 6073 fp->eth_q_stats.tx_frames_deferred++; 6074 } 6075 6076 /* stop looking for more work */ 6077 break; 6078 } 6079 6080 /* the transmit frame was enqueued successfully */ 6081 tx_count++; 6082 6083 /* send a copy of the frame to any BPF listeners */ 6084 if_etherbpfmtap(ifp, next); 6085 6086 tx_bd_avail = bxe_tx_avail(sc, fp); 6087 6088 /* handle any completions if we're running low */ 6089 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6090 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6091 bxe_txeof(sc, fp); 6092 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 6093 break; 6094 } 6095 } 6096 6097 next = drbr_dequeue_drv(ifp, tx_br); 6098 } 6099 6100 /* all TX packets were dequeued and/or the tx ring is full */ 6101 if (tx_count > 0) { 6102 /* reset the TX watchdog timeout timer */ 6103 fp->watchdog_timer = BXE_TX_TIMEOUT; 6104 } 6105 6106bxe_tx_mq_start_locked_exit: 6107 6108 return (rc); 6109} 6110 6111/* Multiqueue (TSS) dispatch routine. 
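 * The fastpath (TX queue) is selected from the mbuf's RSS flowid when one is present: e.g. with num_queues=4 a flowid of 29 selects queue (29 % 4) = 1; frames without a flowid fall back to queue 0.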
*/ 6112static int 6113bxe_tx_mq_start(struct ifnet *ifp, 6114 struct mbuf *m) 6115{ 6116 struct bxe_softc *sc = if_getsoftc(ifp); 6117 struct bxe_fastpath *fp; 6118 int fp_index, rc; 6119 6120 fp_index = 0; /* default is the first queue */ 6121 6122 /* check if flowid is set */ 6123 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 6124 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6125 6126 fp = &sc->fp[fp_index]; 6127 6128 if (BXE_FP_TX_TRYLOCK(fp)) { 6129 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6130 BXE_FP_TX_UNLOCK(fp); 6131 } else 6132 rc = drbr_enqueue_drv(ifp, fp->tx_br, m); 6133 6134 return (rc); 6135} 6136 6137static void 6138bxe_mq_flush(struct ifnet *ifp) 6139{ 6140 struct bxe_softc *sc = if_getsoftc(ifp); 6141 struct bxe_fastpath *fp; 6142 struct mbuf *m; 6143 int i; 6144 6145 for (i = 0; i < sc->num_queues; i++) { 6146 fp = &sc->fp[i]; 6147 6148 if (fp->state != BXE_FP_STATE_OPEN) { 6149 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6150 fp->index, fp->state); 6151 continue; 6152 } 6153 6154 if (fp->tx_br != NULL) { 6155 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6156 BXE_FP_TX_LOCK(fp); 6157 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6158 m_freem(m); 6159 } 6160 BXE_FP_TX_UNLOCK(fp); 6161 } 6162 } 6163 6164 if_qflush(ifp); 6165} 6166 6167#endif /* FreeBSD_version >= 800000 */ 6168 6169static uint16_t 6170bxe_cid_ilt_lines(struct bxe_softc *sc) 6171{ 6172 if (IS_SRIOV(sc)) { 6173 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6174 } 6175 return (L2_ILT_LINES(sc)); 6176} 6177 6178static void 6179bxe_ilt_set_info(struct bxe_softc *sc) 6180{ 6181 struct ilt_client_info *ilt_client; 6182 struct ecore_ilt *ilt = sc->ilt; 6183 uint16_t line = 0; 6184 6185 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6186 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6187 6188 /* CDU */ 6189 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6190 ilt_client->client_num = ILT_CLIENT_CDU; 6191 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6192 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6193 ilt_client->start = line; 6194 line += bxe_cid_ilt_lines(sc); 6195 6196 if (CNIC_SUPPORT(sc)) { 6197 line += CNIC_ILT_LINES; 6198 } 6199 6200 ilt_client->end = (line - 1); 6201 6202 BLOGD(sc, DBG_LOAD, 6203 "ilt client[CDU]: start %d, end %d, " 6204 "psz 0x%x, flags 0x%x, hw psz %d\n", 6205 ilt_client->start, ilt_client->end, 6206 ilt_client->page_size, 6207 ilt_client->flags, 6208 ilog2(ilt_client->page_size >> 12)); 6209 6210 /* QM */ 6211 if (QM_INIT(sc->qm_cid_count)) { 6212 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6213 ilt_client->client_num = ILT_CLIENT_QM; 6214 ilt_client->page_size = QM_ILT_PAGE_SZ; 6215 ilt_client->flags = 0; 6216 ilt_client->start = line; 6217 6218 /* 4 bytes for each cid */ 6219 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6220 QM_ILT_PAGE_SZ); 6221 6222 ilt_client->end = (line - 1); 6223 6224 BLOGD(sc, DBG_LOAD, 6225 "ilt client[QM]: start %d, end %d, " 6226 "psz 0x%x, flags 0x%x, hw psz %d\n", 6227 ilt_client->start, ilt_client->end, 6228 ilt_client->page_size, ilt_client->flags, 6229 ilog2(ilt_client->page_size >> 12)); 6230 } 6231 6232 if (CNIC_SUPPORT(sc)) { 6233 /* SRC */ 6234 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6235 ilt_client->client_num = ILT_CLIENT_SRC; 6236 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6237 ilt_client->flags = 0; 6238 ilt_client->start = line; 6239 line += SRC_ILT_LINES; 6240 ilt_client->end = (line - 1); 6241 6242 BLOGD(sc, DBG_LOAD, 6243 "ilt client[SRC]: start 
%d, end %d, " 6244 "psz 0x%x, flags 0x%x, hw psz %d\n", 6245 ilt_client->start, ilt_client->end, 6246 ilt_client->page_size, ilt_client->flags, 6247 ilog2(ilt_client->page_size >> 12)); 6248 6249 /* TM */ 6250 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6251 ilt_client->client_num = ILT_CLIENT_TM; 6252 ilt_client->page_size = TM_ILT_PAGE_SZ; 6253 ilt_client->flags = 0; 6254 ilt_client->start = line; 6255 line += TM_ILT_LINES; 6256 ilt_client->end = (line - 1); 6257 6258 BLOGD(sc, DBG_LOAD, 6259 "ilt client[TM]: start %d, end %d, " 6260 "psz 0x%x, flags 0x%x, hw psz %d\n", 6261 ilt_client->start, ilt_client->end, 6262 ilt_client->page_size, ilt_client->flags, 6263 ilog2(ilt_client->page_size >> 12)); 6264 } 6265 6266 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6267} 6268 6269static void 6270bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6271{ 6272 int i; 6273 uint32_t rx_buf_size; 6274 6275 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 6276 6277 for (i = 0; i < sc->num_queues; i++) { 6278 if(rx_buf_size <= MCLBYTES){ 6279 sc->fp[i].rx_buf_size = rx_buf_size; 6280 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6281 }else if (rx_buf_size <= MJUMPAGESIZE){ 6282 sc->fp[i].rx_buf_size = rx_buf_size; 6283 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6284 }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ 6285 sc->fp[i].rx_buf_size = MCLBYTES; 6286 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6287 }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ 6288 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 6289 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6290 }else { 6291 sc->fp[i].rx_buf_size = MCLBYTES; 6292 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6293 } 6294 } 6295} 6296 6297static int 6298bxe_alloc_ilt_mem(struct bxe_softc *sc) 6299{ 6300 int rc = 0; 6301 6302 if ((sc->ilt = 6303 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6304 M_BXE_ILT, 6305 (M_NOWAIT | M_ZERO))) == NULL) { 6306 rc = 1; 6307 } 6308 6309 return (rc); 6310} 6311 6312static int 6313bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6314{ 6315 int rc = 0; 6316 6317 if ((sc->ilt->lines = 6318 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6319 M_BXE_ILT, 6320 (M_NOWAIT | M_ZERO))) == NULL) { 6321 rc = 1; 6322 } 6323 6324 return (rc); 6325} 6326 6327static void 6328bxe_free_ilt_mem(struct bxe_softc *sc) 6329{ 6330 if (sc->ilt != NULL) { 6331 free(sc->ilt, M_BXE_ILT); 6332 sc->ilt = NULL; 6333 } 6334} 6335 6336static void 6337bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6338{ 6339 if (sc->ilt->lines != NULL) { 6340 free(sc->ilt->lines, M_BXE_ILT); 6341 sc->ilt->lines = NULL; 6342 } 6343} 6344 6345static void 6346bxe_free_mem(struct bxe_softc *sc) 6347{ 6348 int i; 6349 6350#if 0 6351 if (!CONFIGURE_NIC_MODE(sc)) { 6352 /* free searcher T2 table */ 6353 bxe_dma_free(sc, &sc->t2); 6354 } 6355#endif 6356 6357 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6358 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6359 sc->context[i].vcxt = NULL; 6360 sc->context[i].size = 0; 6361 } 6362 6363 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6364 6365 bxe_free_ilt_lines_mem(sc); 6366 6367#if 0 6368 bxe_iov_free_mem(sc); 6369#endif 6370} 6371 6372static int 6373bxe_alloc_mem(struct bxe_softc *sc) 6374{ 6375 int context_size; 6376 int allocated; 6377 int i; 6378 6379#if 0 6380 if (!CONFIGURE_NIC_MODE(sc)) { 6381 /* allocate searcher T2 table */ 6382 if (bxe_dma_alloc(sc, SRC_T2_SZ, 6383 &sc->t2, "searcher t2 table") != 0) { 6384 return (-1); 6385 } 6386 } 6387#endif 6388 6389 /* 6390 * Allocate memory for CDU context: 6391 * This memory 
is allocated separately and not in the generic ILT 6392 * functions because CDU differs in few aspects: 6393 * 1. There can be multiple entities allocating memory for context - 6394 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6395 * its own ILT lines. 6396 * 2. Since CDU page-size is not a single 4KB page (which is the case 6397 * for the other ILT clients), to be efficient we want to support 6398 * allocation of sub-page-size in the last entry. 6399 * 3. Context pointers are used by the driver to pass to FW / update 6400 * the context (for the other ILT clients the pointers are used just to 6401 * free the memory during unload). 6402 */ 6403 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6404 for (i = 0, allocated = 0; allocated < context_size; i++) { 6405 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6406 (context_size - allocated)); 6407 6408 if (bxe_dma_alloc(sc, sc->context[i].size, 6409 &sc->context[i].vcxt_dma, 6410 "cdu context") != 0) { 6411 bxe_free_mem(sc); 6412 return (-1); 6413 } 6414 6415 sc->context[i].vcxt = 6416 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6417 6418 allocated += sc->context[i].size; 6419 } 6420 6421 bxe_alloc_ilt_lines_mem(sc); 6422 6423 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6424 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6425 { 6426 for (i = 0; i < 4; i++) { 6427 BLOGD(sc, DBG_LOAD, 6428 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6429 i, 6430 sc->ilt->clients[i].page_size, 6431 sc->ilt->clients[i].start, 6432 sc->ilt->clients[i].end, 6433 sc->ilt->clients[i].client_num, 6434 sc->ilt->clients[i].flags); 6435 } 6436 } 6437 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6438 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6439 bxe_free_mem(sc); 6440 return (-1); 6441 } 6442 6443#if 0 6444 if (bxe_iov_alloc_mem(sc)) { 6445 BLOGE(sc, "Failed to allocate memory for SRIOV\n"); 6446 bxe_free_mem(sc); 6447 return (-1); 6448 } 6449#endif 6450 6451 return (0); 6452} 6453 6454static void 6455bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6456{ 6457 struct bxe_softc *sc; 6458 int i; 6459 6460 sc = fp->sc; 6461 6462 if (fp->rx_mbuf_tag == NULL) { 6463 return; 6464 } 6465 6466 /* free all mbufs and unload all maps */ 6467 for (i = 0; i < RX_BD_TOTAL; i++) { 6468 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6469 bus_dmamap_sync(fp->rx_mbuf_tag, 6470 fp->rx_mbuf_chain[i].m_map, 6471 BUS_DMASYNC_POSTREAD); 6472 bus_dmamap_unload(fp->rx_mbuf_tag, 6473 fp->rx_mbuf_chain[i].m_map); 6474 } 6475 6476 if (fp->rx_mbuf_chain[i].m != NULL) { 6477 m_freem(fp->rx_mbuf_chain[i].m); 6478 fp->rx_mbuf_chain[i].m = NULL; 6479 fp->eth_q_stats.mbuf_alloc_rx--; 6480 } 6481 } 6482} 6483 6484static void 6485bxe_free_tpa_pool(struct bxe_fastpath *fp) 6486{ 6487 struct bxe_softc *sc; 6488 int i, max_agg_queues; 6489 6490 sc = fp->sc; 6491 6492 if (fp->rx_mbuf_tag == NULL) { 6493 return; 6494 } 6495 6496 max_agg_queues = MAX_AGG_QS(sc); 6497 6498 /* release all mbufs and unload all DMA maps in the TPA pool */ 6499 for (i = 0; i < max_agg_queues; i++) { 6500 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6501 bus_dmamap_sync(fp->rx_mbuf_tag, 6502 fp->rx_tpa_info[i].bd.m_map, 6503 BUS_DMASYNC_POSTREAD); 6504 bus_dmamap_unload(fp->rx_mbuf_tag, 6505 fp->rx_tpa_info[i].bd.m_map); 6506 } 6507 6508 if (fp->rx_tpa_info[i].bd.m != NULL) { 6509 m_freem(fp->rx_tpa_info[i].bd.m); 6510 fp->rx_tpa_info[i].bd.m = NULL; 6511 fp->eth_q_stats.mbuf_alloc_tpa--; 6512 } 6513 } 6514} 6515 6516static void 6517bxe_free_sge_chain(struct 
bxe_fastpath *fp) 6518{ 6519 struct bxe_softc *sc; 6520 int i; 6521 6522 sc = fp->sc; 6523 6524 if (fp->rx_sge_mbuf_tag == NULL) { 6525 return; 6526 } 6527 6528 /* free all mbufs and unload all maps */ 6529 for (i = 0; i < RX_SGE_TOTAL; i++) { 6530 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6531 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6532 fp->rx_sge_mbuf_chain[i].m_map, 6533 BUS_DMASYNC_POSTREAD); 6534 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6535 fp->rx_sge_mbuf_chain[i].m_map); 6536 } 6537 6538 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6539 m_freem(fp->rx_sge_mbuf_chain[i].m); 6540 fp->rx_sge_mbuf_chain[i].m = NULL; 6541 fp->eth_q_stats.mbuf_alloc_sge--; 6542 } 6543 } 6544} 6545 6546static void 6547bxe_free_fp_buffers(struct bxe_softc *sc) 6548{ 6549 struct bxe_fastpath *fp; 6550 int i; 6551 6552 for (i = 0; i < sc->num_queues; i++) { 6553 fp = &sc->fp[i]; 6554 6555#if __FreeBSD_version >= 800000 6556 if (fp->tx_br != NULL) { 6557 /* just in case bxe_mq_flush() wasn't called */ 6558 if (mtx_initialized(&fp->tx_mtx)) { 6559 struct mbuf *m; 6560 6561 BXE_FP_TX_LOCK(fp); 6562 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6563 m_freem(m); 6564 BXE_FP_TX_UNLOCK(fp); 6565 } 6566 buf_ring_free(fp->tx_br, M_DEVBUF); 6567 fp->tx_br = NULL; 6568 } 6569#endif 6570 6571 /* free all RX buffers */ 6572 bxe_free_rx_bd_chain(fp); 6573 bxe_free_tpa_pool(fp); 6574 bxe_free_sge_chain(fp); 6575 6576 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6577 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6578 fp->eth_q_stats.mbuf_alloc_rx); 6579 } 6580 6581 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6582 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6583 fp->eth_q_stats.mbuf_alloc_sge); 6584 } 6585 6586 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6587 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6588 fp->eth_q_stats.mbuf_alloc_tpa); 6589 } 6590 6591 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6592 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6593 fp->eth_q_stats.mbuf_alloc_tx); 6594 } 6595 6596 /* XXX verify all mbufs were reclaimed */ 6597 6598 if (mtx_initialized(&fp->tx_mtx)) { 6599 mtx_destroy(&fp->tx_mtx); 6600 } 6601 6602 if (mtx_initialized(&fp->rx_mtx)) { 6603 mtx_destroy(&fp->rx_mtx); 6604 } 6605 } 6606} 6607 6608static int 6609bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6610 uint16_t prev_index, 6611 uint16_t index) 6612{ 6613 struct bxe_sw_rx_bd *rx_buf; 6614 struct eth_rx_bd *rx_bd; 6615 bus_dma_segment_t segs[1]; 6616 bus_dmamap_t map; 6617 struct mbuf *m; 6618 int nsegs, rc; 6619 6620 rc = 0; 6621 6622 /* allocate the new RX BD mbuf */ 6623 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6624 if (__predict_false(m == NULL)) { 6625 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6626 return (ENOBUFS); 6627 } 6628 6629 fp->eth_q_stats.mbuf_alloc_rx++; 6630 6631 /* initialize the mbuf buffer length */ 6632 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6633 6634 /* map the mbuf into non-paged pool */ 6635 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6636 fp->rx_mbuf_spare_map, 6637 m, segs, &nsegs, BUS_DMA_NOWAIT); 6638 if (__predict_false(rc != 0)) { 6639 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6640 m_freem(m); 6641 fp->eth_q_stats.mbuf_alloc_rx--; 6642 return (rc); 6643 } 6644 6645 /* all mbufs must map to a single segment */ 6646 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6647 6648 /* release any existing RX BD mbuf mappings */ 6649 6650 if (prev_index != index) { 6651 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6652 6653 if
(rx_buf->m_map != NULL) { 6654 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6655 BUS_DMASYNC_POSTREAD); 6656 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6657 } 6658 6659 /* 6660 * We only get here from bxe_rxeof() when the maximum number 6661 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6662 * holds the mbuf in the prev_index so it's OK to NULL it out 6663 * here without concern of a memory leak. 6664 */ 6665 fp->rx_mbuf_chain[prev_index].m = NULL; 6666 } 6667 6668 rx_buf = &fp->rx_mbuf_chain[index]; 6669 6670 if (rx_buf->m_map != NULL) { 6671 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6672 BUS_DMASYNC_POSTREAD); 6673 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6674 } 6675 6676 /* save the mbuf and mapping info for a future packet */ 6677 map = (prev_index != index) ? 6678 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6679 rx_buf->m_map = fp->rx_mbuf_spare_map; 6680 fp->rx_mbuf_spare_map = map; 6681 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6682 BUS_DMASYNC_PREREAD); 6683 rx_buf->m = m; 6684 6685 rx_bd = &fp->rx_chain[index]; 6686 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6687 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6688 6689 return (rc); 6690} 6691 6692static int 6693bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6694 int queue) 6695{ 6696 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6697 bus_dma_segment_t segs[1]; 6698 bus_dmamap_t map; 6699 struct mbuf *m; 6700 int nsegs; 6701 int rc = 0; 6702 6703 /* allocate the new TPA mbuf */ 6704 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6705 if (__predict_false(m == NULL)) { 6706 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6707 return (ENOBUFS); 6708 } 6709 6710 fp->eth_q_stats.mbuf_alloc_tpa++; 6711 6712 /* initialize the mbuf buffer length */ 6713 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6714 6715 /* map the mbuf into non-paged pool */ 6716 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6717 fp->rx_tpa_info_mbuf_spare_map, 6718 m, segs, &nsegs, BUS_DMA_NOWAIT); 6719 if (__predict_false(rc != 0)) { 6720 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6721 m_free(m); 6722 fp->eth_q_stats.mbuf_alloc_tpa--; 6723 return (rc); 6724 } 6725 6726 /* all mbufs must map to a single segment */ 6727 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6728 6729 /* release any existing TPA mbuf mapping */ 6730 if (tpa_info->bd.m_map != NULL) { 6731 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6732 BUS_DMASYNC_POSTREAD); 6733 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6734 } 6735 6736 /* save the mbuf and mapping info for the TPA mbuf */ 6737 map = tpa_info->bd.m_map; 6738 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6739 fp->rx_tpa_info_mbuf_spare_map = map; 6740 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6741 BUS_DMASYNC_PREREAD); 6742 tpa_info->bd.m = m; 6743 tpa_info->seg = segs[0]; 6744 6745 return (rc); 6746} 6747 6748/* 6749 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6750 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6751 * chain. 
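 * Like the RX BD and TPA allocators above, this uses a spare-map swap: the new mbuf is first loaded into rx_sge_mbuf_spare_map and the maps are exchanged only after the DMA load succeeds, so a mapping failure never leaves the ring slot without a valid DMA map.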
6752 */ 6753static int 6754bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6755 uint16_t index) 6756{ 6757 struct bxe_sw_rx_bd *sge_buf; 6758 struct eth_rx_sge *sge; 6759 bus_dma_segment_t segs[1]; 6760 bus_dmamap_t map; 6761 struct mbuf *m; 6762 int nsegs; 6763 int rc = 0; 6764 6765 /* allocate a new SGE mbuf */ 6766 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6767 if (__predict_false(m == NULL)) { 6768 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6769 return (ENOMEM); 6770 } 6771 6772 fp->eth_q_stats.mbuf_alloc_sge++; 6773 6774 /* initialize the mbuf buffer length */ 6775 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6776 6777 /* map the SGE mbuf into non-paged pool */ 6778 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6779 fp->rx_sge_mbuf_spare_map, 6780 m, segs, &nsegs, BUS_DMA_NOWAIT); 6781 if (__predict_false(rc != 0)) { 6782 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6783 m_freem(m); 6784 fp->eth_q_stats.mbuf_alloc_sge--; 6785 return (rc); 6786 } 6787 6788 /* all mbufs must map to a single segment */ 6789 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6790 6791 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6792 6793 /* release any existing SGE mbuf mapping */ 6794 if (sge_buf->m_map != NULL) { 6795 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6796 BUS_DMASYNC_POSTREAD); 6797 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6798 } 6799 6800 /* save the mbuf and mapping info for a future packet */ 6801 map = sge_buf->m_map; 6802 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6803 fp->rx_sge_mbuf_spare_map = map; 6804 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6805 BUS_DMASYNC_PREREAD); 6806 sge_buf->m = m; 6807 6808 sge = &fp->rx_sge_chain[index]; 6809 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6810 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6811 6812 return (rc); 6813} 6814 6815static __noinline int 6816bxe_alloc_fp_buffers(struct bxe_softc *sc) 6817{ 6818 struct bxe_fastpath *fp; 6819 int i, j, rc = 0; 6820 int ring_prod, cqe_ring_prod; 6821 int max_agg_queues; 6822 6823 for (i = 0; i < sc->num_queues; i++) { 6824 fp = &sc->fp[i]; 6825 6826#if __FreeBSD_version >= 800000 6827 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6828 M_NOWAIT, &fp->tx_mtx); 6829 if (fp->tx_br == NULL) { 6830 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6831 goto bxe_alloc_fp_buffers_error; 6832 } 6833#endif 6834 6835 ring_prod = cqe_ring_prod = 0; 6836 fp->rx_bd_cons = 0; 6837 fp->rx_cq_cons = 0; 6838 6839 /* allocate buffers for the RX BDs in RX BD chain */ 6840 for (j = 0; j < sc->max_rx_bufs; j++) { 6841 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6842 if (rc != 0) { 6843 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6844 i, rc); 6845 goto bxe_alloc_fp_buffers_error; 6846 } 6847 6848 ring_prod = RX_BD_NEXT(ring_prod); 6849 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6850 } 6851 6852 fp->rx_bd_prod = ring_prod; 6853 fp->rx_cq_prod = cqe_ring_prod; 6854 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6855 6856 max_agg_queues = MAX_AGG_QS(sc); 6857 6858 fp->tpa_enable = TRUE; 6859 6860 /* fill the TPA pool */ 6861 for (j = 0; j < max_agg_queues; j++) { 6862 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6863 if (rc != 0) { 6864 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6865 i, j); 6866 fp->tpa_enable = FALSE; 6867 goto bxe_alloc_fp_buffers_error; 6868 } 6869 6870 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6871 } 6872 6873 if (fp->tpa_enable) { 6874 /* fill the RX SGE chain */ 6875 ring_prod = 0; 
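 /* Only RX_SGE_USABLE entries are filled; the remainder of RX_SGE_TOTAL is reserved for the next-page pointer elements at the end of each ring page. */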
6876 for (j = 0; j < RX_SGE_USABLE; j++) { 6877 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6878 if (rc != 0) { 6879 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6880 i, ring_prod); 6881 fp->tpa_enable = FALSE; 6882 ring_prod = 0; 6883 goto bxe_alloc_fp_buffers_error; 6884 } 6885 6886 ring_prod = RX_SGE_NEXT(ring_prod); 6887 } 6888 6889 fp->rx_sge_prod = ring_prod; 6890 } 6891 } 6892 6893 return (0); 6894 6895bxe_alloc_fp_buffers_error: 6896 6897 /* unwind what was already allocated */ 6898 bxe_free_rx_bd_chain(fp); 6899 bxe_free_tpa_pool(fp); 6900 bxe_free_sge_chain(fp); 6901 6902 return (ENOBUFS); 6903} 6904 6905static void 6906bxe_free_fw_stats_mem(struct bxe_softc *sc) 6907{ 6908 bxe_dma_free(sc, &sc->fw_stats_dma); 6909 6910 sc->fw_stats_num = 0; 6911 6912 sc->fw_stats_req_size = 0; 6913 sc->fw_stats_req = NULL; 6914 sc->fw_stats_req_mapping = 0; 6915 6916 sc->fw_stats_data_size = 0; 6917 sc->fw_stats_data = NULL; 6918 sc->fw_stats_data_mapping = 0; 6919} 6920 6921static int 6922bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6923{ 6924 uint8_t num_queue_stats; 6925 int num_groups; 6926 6927 /* number of queues for statistics is number of eth queues */ 6928 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6929 6930 /* 6931 * Total number of FW statistics requests = 6932 * 1 for port stats + 1 for PF stats + num of queues 6933 */ 6934 sc->fw_stats_num = (2 + num_queue_stats); 6935 6936 /* 6937 * Request is built from stats_query_header and an array of 6938 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6939 * rules. The real number of requests is configured in the 6940 * stats_query_header. 6941 */ 6942 num_groups = 6943 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6944 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6945 6946 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6947 sc->fw_stats_num, num_groups); 6948 6949 sc->fw_stats_req_size = 6950 (sizeof(struct stats_query_header) + 6951 (num_groups * sizeof(struct stats_query_cmd_group))); 6952 6953 /* 6954 * Data for statistics requests + stats_counter. 6955 * stats_counter holds per-STORM counters that are incremented when 6956 * STORM has finished with the current request. Memory for FCoE 6957 * offloaded statistics is counted anyway, even if those stats will not be sent. 6958 * VF stats are not accounted for here as the data of VF stats is stored 6959 * in memory allocated by the VF, not here.
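 * For example, with 4 eth queues fw_stats_num = 2 + 4 = 6; assuming a STATS_QUERY_CMD_COUNT of 16 this fits in num_groups = 1, so the request is one stats_query_header followed by a single stats_query_cmd_group.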
6960 */ 6961 sc->fw_stats_data_size = 6962 (sizeof(struct stats_counter) + 6963 sizeof(struct per_port_stats) + 6964 sizeof(struct per_pf_stats) + 6965 /* sizeof(struct fcoe_statistics_params) + */ 6966 (sizeof(struct per_queue_stats) * num_queue_stats)); 6967 6968 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6969 &sc->fw_stats_dma, "fw stats") != 0) { 6970 bxe_free_fw_stats_mem(sc); 6971 return (-1); 6972 } 6973 6974 /* set up the shortcuts */ 6975 6976 sc->fw_stats_req = 6977 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6978 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6979 6980 sc->fw_stats_data = 6981 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6982 sc->fw_stats_req_size); 6983 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6984 sc->fw_stats_req_size); 6985 6986 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6987 (uintmax_t)sc->fw_stats_req_mapping); 6988 6989 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6990 (uintmax_t)sc->fw_stats_data_mapping); 6991 6992 return (0); 6993} 6994 6995/* 6996 * Bits map: 6997 * 0-7 - Engine0 load counter. 6998 * 8-15 - Engine1 load counter. 6999 * 16 - Engine0 RESET_IN_PROGRESS bit. 7000 * 17 - Engine1 RESET_IN_PROGRESS bit. 7001 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 7002 * function on the engine 7003 * 19 - Engine1 ONE_IS_LOADED. 7004 * 20 - Chip reset flow bit. When set, non-leaders must wait for the leader 7005 * on both engines to complete (check for both RESET_IN_PROGRESS bits and not 7006 * for just the one belonging to its engine). 7007 */ 7008#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 7009#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 7010#define BXE_PATH0_LOAD_CNT_SHIFT 0 7011#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 7012#define BXE_PATH1_LOAD_CNT_SHIFT 8 7013#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 7014#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 7015#define BXE_GLOBAL_RESET_BIT 0x00040000 7016 7017/* set the GLOBAL_RESET bit, should be run under rtnl lock */ 7018static void 7019bxe_set_reset_global(struct bxe_softc *sc) 7020{ 7021 uint32_t val; 7022 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7023 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7024 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 7025 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7026} 7027 7028/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 7029static void 7030bxe_clear_reset_global(struct bxe_softc *sc) 7031{ 7032 uint32_t val; 7033 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7034 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7035 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 7036 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7037} 7038 7039/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 7040static uint8_t 7041bxe_reset_is_global(struct bxe_softc *sc) 7042{ 7043 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7044 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 7045 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 7046} 7047 7048/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 7049static void 7050bxe_set_reset_done(struct bxe_softc *sc) 7051{ 7052 uint32_t val; 7053 uint32_t bit = SC_PATH(sc) ?
BXE_PATH1_RST_IN_PROG_BIT : 7054 BXE_PATH0_RST_IN_PROG_BIT; 7055 7056 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7057 7058 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7059 /* Clear the bit */ 7060 val &= ~bit; 7061 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7062 7063 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7064} 7065 7066/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7067static void 7068bxe_set_reset_in_progress(struct bxe_softc *sc) 7069{ 7070 uint32_t val; 7071 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7072 BXE_PATH0_RST_IN_PROG_BIT; 7073 7074 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7075 7076 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7077 /* Set the bit */ 7078 val |= bit; 7079 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7080 7081 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7082} 7083 7084/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7085static uint8_t 7086bxe_reset_is_done(struct bxe_softc *sc, 7087 int engine) 7088{ 7089 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7090 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7091 BXE_PATH0_RST_IN_PROG_BIT; 7092 7093 /* return false if bit is set */ 7094 return (val & bit) ? FALSE : TRUE; 7095} 7096 7097/* get the load status for an engine, should be run under rtnl lock */ 7098static uint8_t 7099bxe_get_load_status(struct bxe_softc *sc, 7100 int engine) 7101{ 7102 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7103 BXE_PATH0_LOAD_CNT_MASK; 7104 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7105 BXE_PATH0_LOAD_CNT_SHIFT; 7106 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7107 7108 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7109 7110 val = ((val & mask) >> shift); 7111 7112 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7113 7114 return (val != 0); 7115} 7116 7117/* set pf load mark */ 7118/* XXX needs to be under rtnl lock */ 7119static void 7120bxe_set_pf_load(struct bxe_softc *sc) 7121{ 7122 uint32_t val; 7123 uint32_t val1; 7124 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7125 BXE_PATH0_LOAD_CNT_MASK; 7126 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7127 BXE_PATH0_LOAD_CNT_SHIFT; 7128 7129 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7130 7131 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7132 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7133 7134 /* get the current counter value */ 7135 val1 = ((val & mask) >> shift); 7136 7137 /* set bit of this PF */ 7138 val1 |= (1 << SC_ABS_FUNC(sc)); 7139 7140 /* clear the old value */ 7141 val &= ~mask; 7142 7143 /* set the new one */ 7144 val |= ((val1 << shift) & mask); 7145 7146 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7147 7148 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7149} 7150 7151/* clear pf load mark */ 7152/* XXX needs to be under rtnl lock */ 7153static uint8_t 7154bxe_clear_pf_load(struct bxe_softc *sc) 7155{ 7156 uint32_t val1, val; 7157 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7158 BXE_PATH0_LOAD_CNT_MASK; 7159 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : 7160 BXE_PATH0_LOAD_CNT_SHIFT; 7161 7162 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7163 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7164 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 7165 7166 /* get the current counter value */ 7167 val1 = (val & mask) >> shift; 7168 7169 /* clear bit of that PF */ 7170 val1 &= ~(1 << SC_ABS_FUNC(sc)); 7171 7172 /* clear the old value */ 7173 val &= ~mask; 7174 7175 /* set the new one */ 7176 val |= ((val1 << shift) & mask); 7177 7178 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7179 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7180 return (val1 != 0); 7181} 7182 7183/* send load request to mcp and analyze response */ 7184static int 7185bxe_nic_load_request(struct bxe_softc *sc, 7186 uint32_t *load_code) 7187{ 7188 /* init fw_seq */ 7189 sc->fw_seq = 7190 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 7191 DRV_MSG_SEQ_NUMBER_MASK); 7192 7193 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 7194 7195 /* get the current FW pulse sequence */ 7196 sc->fw_drv_pulse_wr_seq = 7197 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 7198 DRV_PULSE_SEQ_MASK); 7199 7200 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 7201 sc->fw_drv_pulse_wr_seq); 7202 7203 /* load request */ 7204 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 7205 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 7206 7207 /* if the MCP fails to respond we must abort */ 7208 if (!(*load_code)) { 7209 BLOGE(sc, "MCP response failure!\n"); 7210 return (-1); 7211 } 7212 7213 /* if MCP refused then must abort */ 7214 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7215 BLOGE(sc, "MCP refused load request\n"); 7216 return (-1); 7217 } 7218 7219 return (0); 7220} 7221 7222/* 7223 * Check whether another PF has already loaded FW to chip. In virtualized 7224 * environments a pf from another VM may have already initialized the device 7225 * including loading FW. 7226 */ 7227static int 7228bxe_nic_load_analyze_req(struct bxe_softc *sc, 7229 uint32_t load_code) 7230{ 7231 uint32_t my_fw, loaded_fw; 7232 7233 /* is another pf loaded on this engine? */ 7234 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 7235 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 7236 /* build my FW version dword */ 7237 my_fw = (BCM_5710_FW_MAJOR_VERSION + 7238 (BCM_5710_FW_MINOR_VERSION << 8 ) + 7239 (BCM_5710_FW_REVISION_VERSION << 16) + 7240 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 7241 7242 /* read loaded FW from chip */ 7243 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 7244 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 7245 loaded_fw, my_fw); 7246 7247 /* abort nic load if version mismatch */ 7248 if (my_fw != loaded_fw) { 7249 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)", 7250 loaded_fw, my_fw); 7251 return (-1); 7252 } 7253 } 7254 7255 return (0); 7256} 7257 7258/* mark PMF if applicable */ 7259static void 7260bxe_nic_load_pmf(struct bxe_softc *sc, 7261 uint32_t load_code) 7262{ 7263 uint32_t ncsi_oem_data_addr; 7264 7265 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 7266 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 7267 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 7268 /* 7269 * Barrier here for ordering between writing sc->port.pmf here 7270 * and reading it from the periodic task. 7271 */ 7272 sc->port.pmf = 1; 7273 mb(); 7274 } else { 7275 sc->port.pmf = 0; 7276 } 7277 7278 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 7279 7280 /* XXX needed?
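 * If so: on the first load after a chip reset (LOAD_COMMON_CHIP) the block below zeroes the driver_version field in the NC-SI OEM data area (when the management FW exposes one) so stale version info is not left behind.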
*/ 7281 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 7282 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 7283 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 7284 if (ncsi_oem_data_addr) { 7285 REG_WR(sc, 7286 (ncsi_oem_data_addr + 7287 offsetof(struct glob_ncsi_oem_data, driver_version)), 7288 0); 7289 } 7290 } 7291 } 7292} 7293 7294static void 7295bxe_read_mf_cfg(struct bxe_softc *sc) 7296{ 7297 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 7298 int abs_func; 7299 int vn; 7300 7301 if (BXE_NOMCP(sc)) { 7302 return; /* what should be the default value in this case */ 7303 } 7304 7305 /* 7306 * The formula for computing the absolute function number is... 7307 * For 2 port configuration (4 functions per port): 7308 * abs_func = 2 * vn + SC_PORT + SC_PATH 7309 * For 4 port configuration (2 functions per port): 7310 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 7311 */ 7312 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 7313 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 7314 if (abs_func >= E1H_FUNC_MAX) { 7315 break; 7316 } 7317 sc->devinfo.mf_info.mf_config[vn] = 7318 MFCFG_RD(sc, func_mf_config[abs_func].config); 7319 } 7320 7321 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 7322 FUNC_MF_CFG_FUNC_DISABLED) { 7323 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 7324 sc->flags |= BXE_MF_FUNC_DIS; 7325 } else { 7326 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 7327 sc->flags &= ~BXE_MF_FUNC_DIS; 7328 } 7329} 7330 7331/* acquire split MCP access lock register */ 7332static int bxe_acquire_alr(struct bxe_softc *sc) 7333{ 7334 uint32_t j, val; 7335 7336 for (j = 0; j < 1000; j++) { 7337 val = (1UL << 31); 7338 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 7339 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 7340 if (val & (1L << 31)) 7341 break; 7342 7343 DELAY(5000); 7344 } 7345 7346 if (!(val & (1L << 31))) { 7347 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 7348 return (-1); 7349 } 7350 7351 return (0); 7352} 7353 7354/* release split MCP access lock register */ 7355static void bxe_release_alr(struct bxe_softc *sc) 7356{ 7357 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7358} 7359 7360static void 7361bxe_fan_failure(struct bxe_softc *sc) 7362{ 7363 int port = SC_PORT(sc); 7364 uint32_t ext_phy_config; 7365 7366 /* mark the failure */ 7367 ext_phy_config = 7368 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7369 7370 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7371 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7372 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7373 ext_phy_config); 7374 7375 /* log the failure */ 7376 BLOGW(sc, "Fan Failure has caused the driver to shut down " 7377 "the card to prevent permanent damage. " 7378 "Please contact OEM Support for assistance\n"); 7379 7380 /* XXX */ 7381#if 1 7382 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7383#else 7384 /* 7385 * Schedule device reset (unload) 7386 * This is because some boards draw enough power when the driver is 7387 * up to overheat if the fan fails.
7388 */ 7389 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7390 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7391#endif 7392} 7393 7394/* this function is called upon a link interrupt */ 7395static void 7396bxe_link_attn(struct bxe_softc *sc) 7397{ 7398 uint32_t pause_enabled = 0; 7399 struct host_port_stats *pstats; 7400 int cmng_fns; 7401 7402 /* Make sure that we are synced with the current statistics */ 7403 bxe_stats_handle(sc, STATS_EVENT_STOP); 7404 7405 elink_link_update(&sc->link_params, &sc->link_vars); 7406 7407 if (sc->link_vars.link_up) { 7408 7409 /* dropless flow control */ 7410 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7411 pause_enabled = 0; 7412 7413 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7414 pause_enabled = 1; 7415 } 7416 7417 REG_WR(sc, 7418 (BAR_USTRORM_INTMEM + 7419 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7420 pause_enabled); 7421 } 7422 7423 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7424 pstats = BXE_SP(sc, port_stats); 7425 /* reset old mac stats */ 7426 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7427 } 7428 7429 if (sc->state == BXE_STATE_OPEN) { 7430 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7431 } 7432 } 7433 7434 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7435 cmng_fns = bxe_get_cmng_fns_mode(sc); 7436 7437 if (cmng_fns != CMNG_FNS_NONE) { 7438 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7439 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7440 } else { 7441 /* rate shaping and fairness are disabled */ 7442 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7443 } 7444 } 7445 7446 bxe_link_report_locked(sc); 7447 7448 if (IS_MF(sc)) { 7449 ; // XXX bxe_link_sync_notify(sc); 7450 } 7451} 7452 7453static void 7454bxe_attn_int_asserted(struct bxe_softc *sc, 7455 uint32_t asserted) 7456{ 7457 int port = SC_PORT(sc); 7458 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7459 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7460 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7461 NIG_REG_MASK_INTERRUPT_PORT0; 7462 uint32_t aeu_mask; 7463 uint32_t nig_mask = 0; 7464 uint32_t reg_addr; 7465 uint32_t igu_acked; 7466 uint32_t cnt; 7467 7468 if (sc->attn_state & asserted) { 7469 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7470 } 7471 7472 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7473 7474 aeu_mask = REG_RD(sc, aeu_addr); 7475 7476 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7477 aeu_mask, asserted); 7478 7479 aeu_mask &= ~(asserted & 0x3ff); 7480 7481 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7482 7483 REG_WR(sc, aeu_addr, aeu_mask); 7484 7485 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7486 7487 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7488 sc->attn_state |= asserted; 7489 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7490 7491 if (asserted & ATTN_HARD_WIRED_MASK) { 7492 if (asserted & ATTN_NIG_FOR_FUNC) { 7493 7494 bxe_acquire_phy_lock(sc); 7495 /* save nig interrupt mask */ 7496 nig_mask = REG_RD(sc, nig_int_mask_addr); 7497 7498 /* If nig_mask is not set, no need to call the update function */ 7499 if (nig_mask) { 7500 REG_WR(sc, nig_int_mask_addr, 0); 7501 7502 bxe_link_attn(sc); 7503 } 7504 7505 /* handle unicore attn? 
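 * Link/PHY state was already serviced via elink_link_update() in bxe_link_attn() above, so nothing more may be needed here.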
*/ 7506 } 7507 7508 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7509 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7510 } 7511 7512 if (asserted & GPIO_2_FUNC) { 7513 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7514 } 7515 7516 if (asserted & GPIO_3_FUNC) { 7517 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7518 } 7519 7520 if (asserted & GPIO_4_FUNC) { 7521 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7522 } 7523 7524 if (port == 0) { 7525 if (asserted & ATTN_GENERAL_ATTN_1) { 7526 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7527 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7528 } 7529 if (asserted & ATTN_GENERAL_ATTN_2) { 7530 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7531 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7532 } 7533 if (asserted & ATTN_GENERAL_ATTN_3) { 7534 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7535 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7536 } 7537 } else { 7538 if (asserted & ATTN_GENERAL_ATTN_4) { 7539 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7540 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7541 } 7542 if (asserted & ATTN_GENERAL_ATTN_5) { 7543 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7544 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7545 } 7546 if (asserted & ATTN_GENERAL_ATTN_6) { 7547 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7548 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7549 } 7550 } 7551 } /* hardwired */ 7552 7553 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7554 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7555 } else { 7556 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7557 } 7558 7559 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7560 asserted, 7561 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7562 REG_WR(sc, reg_addr, asserted); 7563 7564 /* now set back the mask */ 7565 if (asserted & ATTN_NIG_FOR_FUNC) { 7566 /* 7567 * Verify that IGU ack through BAR was written before restoring 7568 * NIG mask. This loop should exit after 2-3 iterations max. 7569 */ 7570 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7571 cnt = 0; 7572 7573 do { 7574 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7575 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7576 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7577 7578 if (!igu_acked) { 7579 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7580 } 7581 7582 mb(); 7583 } 7584 7585 REG_WR(sc, nig_int_mask_addr, nig_mask); 7586 7587 bxe_release_phy_lock(sc); 7588 } 7589} 7590 7591static void 7592bxe_print_next_block(struct bxe_softc *sc, 7593 int idx, 7594 const char *blk) 7595{ 7596 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7597} 7598 7599static int 7600bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7601 uint32_t sig, 7602 int par_num, 7603 uint8_t print) 7604{ 7605 uint32_t cur_bit = 0; 7606 int i = 0; 7607 7608 for (i = 0; sig; i++) { 7609 cur_bit = ((uint32_t)0x1 << i); 7610 if (sig & cur_bit) { 7611 switch (cur_bit) { 7612 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7613 if (print) 7614 bxe_print_next_block(sc, par_num++, "BRB"); 7615 break; 7616 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7617 if (print) 7618 bxe_print_next_block(sc, par_num++, "PARSER"); 7619 break; 7620 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7621 if (print) 7622 bxe_print_next_block(sc, par_num++, "TSDM"); 7623 break; 7624 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7625 if (print) 7626 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7627 break; 7628 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7629 if (print) 7630 bxe_print_next_block(sc, par_num++, "TCM"); 7631 break; 7632 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7633 if (print) 7634 bxe_print_next_block(sc, par_num++, "TSEMI"); 7635 break; 7636 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7637 if (print) 7638 bxe_print_next_block(sc, par_num++, "XPB"); 7639 break; 7640 } 7641 7642 /* Clear the bit */ 7643 sig &= ~cur_bit; 7644 } 7645 } 7646 7647 return (par_num); 7648} 7649 7650static int 7651bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7652 uint32_t sig, 7653 int par_num, 7654 uint8_t *global, 7655 uint8_t print) 7656{ 7657 int i = 0; 7658 uint32_t cur_bit = 0; 7659 for (i = 0; sig; i++) { 7660 cur_bit = ((uint32_t)0x1 << i); 7661 if (sig & cur_bit) { 7662 switch (cur_bit) { 7663 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7664 if (print) 7665 bxe_print_next_block(sc, par_num++, "PBF"); 7666 break; 7667 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7668 if (print) 7669 bxe_print_next_block(sc, par_num++, "QM"); 7670 break; 7671 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7672 if (print) 7673 bxe_print_next_block(sc, par_num++, "TM"); 7674 break; 7675 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7676 if (print) 7677 bxe_print_next_block(sc, par_num++, "XSDM"); 7678 break; 7679 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7680 if (print) 7681 bxe_print_next_block(sc, par_num++, "XCM"); 7682 break; 7683 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7684 if (print) 7685 bxe_print_next_block(sc, par_num++, "XSEMI"); 7686 break; 7687 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7688 if (print) 7689 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7690 break; 7691 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7692 if (print) 7693 bxe_print_next_block(sc, par_num++, "NIG"); 7694 break; 7695 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7696 if (print) 7697 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7698 *global = TRUE; 7699 break; 7700 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7701 if (print) 7702 bxe_print_next_block(sc, par_num++, "DEBUG"); 7703 break; 7704 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7705 if (print) 7706 bxe_print_next_block(sc, par_num++, "USDM"); 7707 break; 7708 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7709 if (print) 7710 bxe_print_next_block(sc, par_num++, "UCM"); 7711 break; 7712 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7713 if (print) 7714 bxe_print_next_block(sc, par_num++, "USEMI"); 7715 break; 7716 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7717 if (print) 7718 bxe_print_next_block(sc, par_num++, "UPB"); 7719 break; 7720 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7721 if 
(print) 7722 bxe_print_next_block(sc, par_num++, "CSDM"); 7723 break; 7724 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7725 if (print) 7726 bxe_print_next_block(sc, par_num++, "CCM"); 7727 break; 7728 } 7729 7730 /* Clear the bit */ 7731 sig &= ~cur_bit; 7732 } 7733 } 7734 7735 return (par_num); 7736} 7737 7738static int 7739bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7740 uint32_t sig, 7741 int par_num, 7742 uint8_t print) 7743{ 7744 uint32_t cur_bit = 0; 7745 int i = 0; 7746 7747 for (i = 0; sig; i++) { 7748 cur_bit = ((uint32_t)0x1 << i); 7749 if (sig & cur_bit) { 7750 switch (cur_bit) { 7751 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7752 if (print) 7753 bxe_print_next_block(sc, par_num++, "CSEMI"); 7754 break; 7755 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7756 if (print) 7757 bxe_print_next_block(sc, par_num++, "PXP"); 7758 break; 7759 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7760 if (print) 7761 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7762 break; 7763 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7764 if (print) 7765 bxe_print_next_block(sc, par_num++, "CFC"); 7766 break; 7767 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7768 if (print) 7769 bxe_print_next_block(sc, par_num++, "CDU"); 7770 break; 7771 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7772 if (print) 7773 bxe_print_next_block(sc, par_num++, "DMAE"); 7774 break; 7775 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7776 if (print) 7777 bxe_print_next_block(sc, par_num++, "IGU"); 7778 break; 7779 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7780 if (print) 7781 bxe_print_next_block(sc, par_num++, "MISC"); 7782 break; 7783 } 7784 7785 /* Clear the bit */ 7786 sig &= ~cur_bit; 7787 } 7788 } 7789 7790 return (par_num); 7791} 7792 7793static int 7794bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7795 uint32_t sig, 7796 int par_num, 7797 uint8_t *global, 7798 uint8_t print) 7799{ 7800 uint32_t cur_bit = 0; 7801 int i = 0; 7802 7803 for (i = 0; sig; i++) { 7804 cur_bit = ((uint32_t)0x1 << i); 7805 if (sig & cur_bit) { 7806 switch (cur_bit) { 7807 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7808 if (print) 7809 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7810 *global = TRUE; 7811 break; 7812 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7813 if (print) 7814 bxe_print_next_block(sc, par_num++, 7815 "MCP UMP RX"); 7816 *global = TRUE; 7817 break; 7818 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7819 if (print) 7820 bxe_print_next_block(sc, par_num++, 7821 "MCP UMP TX"); 7822 *global = TRUE; 7823 break; 7824 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7825 if (print) 7826 bxe_print_next_block(sc, par_num++, 7827 "MCP SCPAD"); 7828 *global = TRUE; 7829 break; 7830 } 7831 7832 /* Clear the bit */ 7833 sig &= ~cur_bit; 7834 } 7835 } 7836 7837 return (par_num); 7838} 7839 7840static int 7841bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7842 uint32_t sig, 7843 int par_num, 7844 uint8_t print) 7845{ 7846 uint32_t cur_bit = 0; 7847 int i = 0; 7848 7849 for (i = 0; sig; i++) { 7850 cur_bit = ((uint32_t)0x1 << i); 7851 if (sig & cur_bit) { 7852 switch (cur_bit) { 7853 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7854 if (print) 7855 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7856 break; 7857 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7858 if (print) 7859 bxe_print_next_block(sc, par_num++, "ATC"); 7860 break; 7861 } 7862 7863 /* Clear the bit */ 7864 sig &= ~cur_bit; 7865 } 7866 } 7867 7868 return (par_num); 7869} 7870 7871static uint8_t 
7872bxe_parity_attn(struct bxe_softc *sc, 7873 uint8_t *global, 7874 uint8_t print, 7875 uint32_t *sig) 7876{ 7877 int par_num = 0; 7878 7879 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7880 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7881 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7882 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7883 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7884 BLOGE(sc, "Parity error: HW block parity attention:\n" 7885 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7886 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7887 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7888 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7889 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7890 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7891 7892 if (print) 7893 BLOGI(sc, "Parity errors detected in blocks: "); 7894 7895 par_num = 7896 bxe_check_blocks_with_parity0(sc, sig[0] & 7897 HW_PRTY_ASSERT_SET_0, 7898 par_num, print); 7899 par_num = 7900 bxe_check_blocks_with_parity1(sc, sig[1] & 7901 HW_PRTY_ASSERT_SET_1, 7902 par_num, global, print); 7903 par_num = 7904 bxe_check_blocks_with_parity2(sc, sig[2] & 7905 HW_PRTY_ASSERT_SET_2, 7906 par_num, print); 7907 par_num = 7908 bxe_check_blocks_with_parity3(sc, sig[3] & 7909 HW_PRTY_ASSERT_SET_3, 7910 par_num, global, print); 7911 par_num = 7912 bxe_check_blocks_with_parity4(sc, sig[4] & 7913 HW_PRTY_ASSERT_SET_4, 7914 par_num, print); 7915 7916 if (print) 7917 BLOGI(sc, "\n"); 7918 7919 return (TRUE); 7920 } 7921 7922 return (FALSE); 7923} 7924 7925static uint8_t 7926bxe_chk_parity_attn(struct bxe_softc *sc, 7927 uint8_t *global, 7928 uint8_t print) 7929{ 7930 struct attn_route attn = { {0} }; 7931 int port = SC_PORT(sc); 7932 7933 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7934 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7935 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7936 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7937 7938 /* 7939 * Since MCP attentions can't be disabled inside the block, we need to 7940 * read AEU registers to see whether they're currently disabled 7941 */ 7942 attn.sig[3] &= ((REG_RD(sc, (!port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
                                  : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
                    MISC_AEU_ENABLE_MCP_PRTY_BITS) |
                   ~MISC_AEU_ENABLE_MCP_PRTY_BITS);

    if (!CHIP_IS_E1x(sc))
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);

    return (bxe_parity_attn(sc, global, print, attn.sig));
}

static void
bxe_attn_int_deasserted4(struct bxe_softc *sc,
                         uint32_t         attn)
{
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}

static void
bxe_e1h_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    bxe_tx_disable(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void
bxe_e1h_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);

    // XXX bxe_tx_enable(sc);
}

/*
 * Called due to an MCP event (on pmf):
 *   - reread the new bandwidth configuration
 *   - configure the FW
 *   - notify other functions about the
change 8033 */ 8034static void 8035bxe_config_mf_bw(struct bxe_softc *sc) 8036{ 8037 if (sc->link_vars.link_up) { 8038 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 8039 // XXX bxe_link_sync_notify(sc); 8040 } 8041 8042 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 8043} 8044 8045static void 8046bxe_set_mf_bw(struct bxe_softc *sc) 8047{ 8048 bxe_config_mf_bw(sc); 8049 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 8050} 8051 8052static void 8053bxe_handle_eee_event(struct bxe_softc *sc) 8054{ 8055 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 8056 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 8057} 8058 8059#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 8060 8061static void 8062bxe_drv_info_ether_stat(struct bxe_softc *sc) 8063{ 8064 struct eth_stats_info *ether_stat = 8065 &sc->sp->drv_info_to_mcp.ether_stat; 8066 8067 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 8068 ETH_STAT_INFO_VERSION_LEN); 8069 8070 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 8071 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 8072 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 8073 ether_stat->mac_local + MAC_PAD, 8074 MAC_PAD, ETH_ALEN); 8075 8076 ether_stat->mtu_size = sc->mtu; 8077 8078 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 8079 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 8080 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 8081 } 8082 8083 // XXX ether_stat->feature_flags |= ???; 8084 8085 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; 8086 8087 ether_stat->txq_size = sc->tx_ring_size; 8088 ether_stat->rxq_size = sc->rx_ring_size; 8089} 8090 8091static void 8092bxe_handle_drv_info_req(struct bxe_softc *sc) 8093{ 8094 enum drv_info_opcode op_code; 8095 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8096 8097 /* if drv_info version supported by MFW doesn't match - send NACK */ 8098 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8099 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8100 return; 8101 } 8102 8103 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8104 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8105 8106 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8107 8108 switch (op_code) { 8109 case ETH_STATS_OPCODE: 8110 bxe_drv_info_ether_stat(sc); 8111 break; 8112 case FCOE_STATS_OPCODE: 8113 case ISCSI_STATS_OPCODE: 8114 default: 8115 /* if op code isn't supported - send NACK */ 8116 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8117 return; 8118 } 8119 8120 /* 8121 * If we got drv_info attn from MFW then these fields are defined in 8122 * shmem2 for sure 8123 */ 8124 SHMEM2_WR(sc, drv_info_host_addr_lo, 8125 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8126 SHMEM2_WR(sc, drv_info_host_addr_hi, 8127 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8128 8129 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8130} 8131 8132static void 8133bxe_dcc_event(struct bxe_softc *sc, 8134 uint32_t dcc_event) 8135{ 8136 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8137 8138 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8139 /* 8140 * This is the only place besides the function initialization 8141 * where the sc->flags can change so it is done without any 8142 * locks 8143 */ 8144 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8145 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8146 sc->flags |= BXE_MF_FUNC_DIS; 8147 bxe_e1h_disable(sc); 8148 } else { 8149 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8150 
sc->flags &= ~BXE_MF_FUNC_DIS; 8151 bxe_e1h_enable(sc); 8152 } 8153 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8154 } 8155 8156 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8157 bxe_config_mf_bw(sc); 8158 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8159 } 8160 8161 /* Report results to MCP */ 8162 if (dcc_event) 8163 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8164 else 8165 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8166} 8167 8168static void 8169bxe_pmf_update(struct bxe_softc *sc) 8170{ 8171 int port = SC_PORT(sc); 8172 uint32_t val; 8173 8174 sc->port.pmf = 1; 8175 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8176 8177 /* 8178 * We need the mb() to ensure the ordering between the writing to 8179 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8180 */ 8181 mb(); 8182 8183 /* queue a periodic task */ 8184 // XXX schedule task... 8185 8186 // XXX bxe_dcbx_pmf_update(sc); 8187 8188 /* enable nig attention */ 8189 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8190 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8191 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8192 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8193 } else if (!CHIP_IS_E1x(sc)) { 8194 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8195 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8196 } 8197 8198 bxe_stats_handle(sc, STATS_EVENT_PMF); 8199} 8200 8201static int 8202bxe_mc_assert(struct bxe_softc *sc) 8203{ 8204 char last_idx; 8205 int i, rc = 0; 8206 uint32_t row0, row1, row2, row3; 8207 8208 /* XSTORM */ 8209 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8210 if (last_idx) 8211 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8212 8213 /* print the asserts */ 8214 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8215 8216 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8217 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8218 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8219 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8220 8221 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8222 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8223 i, row3, row2, row1, row0); 8224 rc++; 8225 } else { 8226 break; 8227 } 8228 } 8229 8230 /* TSTORM */ 8231 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8232 if (last_idx) { 8233 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8234 } 8235 8236 /* print the asserts */ 8237 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8238 8239 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8240 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8241 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8242 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8243 8244 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8245 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8246 i, row3, row2, row1, row0); 8247 rc++; 8248 } else { 8249 break; 8250 } 8251 } 8252 8253 /* CSTORM */ 8254 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8255 if (last_idx) { 8256 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8257 } 8258 8259 /* print the asserts */ 8260 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8261 8262 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8263 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + 
CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8264 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8265 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8266 8267 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8268 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8269 i, row3, row2, row1, row0); 8270 rc++; 8271 } else { 8272 break; 8273 } 8274 } 8275 8276 /* USTORM */ 8277 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8278 if (last_idx) { 8279 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8280 } 8281 8282 /* print the asserts */ 8283 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8284 8285 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8286 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8287 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); 8288 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8289 8290 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8291 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8292 i, row3, row2, row1, row0); 8293 rc++; 8294 } else { 8295 break; 8296 } 8297 } 8298 8299 return (rc); 8300} 8301 8302static void 8303bxe_attn_int_deasserted3(struct bxe_softc *sc, 8304 uint32_t attn) 8305{ 8306 int func = SC_FUNC(sc); 8307 uint32_t val; 8308 8309 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8310 8311 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8312 8313 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8314 bxe_read_mf_cfg(sc); 8315 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8316 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8317 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8318 8319 if (val & DRV_STATUS_DCC_EVENT_MASK) 8320 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8321 8322 if (val & DRV_STATUS_SET_MF_BW) 8323 bxe_set_mf_bw(sc); 8324 8325 if (val & DRV_STATUS_DRV_INFO_REQ) 8326 bxe_handle_drv_info_req(sc); 8327 8328#if 0 8329 if (val & DRV_STATUS_VF_DISABLED) 8330 bxe_vf_handle_flr_event(sc); 8331#endif 8332 8333 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8334 bxe_pmf_update(sc); 8335 8336#if 0 8337 if (sc->port.pmf && 8338 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8339 (sc->dcbx_enabled > 0)) 8340 /* start dcbx state machine */ 8341 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8342#endif 8343 8344#if 0 8345 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8346 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8347#endif 8348 8349 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8350 bxe_handle_eee_event(sc); 8351 8352 if (sc->link_vars.periodic_flags & 8353 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8354 /* sync with link */ 8355 bxe_acquire_phy_lock(sc); 8356 sc->link_vars.periodic_flags &= 8357 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8358 bxe_release_phy_lock(sc); 8359 if (IS_MF(sc)) 8360 ; // XXX bxe_link_sync_notify(sc); 8361 bxe_link_report(sc); 8362 } 8363 8364 /* 8365 * Always call it here: bxe_link_report() will 8366 * prevent the link indication duplication. 
             */
            bxe_link_status_update(sc);

        } else if (attn & BXE_MC_ASSERT_BITS) {

            BLOGE(sc, "MC assert!\n");
            bxe_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bxe_panic(sc, ("MC assert!\n"));

        } else if (attn & BXE_MCP_ASSERT) {

            BLOGE(sc, "MCP assert!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            // XXX bxe_fw_dump(sc);

        } else {
            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BXE_GRC_TIMEOUT) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            BLOGE(sc, "GRC time-out 0x%08x\n", val);
        }
        if (attn & BXE_GRC_RSV) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}

static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
        }

        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1  = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0  = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If only PXP2_EOP_ERROR_BIT is set in
             * STS0 and STS1, clear it.
             *
             * We may lose additional attentions between
             * STS0 and STS_CLR0; in that case the user will not
             * be notified about them.
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * If PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * is set then notify.
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");

                /*
                 * If only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear the attention from the PXP2 block without
                 * panicking.
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn &
HW_INTERRUT_ASSERT_SET_2) { 8483 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8484 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8485 8486 val = REG_RD(sc, reg_offset); 8487 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8488 REG_WR(sc, reg_offset, val); 8489 8490 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8491 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8492 bxe_panic(sc, ("HW block attention set2\n")); 8493 } 8494} 8495 8496static void 8497bxe_attn_int_deasserted1(struct bxe_softc *sc, 8498 uint32_t attn) 8499{ 8500 int port = SC_PORT(sc); 8501 int reg_offset; 8502 uint32_t val; 8503 8504 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8505 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8506 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8507 /* DORQ discard attention */ 8508 if (val & 0x2) { 8509 BLOGE(sc, "FATAL error from DORQ\n"); 8510 } 8511 } 8512 8513 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8514 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8515 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8516 8517 val = REG_RD(sc, reg_offset); 8518 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8519 REG_WR(sc, reg_offset, val); 8520 8521 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8522 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8523 bxe_panic(sc, ("HW block attention set1\n")); 8524 } 8525} 8526 8527static void 8528bxe_attn_int_deasserted0(struct bxe_softc *sc, 8529 uint32_t attn) 8530{ 8531 int port = SC_PORT(sc); 8532 int reg_offset; 8533 uint32_t val; 8534 8535 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8536 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8537 8538 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8539 val = REG_RD(sc, reg_offset); 8540 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8541 REG_WR(sc, reg_offset, val); 8542 8543 BLOGW(sc, "SPIO5 hw attention\n"); 8544 8545 /* Fan failure attention */ 8546 elink_hw_reset_phy(&sc->link_params); 8547 bxe_fan_failure(sc); 8548 } 8549 8550 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8551 bxe_acquire_phy_lock(sc); 8552 elink_handle_module_detect_int(&sc->link_params); 8553 bxe_release_phy_lock(sc); 8554 } 8555 8556 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8557 val = REG_RD(sc, reg_offset); 8558 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8559 REG_WR(sc, reg_offset, val); 8560 8561 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8562 (attn & HW_INTERRUT_ASSERT_SET_0))); 8563 } 8564} 8565 8566static void 8567bxe_attn_int_deasserted(struct bxe_softc *sc, 8568 uint32_t deasserted) 8569{ 8570 struct attn_route attn; 8571 struct attn_route *group_mask; 8572 int port = SC_PORT(sc); 8573 int index; 8574 uint32_t reg_addr; 8575 uint32_t val; 8576 uint32_t aeu_mask; 8577 uint8_t global = FALSE; 8578 8579 /* 8580 * Need to take HW lock because MCP or other port might also 8581 * try to handle this event. 8582 */ 8583 bxe_acquire_alr(sc); 8584 8585 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8586 /* XXX 8587 * In case of parity errors don't handle attentions so that 8588 * other function would "see" parity errors. 8589 */ 8590 sc->recovery_state = BXE_RECOVERY_INIT; 8591 // XXX schedule a recovery task... 
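        /*
         * A minimal sketch of the missing step, assuming a dedicated
         * recovery task existed (hypothetical field name, not part of
         * this driver):
         *
         *   taskqueue_enqueue(sc->sp_tq, &sc->recovery_tq_task);
         *
         * For now only the recovery_state transition above records that
         * recovery is required.
         */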
8592 /* disable HW interrupts */ 8593 bxe_int_disable(sc); 8594 bxe_release_alr(sc); 8595 return; 8596 } 8597 8598 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8599 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8600 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8601 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8602 if (!CHIP_IS_E1x(sc)) { 8603 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8604 } else { 8605 attn.sig[4] = 0; 8606 } 8607 8608 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8609 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8610 8611 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8612 if (deasserted & (1 << index)) { 8613 group_mask = &sc->attn_group[index]; 8614 8615 BLOGD(sc, DBG_INTR, 8616 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8617 group_mask->sig[0], group_mask->sig[1], 8618 group_mask->sig[2], group_mask->sig[3], 8619 group_mask->sig[4]); 8620 8621 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8622 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8623 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8624 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8625 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8626 } 8627 } 8628 8629 bxe_release_alr(sc); 8630 8631 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8632 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8633 COMMAND_REG_ATTN_BITS_CLR); 8634 } else { 8635 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8636 } 8637 8638 val = ~deasserted; 8639 BLOGD(sc, DBG_INTR, 8640 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8641 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8642 REG_WR(sc, reg_addr, val); 8643 8644 if (~sc->attn_state & deasserted) { 8645 BLOGE(sc, "IGU error\n"); 8646 } 8647 8648 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8649 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8650 8651 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8652 8653 aeu_mask = REG_RD(sc, reg_addr); 8654 8655 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8656 aeu_mask, deasserted); 8657 aeu_mask |= (deasserted & 0x3ff); 8658 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8659 8660 REG_WR(sc, reg_addr, aeu_mask); 8661 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8662 8663 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8664 sc->attn_state &= ~deasserted; 8665 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8666} 8667 8668static void 8669bxe_attn_int(struct bxe_softc *sc) 8670{ 8671 /* read local copy of bits */ 8672 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8673 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8674 uint32_t attn_state = sc->attn_state; 8675 8676 /* look for changed bits */ 8677 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8678 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8679 8680 BLOGD(sc, DBG_INTR, 8681 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8682 attn_bits, attn_ack, asserted, deasserted); 8683 8684 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8685 BLOGE(sc, "BAD attention state\n"); 8686 } 8687 8688 /* handle bits that were raised */ 8689 if (asserted) { 8690 bxe_attn_int_asserted(sc, asserted); 8691 } 8692 8693 if (deasserted) { 8694 bxe_attn_int_deasserted(sc, deasserted); 8695 } 8696} 8697 8698static uint16_t 8699bxe_update_dsb_idx(struct bxe_softc *sc) 8700{ 8701 struct host_sp_status_block *def_sb = sc->def_sb; 8702 uint16_t rc = 0; 8703 8704 mb(); /* status block is written to by the chip */ 8705 8706 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8707 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8708 rc |= BXE_DEF_SB_ATT_IDX; 8709 } 8710 8711 if (sc->def_idx != def_sb->sp_sb.running_index) { 8712 sc->def_idx = def_sb->sp_sb.running_index; 8713 rc |= BXE_DEF_SB_IDX; 8714 } 8715 8716 mb(); 8717 8718 return (rc); 8719} 8720 8721static inline struct ecore_queue_sp_obj * 8722bxe_cid_to_q_obj(struct bxe_softc *sc, 8723 uint32_t cid) 8724{ 8725 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8726 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8727} 8728 8729static void 8730bxe_handle_mcast_eqe(struct bxe_softc *sc) 8731{ 8732 struct ecore_mcast_ramrod_params rparam; 8733 int rc; 8734 8735 memset(&rparam, 0, sizeof(rparam)); 8736 8737 rparam.mcast_obj = &sc->mcast_obj; 8738 8739 BXE_MCAST_LOCK(sc); 8740 8741 /* clear pending state for the last command */ 8742 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8743 8744 /* if there are pending mcast commands - send them */ 8745 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8746 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8747 if (rc < 0) { 8748 BLOGD(sc, DBG_SP, 8749 "ERROR: Failed to send pending mcast commands (%d)\n", 8750 rc); 8751 } 8752 } 8753 8754 BXE_MCAST_UNLOCK(sc); 8755} 8756 8757static void 8758bxe_handle_classification_eqe(struct bxe_softc *sc, 8759 union event_ring_elem *elem) 8760{ 8761 unsigned long ramrod_flags = 0; 8762 int rc = 0; 8763 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8764 struct ecore_vlan_mac_obj *vlan_mac_obj; 8765 8766 /* always push next commands out, don't wait here */ 8767 bit_set(&ramrod_flags, RAMROD_CONT); 8768 
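    /*
     * The echo field packs two values: the pending classification type
     * in the bits above BXE_SWCID_SHIFT (tested in the switch below) and
     * the connection ID in the low BXE_SWCID_MASK bits (extracted above).
     */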
    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bxe_handle_mcast_eqe(sc);
        return;

    default:
        BLOGE(sc, "Unsupported classification command: %d\n",
              elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
    } else if (rc > 0) {
        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
    }
}

static void
bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
                       union event_ring_elem *elem)
{
    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send the rx_mode command again if it was requested */
    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
                               &sc->sp_state)) {
        bxe_set_storm_rx_mode(sc);
    }
#if 0
    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
                                    &sc->sp_state)) {
        bxe_set_iscsi_eth_rx_mode(sc, TRUE);
    }
    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
                                    &sc->sp_state)) {
        bxe_set_iscsi_eth_rx_mode(sc, FALSE);
    }
#endif
}

static void
bxe_update_eq_prod(struct bxe_softc *sc,
                   uint16_t         prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb(); /* keep prod updates ordered */
}

static void
bxe_eq_int(struct bxe_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0;
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
     * When we get to the next page we need to adjust so the loop condition
     * below will be met: the next element is the size of a regular element,
     * hence we increment by 1.
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function may never run in parallel with itself for a
     * specific sc, so there is no need for a read memory barrier here.
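     * (bxe_eq_int() is invoked only from the slowpath taskqueue, which is
     * created with a single thread in bxe_interrupt_attach().)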
8858 */ 8859 sw_cons = sc->eq_cons; 8860 sw_prod = sc->eq_prod; 8861 8862 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8863 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8864 8865 for (; 8866 sw_cons != hw_cons; 8867 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8868 8869 elem = &sc->eq[EQ_DESC(sw_cons)]; 8870 8871#if 0 8872 int rc; 8873 rc = bxe_iov_eq_sp_event(sc, elem); 8874 if (!rc) { 8875 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8876 goto next_spqe; 8877 } 8878#endif 8879 8880 /* elem CID originates from FW, actually LE */ 8881 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8882 opcode = elem->message.opcode; 8883 8884 /* handle eq element */ 8885 switch (opcode) { 8886#if 0 8887 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8888 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8889 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8890 continue; 8891#endif 8892 8893 case EVENT_RING_OPCODE_STAT_QUERY: 8894 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8895 sc->stats_comp++); 8896 /* nothing to do with stats comp */ 8897 goto next_spqe; 8898 8899 case EVENT_RING_OPCODE_CFC_DEL: 8900 /* handle according to cid range */ 8901 /* we may want to verify here that the sc state is HALTING */ 8902 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8903 q_obj = bxe_cid_to_q_obj(sc, cid); 8904 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8905 break; 8906 } 8907 goto next_spqe; 8908 8909 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8910 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8911 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8912 break; 8913 } 8914 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8915 goto next_spqe; 8916 8917 case EVENT_RING_OPCODE_START_TRAFFIC: 8918 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8919 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8920 break; 8921 } 8922 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8923 goto next_spqe; 8924 8925 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8926 echo = elem->message.data.function_update_event.echo; 8927 if (echo == SWITCH_UPDATE) { 8928 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8929 if (f_obj->complete_cmd(sc, f_obj, 8930 ECORE_F_CMD_SWITCH_UPDATE)) { 8931 break; 8932 } 8933 } 8934 else { 8935 BLOGD(sc, DBG_SP, 8936 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8937#if 0 8938 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8939 /* 8940 * We will perform the queues update from the sp_core_task as 8941 * all queue SP operations should run with CORE_LOCK. 
8942 */ 8943 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8944 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8945#endif 8946 } 8947 goto next_spqe; 8948 8949#if 0 8950 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8951 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8952 bxe_after_afex_vif_lists(sc, elem); 8953 goto next_spqe; 8954#endif 8955 8956 case EVENT_RING_OPCODE_FORWARD_SETUP: 8957 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8958 if (q_obj->complete_cmd(sc, q_obj, 8959 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8960 break; 8961 } 8962 goto next_spqe; 8963 8964 case EVENT_RING_OPCODE_FUNCTION_START: 8965 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8966 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8967 break; 8968 } 8969 goto next_spqe; 8970 8971 case EVENT_RING_OPCODE_FUNCTION_STOP: 8972 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8973 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8974 break; 8975 } 8976 goto next_spqe; 8977 } 8978 8979 switch (opcode | sc->state) { 8980 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8981 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8982 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8983 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8984 rss_raw->clear_pending(rss_raw); 8985 break; 8986 8987 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8988 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8989 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8990 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8991 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8992 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8993 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8994 bxe_handle_classification_eqe(sc, elem); 8995 break; 8996 8997 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8998 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8999 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 9000 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 9001 bxe_handle_mcast_eqe(sc); 9002 break; 9003 9004 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 9005 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 9006 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 9007 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 9008 bxe_handle_rx_mode_eqe(sc, elem); 9009 break; 9010 9011 default: 9012 /* unknown event log error and continue */ 9013 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 9014 elem->message.opcode, sc->state); 9015 } 9016 9017next_spqe: 9018 spqe_cnt++; 9019 } /* for */ 9020 9021 mb(); 9022 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 9023 9024 sc->eq_cons = sw_cons; 9025 sc->eq_prod = sw_prod; 9026 9027 /* make sure that above mem writes were issued towards the memory */ 9028 wmb(); 9029 9030 /* update producer */ 9031 bxe_update_eq_prod(sc, sc->eq_prod); 9032} 9033 9034static void 9035bxe_handle_sp_tq(void *context, 9036 int pending) 9037{ 9038 struct bxe_softc *sc = (struct bxe_softc *)context; 9039 uint16_t status; 9040 9041 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 9042 9043 /* what work needs to be performed? 
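     * bxe_update_dsb_idx() returns a mask: BXE_DEF_SB_ATT_IDX when the
     * attention index moved (HW attentions to service) and BXE_DEF_SB_IDX
     * when the slowpath index moved (EQ completions to service).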
*/ 9044 status = bxe_update_dsb_idx(sc); 9045 9046 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 9047 9048 /* HW attentions */ 9049 if (status & BXE_DEF_SB_ATT_IDX) { 9050 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 9051 bxe_attn_int(sc); 9052 status &= ~BXE_DEF_SB_ATT_IDX; 9053 } 9054 9055 /* SP events: STAT_QUERY and others */ 9056 if (status & BXE_DEF_SB_IDX) { 9057 /* handle EQ completions */ 9058 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 9059 bxe_eq_int(sc); 9060 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9061 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9062 status &= ~BXE_DEF_SB_IDX; 9063 } 9064 9065 /* if status is non zero then something went wrong */ 9066 if (__predict_false(status)) { 9067 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9068 } 9069 9070 /* ack status block only if something was actually handled */ 9071 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9072 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9073 9074 /* 9075 * Must be called after the EQ processing (since eq leads to sriov 9076 * ramrod completion flows). 9077 * This flow may have been scheduled by the arrival of a ramrod 9078 * completion, or by the sriov code rescheduling itself. 9079 */ 9080 // XXX bxe_iov_sp_task(sc); 9081 9082#if 0 9083 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9084 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9085 &sc->sp_state)) { 9086 bxe_link_report(sc); 9087 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9088 } 9089#endif 9090} 9091 9092static void 9093bxe_handle_fp_tq(void *context, 9094 int pending) 9095{ 9096 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9097 struct bxe_softc *sc = fp->sc; 9098 uint8_t more_tx = FALSE; 9099 uint8_t more_rx = FALSE; 9100 9101 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9102 9103 /* XXX 9104 * IFF_DRV_RUNNING state can't be checked here since we process 9105 * slowpath events on a client queue during setup. Instead 9106 * we need to add a "process/continue" flag here that the driver 9107 * can use to tell the task here not to do anything. 
9108 */ 9109#if 0 9110 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 9111 return; 9112 } 9113#endif 9114 9115 /* update the fastpath index */ 9116 bxe_update_fp_sb_idx(fp); 9117 9118 /* XXX add loop here if ever support multiple tx CoS */ 9119 /* fp->txdata[cos] */ 9120 if (bxe_has_tx_work(fp)) { 9121 BXE_FP_TX_LOCK(fp); 9122 more_tx = bxe_txeof(sc, fp); 9123 BXE_FP_TX_UNLOCK(fp); 9124 } 9125 9126 if (bxe_has_rx_work(fp)) { 9127 more_rx = bxe_rxeof(sc, fp); 9128 } 9129 9130 if (more_rx /*|| more_tx*/) { 9131 /* still more work to do */ 9132 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9133 return; 9134 } 9135 9136 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9137 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9138} 9139 9140static void 9141bxe_task_fp(struct bxe_fastpath *fp) 9142{ 9143 struct bxe_softc *sc = fp->sc; 9144 uint8_t more_tx = FALSE; 9145 uint8_t more_rx = FALSE; 9146 9147 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 9148 9149 /* update the fastpath index */ 9150 bxe_update_fp_sb_idx(fp); 9151 9152 /* XXX add loop here if ever support multiple tx CoS */ 9153 /* fp->txdata[cos] */ 9154 if (bxe_has_tx_work(fp)) { 9155 BXE_FP_TX_LOCK(fp); 9156 more_tx = bxe_txeof(sc, fp); 9157 BXE_FP_TX_UNLOCK(fp); 9158 } 9159 9160 if (bxe_has_rx_work(fp)) { 9161 more_rx = bxe_rxeof(sc, fp); 9162 } 9163 9164 if (more_rx /*|| more_tx*/) { 9165 /* still more work to do, bail out if this ISR and process later */ 9166 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9167 return; 9168 } 9169 9170 /* 9171 * Here we write the fastpath index taken before doing any tx or rx work. 9172 * It is very well possible other hw events occurred up to this point and 9173 * they were actually processed accordingly above. Since we're going to 9174 * write an older fastpath index, an interrupt is coming which we might 9175 * not do any work in. 9176 */ 9177 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9178 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9179} 9180 9181/* 9182 * Legacy interrupt entry point. 9183 * 9184 * Verifies that the controller generated the interrupt and 9185 * then calls a separate routine to handle the various 9186 * interrupt causes: link, RX, and TX. 9187 */ 9188static void 9189bxe_intr_legacy(void *xsc) 9190{ 9191 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9192 struct bxe_fastpath *fp; 9193 uint16_t status, mask; 9194 int i; 9195 9196 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 9197 9198#if 0 9199 /* Don't handle any interrupts if we're not ready. */ 9200 if (__predict_false(sc->intr_sem != 0)) { 9201 return; 9202 } 9203#endif 9204 9205 /* 9206 * 0 for ustorm, 1 for cstorm 9207 * the bits returned from ack_int() are 0-15 9208 * bit 0 = attention status block 9209 * bit 1 = fast path status block 9210 * a mask of 0x2 or more = tx/rx event 9211 * a mask of 1 = slow path event 9212 */ 9213 9214 status = bxe_ack_int(sc); 9215 9216 /* the interrupt is not for us */ 9217 if (__predict_false(status == 0)) { 9218 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 9219 return; 9220 } 9221 9222 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 9223 9224 FOR_EACH_ETH_QUEUE(sc, i) { 9225 fp = &sc->fp[i]; 9226 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 9227 if (status & mask) { 9228 /* acknowledge and disable further fastpath interrupts */ 9229 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9230 bxe_task_fp(fp); 9231 status &= ~mask; 9232 } 9233 } 9234 9235#if 0 9236 if (CNIC_SUPPORT(sc)) { 9237 mask = 0x2; 9238 if (status & (mask | 0x1)) { 9239 ... 
            status &= ~mask;
        }
    }
#endif

    if (__predict_false(status & 0x1)) {
        /* acknowledge and disable further slowpath interrupts */
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

        /* schedule slowpath handler */
        taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);

        status &= ~0x1;
    }

    if (__predict_false(status)) {
        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
    }
}

/* slowpath interrupt entry point */
static void
bxe_intr_sp(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;

    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");

    /* acknowledge and disable further slowpath interrupts */
    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    /* schedule slowpath handler */
    taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
}

/* fastpath interrupt entry point */
static void
bxe_intr_fp(void *xfp)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
    struct bxe_softc *sc = fp->sc;

    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);

    BLOGD(sc, DBG_INTR,
          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);

#if 0
    /* Don't handle any interrupts if we're not ready. */
    if (__predict_false(sc->intr_sem != 0)) {
        return;
    }
#endif

    /* acknowledge and disable further fastpath interrupts */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    bxe_task_fp(fp);
}

/* Release all interrupts allocated by the driver. */
static void
bxe_interrupt_free(struct bxe_softc *sc)
{
    int i;

    switch (sc->interrupt_mode) {
    case INTR_MODE_INTX:
        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
        if (sc->intr[0].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_IRQ,
                                 sc->intr[0].rid,
                                 sc->intr[0].resource);
        }
        break;
    case INTR_MODE_MSI:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    case INTR_MODE_MSIX:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    default:
        /* nothing to do as initial allocation failed */
        break;
    }
}

/*
 * This function determines and allocates the appropriate
 * interrupt based on system capabilities and user request.
 *
 * The user may force a particular interrupt mode, specify
 * the number of receive queues, specify the method for
 * distributing received frames to receive queues, or use
 * the default settings which will automatically select the
 * best supported combination. In addition, the OS may or
 * may not support certain combinations of these settings.
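 *
 * The selection falls back in order: MSI-X first, then MSI, and finally
 * a shared legacy INTx vector (see the three do/while blocks below).
 *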
 * This routine attempts to reconcile the settings requested
 * by the user with the capabilities available from the system
 * to select the optimal combination of features.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
static int
bxe_interrupt_alloc(struct bxe_softc *sc)
{
    int msix_count = 0;
    int msi_count = 0;
    int num_requested = 0;
    int num_allocated = 0;
    int rid, i, j;
    int rc;

    /* get the number of available MSI/MSI-X interrupts from the OS */
    if (sc->interrupt_mode > 0) {
        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
            msix_count = pci_msix_count(sc->dev);
        }

        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
            msi_count = pci_msi_count(sc->dev);
        }

        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
              msi_count, msix_count);
    }

    do { /* try allocating MSI-X interrupt resources (at least 2) */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
            (msix_count < 2)) {
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        /* ask for the necessary number of MSI-X vectors */
        num_requested = min((sc->num_queues + 1), msix_count);

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        if (num_allocated < 2) { /* possible? */
            BLOGE(sc, "MSI-X allocation less than 2!\n");
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated - 1;

        rid = 1; /* initial resource identifier */

        /* allocate the MSI-X vectors */
        for (i = 0; i < num_allocated; i++) {
            sc->intr[i].rid = (rid + i);

            if ((sc->intr[i].resource =
                 bus_alloc_resource_any(sc->dev,
                                        SYS_RES_IRQ,
                                        &sc->intr[i].rid,
                                        RF_ACTIVE)) == NULL) {
                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
                      i, (rid + i));

                for (j = (i - 1); j >= 0; j--) {
                    bus_release_resource(sc->dev,
                                         SYS_RES_IRQ,
                                         sc->intr[j].rid,
                                         sc->intr[j].resource);
                }

                sc->intr_count = 0;
                sc->num_queues = 0;
                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
                pci_release_msi(sc->dev);
                break;
            }

            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
        }
    } while (0);

    do { /* try allocating MSI vector resources (a single vector) */
        if (sc->interrupt_mode != INTR_MODE_MSI) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
            (msi_count < 1)) {
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        /* ask for a single MSI vector */
        num_requested = 1;

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI alloc failed (%d)!\n",
rc); 9476 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9477 break; 9478 } 9479 9480 if (num_allocated != 1) { /* possible? */ 9481 BLOGE(sc, "MSI allocation is not 1!\n"); 9482 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9483 pci_release_msi(sc->dev); 9484 break; 9485 } 9486 9487 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9488 num_requested, num_allocated); 9489 9490 /* best effort so use the number of vectors allocated to us */ 9491 sc->intr_count = num_allocated; 9492 sc->num_queues = num_allocated; 9493 9494 rid = 1; /* initial resource identifier */ 9495 9496 sc->intr[0].rid = rid; 9497 9498 if ((sc->intr[0].resource = 9499 bus_alloc_resource_any(sc->dev, 9500 SYS_RES_IRQ, 9501 &sc->intr[0].rid, 9502 RF_ACTIVE)) == NULL) { 9503 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9504 sc->intr_count = 0; 9505 sc->num_queues = 0; 9506 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9507 pci_release_msi(sc->dev); 9508 break; 9509 } 9510 9511 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9512 } while (0); 9513 9514 do { /* try allocating INTx vector resources */ 9515 if (sc->interrupt_mode != INTR_MODE_INTX) { 9516 break; 9517 } 9518 9519 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9520 9521 /* only one vector for INTx */ 9522 sc->intr_count = 1; 9523 sc->num_queues = 1; 9524 9525 rid = 0; /* initial resource identifier */ 9526 9527 sc->intr[0].rid = rid; 9528 9529 if ((sc->intr[0].resource = 9530 bus_alloc_resource_any(sc->dev, 9531 SYS_RES_IRQ, 9532 &sc->intr[0].rid, 9533 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9534 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9535 sc->intr_count = 0; 9536 sc->num_queues = 0; 9537 sc->interrupt_mode = -1; /* Failed! */ 9538 break; 9539 } 9540 9541 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9542 } while (0); 9543 9544 if (sc->interrupt_mode == -1) { 9545 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9546 rc = 1; 9547 } else { 9548 BLOGD(sc, DBG_LOAD, 9549 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9550 sc->interrupt_mode, sc->num_queues); 9551 rc = 0; 9552 } 9553 9554 return (rc); 9555} 9556 9557static void 9558bxe_interrupt_detach(struct bxe_softc *sc) 9559{ 9560 struct bxe_fastpath *fp; 9561 int i; 9562 9563 /* release interrupt resources */ 9564 for (i = 0; i < sc->intr_count; i++) { 9565 if (sc->intr[i].resource && sc->intr[i].tag) { 9566 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9567 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9568 } 9569 } 9570 9571 for (i = 0; i < sc->num_queues; i++) { 9572 fp = &sc->fp[i]; 9573 if (fp->tq) { 9574 taskqueue_drain(fp->tq, &fp->tq_task); 9575 taskqueue_free(fp->tq); 9576 fp->tq = NULL; 9577 } 9578 } 9579 9580 9581 if (sc->sp_tq) { 9582 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9583 taskqueue_free(sc->sp_tq); 9584 sc->sp_tq = NULL; 9585 } 9586} 9587 9588/* 9589 * Enables interrupts and attach to the ISR. 9590 * 9591 * When using multiple MSI/MSI-X vectors the first vector 9592 * is used for slowpath operations while all remaining 9593 * vectors are used for fastpath operations. If only a 9594 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9595 * ISR must look for both slowpath and fastpath completions. 
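 *
 * Vector layout with MSI-X, as wired up below (N = sc->num_queues):
 *
 *   vector 0     -> bxe_intr_sp()   slowpath/default status block
 *   vector 1..N  -> bxe_intr_fp()   one per fastpath queue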
9596 */ 9597static int 9598bxe_interrupt_attach(struct bxe_softc *sc) 9599{ 9600 struct bxe_fastpath *fp; 9601 int rc = 0; 9602 int i; 9603 9604 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9605 "bxe%d_sp_tq", sc->unit); 9606 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9607 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9608 taskqueue_thread_enqueue, 9609 &sc->sp_tq); 9610 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9611 "%s", sc->sp_tq_name); 9612 9613 9614 for (i = 0; i < sc->num_queues; i++) { 9615 fp = &sc->fp[i]; 9616 snprintf(fp->tq_name, sizeof(fp->tq_name), 9617 "bxe%d_fp%d_tq", sc->unit, i); 9618 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9619 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9620 taskqueue_thread_enqueue, 9621 &fp->tq); 9622 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9623 "%s", fp->tq_name); 9624 } 9625 9626 /* setup interrupt handlers */ 9627 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9628 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9629 9630 /* 9631 * Setup the interrupt handler. Note that we pass the driver instance 9632 * to the interrupt handler for the slowpath. 9633 */ 9634 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9635 (INTR_TYPE_NET | INTR_MPSAFE), 9636 NULL, bxe_intr_sp, sc, 9637 &sc->intr[0].tag)) != 0) { 9638 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9639 goto bxe_interrupt_attach_exit; 9640 } 9641 9642 bus_describe_intr(sc->dev, sc->intr[0].resource, 9643 sc->intr[0].tag, "sp"); 9644 9645 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9646 9647 /* initialize the fastpath vectors (note the first was used for sp) */ 9648 for (i = 0; i < sc->num_queues; i++) { 9649 fp = &sc->fp[i]; 9650 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9651 9652 /* 9653 * Setup the interrupt handler. Note that we pass the 9654 * fastpath context to the interrupt handler in this 9655 * case. 9656 */ 9657 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9658 (INTR_TYPE_NET | INTR_MPSAFE), 9659 NULL, bxe_intr_fp, fp, 9660 &sc->intr[i + 1].tag)) != 0) { 9661 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9662 (i + 1), rc); 9663 goto bxe_interrupt_attach_exit; 9664 } 9665 9666 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9667 sc->intr[i + 1].tag, "fp%02d", i); 9668 9669 /* bind the fastpath instance to a cpu */ 9670 if (sc->num_queues > 1) { 9671 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9672 } 9673 9674 fp->state = BXE_FP_STATE_IRQ; 9675 } 9676 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9677 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9678 9679 /* 9680 * Setup the interrupt handler. Note that we pass the 9681 * driver instance to the interrupt handler which 9682 * will handle both the slowpath and fastpath. 9683 */ 9684 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9685 (INTR_TYPE_NET | INTR_MPSAFE), 9686 NULL, bxe_intr_legacy, sc, 9687 &sc->intr[0].tag)) != 0) { 9688 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9689 goto bxe_interrupt_attach_exit; 9690 } 9691 9692 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9693 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9694 9695 /* 9696 * Setup the interrupt handler. Note that we pass the 9697 * driver instance to the interrupt handler which 9698 * will handle both the slowpath and fastpath. 
9699 */ 9700 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9701 (INTR_TYPE_NET | INTR_MPSAFE), 9702 NULL, bxe_intr_legacy, sc, 9703 &sc->intr[0].tag)) != 0) { 9704 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9705 goto bxe_interrupt_attach_exit; 9706 } 9707 } 9708 9709bxe_interrupt_attach_exit: 9710 9711 return (rc); 9712} 9713 9714static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9715static int bxe_init_hw_common(struct bxe_softc *sc); 9716static int bxe_init_hw_port(struct bxe_softc *sc); 9717static int bxe_init_hw_func(struct bxe_softc *sc); 9718static void bxe_reset_common(struct bxe_softc *sc); 9719static void bxe_reset_port(struct bxe_softc *sc); 9720static void bxe_reset_func(struct bxe_softc *sc); 9721static int bxe_gunzip_init(struct bxe_softc *sc); 9722static void bxe_gunzip_end(struct bxe_softc *sc); 9723static int bxe_init_firmware(struct bxe_softc *sc); 9724static void bxe_release_firmware(struct bxe_softc *sc); 9725 9726static struct 9727ecore_func_sp_drv_ops bxe_func_sp_drv = { 9728 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9729 .init_hw_cmn = bxe_init_hw_common, 9730 .init_hw_port = bxe_init_hw_port, 9731 .init_hw_func = bxe_init_hw_func, 9732 9733 .reset_hw_cmn = bxe_reset_common, 9734 .reset_hw_port = bxe_reset_port, 9735 .reset_hw_func = bxe_reset_func, 9736 9737 .gunzip_init = bxe_gunzip_init, 9738 .gunzip_end = bxe_gunzip_end, 9739 9740 .init_fw = bxe_init_firmware, 9741 .release_fw = bxe_release_firmware, 9742}; 9743 9744static void 9745bxe_init_func_obj(struct bxe_softc *sc) 9746{ 9747 sc->dmae_ready = 0; 9748 9749 ecore_init_func_obj(sc, 9750 &sc->func_obj, 9751 BXE_SP(sc, func_rdata), 9752 BXE_SP_MAPPING(sc, func_rdata), 9753 BXE_SP(sc, func_afex_rdata), 9754 BXE_SP_MAPPING(sc, func_afex_rdata), 9755 &bxe_func_sp_drv); 9756} 9757 9758static int 9759bxe_init_hw(struct bxe_softc *sc, 9760 uint32_t load_code) 9761{ 9762 struct ecore_func_state_params func_params = { NULL }; 9763 int rc; 9764 9765 /* prepare the parameters for function state transitions */ 9766 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9767 9768 func_params.f_obj = &sc->func_obj; 9769 func_params.cmd = ECORE_F_CMD_HW_INIT; 9770 9771 func_params.params.hw_init.load_phase = load_code; 9772 9773 /* 9774 * Via a plethora of function pointers, we will eventually reach 9775 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
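 * ecore_func_state_change() validates the ECORE_F_CMD_HW_INIT transition
 * and then dispatches to the callbacks registered in bxe_func_sp_drv
 * (see bxe_init_func_obj() above) according to the load_phase set here.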
9776 */ 9777 rc = ecore_func_state_change(sc, &func_params); 9778 9779 return (rc); 9780} 9781 9782static void 9783bxe_fill(struct bxe_softc *sc, 9784 uint32_t addr, 9785 int fill, 9786 uint32_t len) 9787{ 9788 uint32_t i; 9789 9790 if (!(len % 4) && !(addr % 4)) { 9791 for (i = 0; i < len; i += 4) { 9792 REG_WR(sc, (addr + i), fill); 9793 } 9794 } else { 9795 for (i = 0; i < len; i++) { 9796 REG_WR8(sc, (addr + i), fill); 9797 } 9798 } 9799} 9800 9801/* writes FP SP data to FW - data_size in dwords */ 9802static void 9803bxe_wr_fp_sb_data(struct bxe_softc *sc, 9804 int fw_sb_id, 9805 uint32_t *sb_data_p, 9806 uint32_t data_size) 9807{ 9808 int index; 9809 9810 for (index = 0; index < data_size; index++) { 9811 REG_WR(sc, 9812 (BAR_CSTRORM_INTMEM + 9813 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9814 (sizeof(uint32_t) * index)), 9815 *(sb_data_p + index)); 9816 } 9817} 9818 9819static void 9820bxe_zero_fp_sb(struct bxe_softc *sc, 9821 int fw_sb_id) 9822{ 9823 struct hc_status_block_data_e2 sb_data_e2; 9824 struct hc_status_block_data_e1x sb_data_e1x; 9825 uint32_t *sb_data_p; 9826 uint32_t data_size = 0; 9827 9828 if (!CHIP_IS_E1x(sc)) { 9829 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9830 sb_data_e2.common.state = SB_DISABLED; 9831 sb_data_e2.common.p_func.vf_valid = FALSE; 9832 sb_data_p = (uint32_t *)&sb_data_e2; 9833 data_size = (sizeof(struct hc_status_block_data_e2) / 9834 sizeof(uint32_t)); 9835 } else { 9836 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9837 sb_data_e1x.common.state = SB_DISABLED; 9838 sb_data_e1x.common.p_func.vf_valid = FALSE; 9839 sb_data_p = (uint32_t *)&sb_data_e1x; 9840 data_size = (sizeof(struct hc_status_block_data_e1x) / 9841 sizeof(uint32_t)); 9842 } 9843 9844 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9845 9846 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9847 0, CSTORM_STATUS_BLOCK_SIZE); 9848 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9849 0, CSTORM_SYNC_BLOCK_SIZE); 9850} 9851 9852static void 9853bxe_wr_sp_sb_data(struct bxe_softc *sc, 9854 struct hc_sp_status_block_data *sp_sb_data) 9855{ 9856 int i; 9857 9858 for (i = 0; 9859 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9860 i++) { 9861 REG_WR(sc, 9862 (BAR_CSTRORM_INTMEM + 9863 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9864 (i * sizeof(uint32_t))), 9865 *((uint32_t *)sp_sb_data + i)); 9866 } 9867} 9868 9869static void 9870bxe_zero_sp_sb(struct bxe_softc *sc) 9871{ 9872 struct hc_sp_status_block_data sp_sb_data; 9873 9874 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9875 9876 sp_sb_data.state = SB_DISABLED; 9877 sp_sb_data.p_func.vf_valid = FALSE; 9878 9879 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9880 9881 bxe_fill(sc, 9882 (BAR_CSTRORM_INTMEM + 9883 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9884 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9885 bxe_fill(sc, 9886 (BAR_CSTRORM_INTMEM + 9887 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9888 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9889} 9890 9891static void 9892bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9893 int igu_sb_id, 9894 int igu_seg_id) 9895{ 9896 hc_sm->igu_sb_id = igu_sb_id; 9897 hc_sm->igu_seg_id = igu_seg_id; 9898 hc_sm->timer_value = 0xFF; 9899 hc_sm->time_to_expire = 0xFFFFFFFF; 9900} 9901 9902static void 9903bxe_map_sb_state_machines(struct hc_index_data *index_data) 9904{ 9905 /* zero out state machine indices */ 9906 9907 /* rx indices */ 9908 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
&= ~HC_INDEX_DATA_SM_ID; 9909 9910 /* tx indices */ 9911 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9912 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9913 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9914 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9915 9916 /* map indices */ 9917 9918 /* rx indices */ 9919 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9920 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9921 9922 /* tx indices */ 9923 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9924 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9925 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9926 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9927 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9928 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9929 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9930 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9931} 9932 9933static void 9934bxe_init_sb(struct bxe_softc *sc, 9935 bus_addr_t busaddr, 9936 int vfid, 9937 uint8_t vf_valid, 9938 int fw_sb_id, 9939 int igu_sb_id) 9940{ 9941 struct hc_status_block_data_e2 sb_data_e2; 9942 struct hc_status_block_data_e1x sb_data_e1x; 9943 struct hc_status_block_sm *hc_sm_p; 9944 uint32_t *sb_data_p; 9945 int igu_seg_id; 9946 int data_size; 9947 9948 if (CHIP_INT_MODE_IS_BC(sc)) { 9949 igu_seg_id = HC_SEG_ACCESS_NORM; 9950 } else { 9951 igu_seg_id = IGU_SEG_ACCESS_NORM; 9952 } 9953 9954 bxe_zero_fp_sb(sc, fw_sb_id); 9955 9956 if (!CHIP_IS_E1x(sc)) { 9957 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9958 sb_data_e2.common.state = SB_ENABLED; 9959 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9960 sb_data_e2.common.p_func.vf_id = vfid; 9961 sb_data_e2.common.p_func.vf_valid = vf_valid; 9962 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9963 sb_data_e2.common.same_igu_sb_1b = TRUE; 9964 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9965 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9966 hc_sm_p = sb_data_e2.common.state_machine; 9967 sb_data_p = (uint32_t *)&sb_data_e2; 9968 data_size = (sizeof(struct hc_status_block_data_e2) / 9969 sizeof(uint32_t)); 9970 bxe_map_sb_state_machines(sb_data_e2.index_data); 9971 } else { 9972 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9973 sb_data_e1x.common.state = SB_ENABLED; 9974 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9975 sb_data_e1x.common.p_func.vf_id = 0xff; 9976 sb_data_e1x.common.p_func.vf_valid = FALSE; 9977 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9978 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9979 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9980 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9981 hc_sm_p = sb_data_e1x.common.state_machine; 9982 sb_data_p = (uint32_t *)&sb_data_e1x; 9983 data_size = (sizeof(struct hc_status_block_data_e1x) / 9984 sizeof(uint32_t)); 9985 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9986 } 9987 9988 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9989 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9990 9991 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9992 9993 /* write indices to HW - PCI guarantees endianity of regpairs */ 9994 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9995} 9996 9997static inline uint8_t 9998bxe_fp_qzone_id(struct bxe_fastpath *fp) 9999{ 10000 if (CHIP_IS_E1x(fp->sc)) { 10001 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 10002 } else { 10003 return (fp->cl_id); 10004 } 
10005} 10006 10007static inline uint32_t 10008bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 10009 struct bxe_fastpath *fp) 10010{ 10011 uint32_t offset = BAR_USTRORM_INTMEM; 10012 10013#if 0 10014 if (IS_VF(sc)) { 10015 return (PXP_VF_ADDR_USDM_QUEUES_START + 10016 (sc->acquire_resp.resc.hw_qid[fp->index] * 10017 sizeof(struct ustorm_queue_zone_data))); 10018 } else 10019#endif 10020 if (!CHIP_IS_E1x(sc)) { 10021 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 10022 } else { 10023 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 10024 } 10025 10026 return (offset); 10027} 10028 10029static void 10030bxe_init_eth_fp(struct bxe_softc *sc, 10031 int idx) 10032{ 10033 struct bxe_fastpath *fp = &sc->fp[idx]; 10034 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 10035 unsigned long q_type = 0; 10036 int cos; 10037 10038 fp->sc = sc; 10039 fp->index = idx; 10040 10041 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 10042 "bxe%d_fp%d_tx_lock", sc->unit, idx); 10043 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 10044 10045 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 10046 "bxe%d_fp%d_rx_lock", sc->unit, idx); 10047 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 10048 10049 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 10050 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 10051 10052 fp->cl_id = (CHIP_IS_E1x(sc)) ? 10053 (SC_L_ID(sc) + idx) : 10054 /* want client ID same as IGU SB ID for non-E1 */ 10055 fp->igu_sb_id; 10056 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 10057 10058 /* setup sb indices */ 10059 if (!CHIP_IS_E1x(sc)) { 10060 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 10061 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 10062 } else { 10063 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 10064 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 10065 } 10066 10067 /* init shortcut */ 10068 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 10069 10070 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 10071 10072 /* 10073 * XXX If multiple CoS is ever supported then each fastpath structure 10074 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
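 * For example, with max_cos = 3 and fastpath index 2 the loop below fills
 * cids[] = { 2, 2, 2 }: every CoS currently shares one connection id, and
 * only the COS0 tx consumer index (HC_INDEX_ETH_TX_CQ_CONS_COS0) is used.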
10075 */ 10076 for (cos = 0; cos < sc->max_cos; cos++) { 10077 cids[cos] = idx; 10078 } 10079 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 10080 10081 /* nothing more for a VF to do */ 10082 if (IS_VF(sc)) { 10083 return; 10084 } 10085 10086 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, 10087 fp->fw_sb_id, fp->igu_sb_id); 10088 10089 bxe_update_fp_sb_idx(fp); 10090 10091 /* Configure Queue State object */ 10092 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); 10093 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); 10094 10095 ecore_init_queue_obj(sc, 10096 &sc->sp_objs[idx].q_obj, 10097 fp->cl_id, 10098 cids, 10099 sc->max_cos, 10100 SC_FUNC(sc), 10101 BXE_SP(sc, q_rdata), 10102 BXE_SP_MAPPING(sc, q_rdata), 10103 q_type); 10104 10105 /* configure classification DBs */ 10106 ecore_init_mac_obj(sc, 10107 &sc->sp_objs[idx].mac_obj, 10108 fp->cl_id, 10109 idx, 10110 SC_FUNC(sc), 10111 BXE_SP(sc, mac_rdata), 10112 BXE_SP_MAPPING(sc, mac_rdata), 10113 ECORE_FILTER_MAC_PENDING, 10114 &sc->sp_state, 10115 ECORE_OBJ_TYPE_RX_TX, 10116 &sc->macs_pool); 10117 10118 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", 10119 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); 10120} 10121 10122static inline void 10123bxe_update_rx_prod(struct bxe_softc *sc, 10124 struct bxe_fastpath *fp, 10125 uint16_t rx_bd_prod, 10126 uint16_t rx_cq_prod, 10127 uint16_t rx_sge_prod) 10128{ 10129 struct ustorm_eth_rx_producers rx_prods = { 0 }; 10130 uint32_t i; 10131 10132 /* update producers */ 10133 rx_prods.bd_prod = rx_bd_prod; 10134 rx_prods.cqe_prod = rx_cq_prod; 10135 rx_prods.sge_prod = rx_sge_prod; 10136 10137 /* 10138 * Make sure that the BD and SGE data is updated before updating the 10139 * producers since FW might read the BD/SGE right after the producer 10140 * is updated. 10141 * This is only applicable for weak-ordered memory model archs such 10142 * as IA-64. The following barrier is also mandatory since FW 10143 * assumes BDs must have buffers. 10144 */ 10145 wmb(); 10146 10147 for (i = 0; i < (sizeof(rx_prods) / 4); i++) { 10148 REG_WR(sc, 10149 (fp->ustorm_rx_prods_offset + (i * 4)), 10150 ((uint32_t *)&rx_prods)[i]); 10151 } 10152 10153 wmb(); /* keep prod updates ordered */ 10154 10155 BLOGD(sc, DBG_RX, 10156 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", 10157 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); 10158} 10159 10160static void 10161bxe_init_rx_rings(struct bxe_softc *sc) 10162{ 10163 struct bxe_fastpath *fp; 10164 int i; 10165 10166 for (i = 0; i < sc->num_queues; i++) { 10167 fp = &sc->fp[i]; 10168 10169 fp->rx_bd_cons = 0; 10170 10171 /* 10172 * Activate the BD ring... 
10173 * Warning, this will generate an interrupt (to the TSTORM) 10174 * so this can only be done after the chip is initialized 10175 */ 10176 bxe_update_rx_prod(sc, fp, 10177 fp->rx_bd_prod, 10178 fp->rx_cq_prod, 10179 fp->rx_sge_prod); 10180 10181 if (i != 0) { 10182 continue; 10183 } 10184 10185 if (CHIP_IS_E1(sc)) { 10186 REG_WR(sc, 10187 (BAR_USTRORM_INTMEM + 10188 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 10189 U64_LO(fp->rcq_dma.paddr)); 10190 REG_WR(sc, 10191 (BAR_USTRORM_INTMEM + 10192 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 10193 U64_HI(fp->rcq_dma.paddr)); 10194 } 10195 } 10196} 10197 10198static void 10199bxe_init_tx_ring_one(struct bxe_fastpath *fp) 10200{ 10201 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 10202 fp->tx_db.data.zero_fill1 = 0; 10203 fp->tx_db.data.prod = 0; 10204 10205 fp->tx_pkt_prod = 0; 10206 fp->tx_pkt_cons = 0; 10207 fp->tx_bd_prod = 0; 10208 fp->tx_bd_cons = 0; 10209 fp->eth_q_stats.tx_pkts = 0; 10210} 10211 10212static inline void 10213bxe_init_tx_rings(struct bxe_softc *sc) 10214{ 10215 int i; 10216 10217 for (i = 0; i < sc->num_queues; i++) { 10218#if 0 10219 uint8_t cos; 10220 for (cos = 0; cos < sc->max_cos; cos++) { 10221 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); 10222 } 10223#else 10224 bxe_init_tx_ring_one(&sc->fp[i]); 10225#endif 10226 } 10227} 10228 10229static void 10230bxe_init_def_sb(struct bxe_softc *sc) 10231{ 10232 struct host_sp_status_block *def_sb = sc->def_sb; 10233 bus_addr_t mapping = sc->def_sb_dma.paddr; 10234 int igu_sp_sb_index; 10235 int igu_seg_id; 10236 int port = SC_PORT(sc); 10237 int func = SC_FUNC(sc); 10238 int reg_offset, reg_offset_en5; 10239 uint64_t section; 10240 int index, sindex; 10241 struct hc_sp_status_block_data sp_sb_data; 10242 10243 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 10244 10245 if (CHIP_INT_MODE_IS_BC(sc)) { 10246 igu_sp_sb_index = DEF_SB_IGU_ID; 10247 igu_seg_id = HC_SEG_ACCESS_DEF; 10248 } else { 10249 igu_sp_sb_index = sc->igu_dsb_id; 10250 igu_seg_id = IGU_SEG_ACCESS_DEF; 10251 } 10252 10253 /* attentions */ 10254 section = ((uint64_t)mapping + 10255 offsetof(struct host_sp_status_block, atten_status_block)); 10256 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 10257 sc->attn_state = 0; 10258 10259 reg_offset = (port) ? 10260 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10261 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 10262 reg_offset_en5 = (port) ? 10263 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 10264 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 10265 10266 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 10267 /* take care of sig[0]..sig[4] */ 10268 for (sindex = 0; sindex < 4; sindex++) { 10269 sc->attn_group[index].sig[sindex] = 10270 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 10271 } 10272 10273 if (!CHIP_IS_E1x(sc)) { 10274 /* 10275 * enable5 is separate from the rest of the registers, 10276 * and the address skip is 4 and not 16 between the 10277 * different groups 10278 */ 10279 sc->attn_group[index].sig[4] = 10280 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 10281 } else { 10282 sc->attn_group[index].sig[4] = 0; 10283 } 10284 } 10285 10286 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10287 reg_offset = (port) ? 
10288 HC_REG_ATTN_MSG1_ADDR_L : 10289 HC_REG_ATTN_MSG0_ADDR_L; 10290 REG_WR(sc, reg_offset, U64_LO(section)); 10291 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 10292 } else if (!CHIP_IS_E1x(sc)) { 10293 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 10294 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 10295 } 10296 10297 section = ((uint64_t)mapping + 10298 offsetof(struct host_sp_status_block, sp_sb)); 10299 10300 bxe_zero_sp_sb(sc); 10301 10302 /* PCI guarantees endianity of regpair */ 10303 sp_sb_data.state = SB_ENABLED; 10304 sp_sb_data.host_sb_addr.lo = U64_LO(section); 10305 sp_sb_data.host_sb_addr.hi = U64_HI(section); 10306 sp_sb_data.igu_sb_id = igu_sp_sb_index; 10307 sp_sb_data.igu_seg_id = igu_seg_id; 10308 sp_sb_data.p_func.pf_id = func; 10309 sp_sb_data.p_func.vnic_id = SC_VN(sc); 10310 sp_sb_data.p_func.vf_id = 0xff; 10311 10312 bxe_wr_sp_sb_data(sc, &sp_sb_data); 10313 10314 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 10315} 10316 10317static void 10318bxe_init_sp_ring(struct bxe_softc *sc) 10319{ 10320 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 10321 sc->spq_prod_idx = 0; 10322 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 10323 sc->spq_prod_bd = sc->spq; 10324 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 10325} 10326 10327static void 10328bxe_init_eq_ring(struct bxe_softc *sc) 10329{ 10330 union event_ring_elem *elem; 10331 int i; 10332 10333 for (i = 1; i <= NUM_EQ_PAGES; i++) { 10334 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 10335 10336 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 10337 BCM_PAGE_SIZE * 10338 (i % NUM_EQ_PAGES))); 10339 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 10340 BCM_PAGE_SIZE * 10341 (i % NUM_EQ_PAGES))); 10342 } 10343 10344 sc->eq_cons = 0; 10345 sc->eq_prod = NUM_EQ_DESC; 10346 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 10347 10348 atomic_store_rel_long(&sc->eq_spq_left, 10349 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 10350 NUM_EQ_DESC) - 1)); 10351} 10352 10353static void 10354bxe_init_internal_common(struct bxe_softc *sc) 10355{ 10356 int i; 10357 10358 if (IS_MF_SI(sc)) { 10359 /* 10360 * In switch independent mode, the TSTORM needs to accept 10361 * packets that failed classification, since approximate match 10362 * mac addresses aren't written to NIG LLH. 10363 */ 10364 REG_WR8(sc, 10365 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10366 2); 10367 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ 10368 REG_WR8(sc, 10369 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10370 0); 10371 } 10372 10373 /* 10374 * Zero this manually as its initialization is currently missing 10375 * in the initTool. 10376 */ 10377 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 10378 REG_WR(sc, 10379 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 10380 0); 10381 } 10382 10383 if (!CHIP_IS_E1x(sc)) { 10384 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 10385 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 10386 } 10387} 10388 10389static void 10390bxe_init_internal(struct bxe_softc *sc, 10391 uint32_t load_code) 10392{ 10393 switch (load_code) { 10394 case FW_MSG_CODE_DRV_LOAD_COMMON: 10395 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 10396 bxe_init_internal_common(sc); 10397 /* no break */ 10398 10399 case FW_MSG_CODE_DRV_LOAD_PORT: 10400 /* nothing to do */ 10401 /* no break */ 10402 10403 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 10404 /* internal memory per function is initialized inside bxe_pf_init */ 10405 break; 10406 10407 default: 10408 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); 10409 break; 10410 } 10411} 10412 10413static void 10414storm_memset_func_cfg(struct bxe_softc *sc, 10415 struct tstorm_eth_function_common_config *tcfg, 10416 uint16_t abs_fid) 10417{ 10418 uint32_t addr; 10419 size_t size; 10420 10421 addr = (BAR_TSTRORM_INTMEM + 10422 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 10423 size = sizeof(struct tstorm_eth_function_common_config); 10424 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); 10425} 10426 10427static void 10428bxe_func_init(struct bxe_softc *sc, 10429 struct bxe_func_init_params *p) 10430{ 10431 struct tstorm_eth_function_common_config tcfg = { 0 }; 10432 10433 if (CHIP_IS_E1x(sc)) { 10434 storm_memset_func_cfg(sc, &tcfg, p->func_id); 10435 } 10436 10437 /* Enable the function in the FW */ 10438 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 10439 storm_memset_func_en(sc, p->func_id, 1); 10440 10441 /* spq */ 10442 if (p->func_flgs & FUNC_FLG_SPQ) { 10443 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 10444 REG_WR(sc, 10445 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), 10446 p->spq_prod); 10447 } 10448} 10449 10450/* 10451 * Calculates the per-VN min rates and stores them in input->vnic_min_rate[]. 10452 * They are needed for further normalizing of the min rates. 10453 * If all the configured min rates are zero 10454 * (or ETS is enabled), 10455 * the fairness algorithm 10456 * is deactivated. 10457 * Otherwise fairness stays enabled, and 10458 * those min rates that are zero are set to DEF_MIN_RATE (100). 
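 *
 * A worked example: with configured MIN BW fields of { 25, 0, 10, 0 } and
 * no hidden VNs, the loop below yields vnic_min_rate[] =
 * { 2500, DEF_MIN_RATE, 1000, DEF_MIN_RATE } and fairness stays enabled,
 * since at least one configured minimum is non-zero.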
*/ 10460static void 10461bxe_calc_vn_min(struct bxe_softc *sc, 10462 struct cmng_init_input *input) 10463{ 10464 uint32_t vn_cfg; 10465 uint32_t vn_min_rate; 10466 int all_zero = 1; 10467 int vn; 10468 10469 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10470 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10471 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 10472 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 10473 10474 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10475 /* skip hidden VNs */ 10476 vn_min_rate = 0; 10477 } else if (!vn_min_rate) { 10478 /* If min rate is zero - set it to 100 */ 10479 vn_min_rate = DEF_MIN_RATE; 10480 } else { 10481 all_zero = 0; 10482 } 10483 10484 input->vnic_min_rate[vn] = vn_min_rate; 10485 } 10486 10487 /* if ETS or all min rates are zeros - disable fairness */ 10488 if (BXE_IS_ETS_ENABLED(sc)) { 10489 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10490 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); 10491 } else if (all_zero) { 10492 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10493 BLOGD(sc, DBG_LOAD, 10494 "Fairness disabled (all MIN values are zeroes)\n"); 10495 } else { 10496 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10497 } 10498} 10499 10500static inline uint16_t 10501bxe_extract_max_cfg(struct bxe_softc *sc, 10502 uint32_t mf_cfg) 10503{ 10504 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 10505 FUNC_MF_CFG_MAX_BW_SHIFT); 10506 10507 if (!max_cfg) { 10508 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); 10509 max_cfg = 100; 10510 } 10511 10512 return (max_cfg); 10513} 10514 10515static void 10516bxe_calc_vn_max(struct bxe_softc *sc, 10517 int vn, 10518 struct cmng_init_input *input) 10519{ 10520 uint16_t vn_max_rate; 10521 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10522 uint32_t max_cfg; 10523 10524 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10525 vn_max_rate = 0; 10526 } else { 10527 max_cfg = bxe_extract_max_cfg(sc, vn_cfg); 10528 10529 if (IS_MF_SI(sc)) { 10530 /* max_cfg in percent of link speed */ 10531 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); 10532 } else { /* SD modes */ 10533 /* max_cfg is absolute in 100Mb units */ 10534 vn_max_rate = (max_cfg * 100); 10535 } 10536 } 10537 10538 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 10539 10540 input->vnic_max_rate[vn] = vn_max_rate; 10541} 10542 10543static void 10544bxe_cmng_fns_init(struct bxe_softc *sc, 10545 uint8_t read_cfg, 10546 uint8_t cmng_type) 10547{ 10548 struct cmng_init_input input; 10549 int vn; 10550 10551 memset(&input, 0, sizeof(struct cmng_init_input)); 10552 10553 input.port_rate = sc->link_vars.line_speed; 10554 10555 if (cmng_type == CMNG_FNS_MINMAX) { 10556 /* read mf conf from shmem */ 10557 if (read_cfg) { 10558 bxe_read_mf_cfg(sc); 10559 } 10560 10561 /* get VN min rate and enable fairness if not 0 */ 10562 bxe_calc_vn_min(sc, &input); 10563 10564 /* get VN max rate */ 10565 if (sc->port.pmf) { 10566 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10567 bxe_calc_vn_max(sc, vn, &input); 10568 } 10569 } 10570 10571 /* always enable rate shaping and fairness */ 10572 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 10573 10574 ecore_init_cmng(&input, &sc->cmng); 10575 return; 10576 } 10577 10578 /* rate shaping and fairness are disabled */ 10579 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); 10580} 10581 10582static int 10583bxe_get_cmng_fns_mode(struct bxe_softc *sc) 10584{ 10585 if (CHIP_REV_IS_SLOW(sc)) { 
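        /* no congestion management on emulation/FPGA ('slow') chip revs */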
10586 return (CMNG_FNS_NONE); 10587 } 10588 10589 if (IS_MF(sc)) { 10590 return (CMNG_FNS_MINMAX); 10591 } 10592 10593 return (CMNG_FNS_NONE); 10594} 10595 10596static void 10597storm_memset_cmng(struct bxe_softc *sc, 10598 struct cmng_init *cmng, 10599 uint8_t port) 10600{ 10601 int vn; 10602 int func; 10603 uint32_t addr; 10604 size_t size; 10605 10606 addr = (BAR_XSTRORM_INTMEM + 10607 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 10608 size = sizeof(struct cmng_struct_per_port); 10609 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); 10610 10611 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10612 func = func_by_vn(sc, vn); 10613 10614 addr = (BAR_XSTRORM_INTMEM + 10615 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 10616 size = sizeof(struct rate_shaping_vars_per_vn); 10617 ecore_storm_memset_struct(sc, addr, size, 10618 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); 10619 10620 addr = (BAR_XSTRORM_INTMEM + 10621 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 10622 size = sizeof(struct fairness_vars_per_vn); 10623 ecore_storm_memset_struct(sc, addr, size, 10624 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); 10625 } 10626} 10627 10628static void 10629bxe_pf_init(struct bxe_softc *sc) 10630{ 10631 struct bxe_func_init_params func_init = { 0 }; 10632 struct event_ring_data eq_data = { { 0 } }; 10633 uint16_t flags; 10634 10635 if (!CHIP_IS_E1x(sc)) { 10636 /* reset IGU PF statistics: MSIX + ATTN */ 10637 /* PF */ 10638 REG_WR(sc, 10639 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10640 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10641 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10642 0); 10643 /* ATTN */ 10644 REG_WR(sc, 10645 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10646 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10647 (BXE_IGU_STAS_MSG_PF_CNT * 4) + 10648 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10649 0); 10650 } 10651 10652 /* function setup flags */ 10653 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 10654 10655 /* 10656 * This flag is relevant for E1x only. 10657 * E2 doesn't have a TPA configuration at the function level. 10658 */ 10659 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; 10660 10661 func_init.func_flgs = flags; 10662 func_init.pf_id = SC_FUNC(sc); 10663 func_init.func_id = SC_FUNC(sc); 10664 func_init.spq_map = sc->spq_dma.paddr; 10665 func_init.spq_prod = sc->spq_prod_idx; 10666 10667 bxe_func_init(sc, &func_init); 10668 10669 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 10670 10671 /* 10672 * Congestion management values depend on the link rate. 10673 * There is no active link so initial link rate is set to 10Gbps. 10674 * When the link comes up the congestion management values are 10675 * re-calculated according to the actual link rate. 10676 */ 10677 sc->link_vars.line_speed = SPEED_10000; 10678 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); 10679 10680 /* Only the PMF sets the HW */ 10681 if (sc->port.pmf) { 10682 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 10683 } 10684 10685 /* init Event Queue - PCI bus guarantees correct endianness */ 10686 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 10687 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 10688 eq_data.producer = sc->eq_prod; 10689 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 10690 eq_data.sb_id = DEF_SB_ID; 10691 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 10692} 10693 10694static void 10695bxe_hc_int_enable(struct bxe_softc *sc) 10696{ 10697 int port = SC_PORT(sc); 10698 uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10699 uint32_t val = REG_RD(sc, addr); 10700 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10701 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10702 (sc->intr_count == 1)) ? TRUE : FALSE; 10703 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10704 10705 if (msix) { 10706 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10707 HC_CONFIG_0_REG_INT_LINE_EN_0); 10708 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10709 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10710 if (single_msix) { 10711 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10712 } 10713 } else if (msi) { 10714 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10715 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10716 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10717 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10718 } else { 10719 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10720 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10721 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10722 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10723 10724 if (!CHIP_IS_E1(sc)) { 10725 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10726 val, port, addr); 10727 10728 REG_WR(sc, addr, val); 10729 10730 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10731 } 10732 } 10733 10734 if (CHIP_IS_E1(sc)) { 10735 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10736 } 10737 10738 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10739 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10740 10741 REG_WR(sc, addr, val); 10742 10743 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10744 mb(); 10745 10746 if (!CHIP_IS_E1(sc)) { 10747 /* init leading/trailing edge */ 10748 if (IS_MF(sc)) { 10749 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10750 if (sc->port.pmf) { 10751 /* enable nig and gpio3 attention */ 10752 val |= 0x1100; 10753 } 10754 } else { 10755 val = 0xffff; 10756 } 10757 10758 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10759 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10760 } 10761 10762 /* make sure that interrupts are indeed enabled from here on */ 10763 mb(); 10764} 10765 10766static void 10767bxe_igu_int_enable(struct bxe_softc *sc) 10768{ 10769 uint32_t val; 10770 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10771 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10772 (sc->intr_count == 1)) ? TRUE : FALSE; 10773 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10774 10775 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10776 10777 if (msix) { 10778 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10779 IGU_PF_CONF_SINGLE_ISR_EN); 10780 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10781 IGU_PF_CONF_ATTN_BIT_EN); 10782 if (single_msix) { 10783 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10784 } 10785 } else if (msi) { 10786 val &= ~IGU_PF_CONF_INT_LINE_EN; 10787 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10788 IGU_PF_CONF_ATTN_BIT_EN | 10789 IGU_PF_CONF_SINGLE_ISR_EN); 10790 } else { 10791 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10792 val |= (IGU_PF_CONF_INT_LINE_EN | 10793 IGU_PF_CONF_ATTN_BIT_EN | 10794 IGU_PF_CONF_SINGLE_ISR_EN); 10795 } 10796 10797 /* clean previous status - need to configure igu prior to ack*/ 10798 if ((!msix) || single_msix) { 10799 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10800 bxe_ack_int(sc); 10801 } 10802 10803 val |= IGU_PF_CONF_FUNC_EN; 10804 10805 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10806 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10807 10808 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10809 10810 mb(); 10811 10812 /* init leading/trailing edge */ 10813 if (IS_MF(sc)) { 10814 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10815 if (sc->port.pmf) { 10816 /* enable nig and gpio3 attention */ 10817 val |= 0x1100; 10818 } 10819 } else { 10820 val = 0xffff; 10821 } 10822 10823 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10824 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10825 10826 /* make sure that interrupts are indeed enabled from here on */ 10827 mb(); 10828} 10829 10830static void 10831bxe_int_enable(struct bxe_softc *sc) 10832{ 10833 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10834 bxe_hc_int_enable(sc); 10835 } else { 10836 bxe_igu_int_enable(sc); 10837 } 10838} 10839 10840static void 10841bxe_hc_int_disable(struct bxe_softc *sc) 10842{ 10843 int port = SC_PORT(sc); 10844 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10845 uint32_t val = REG_RD(sc, addr); 10846 10847 /* 10848 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10849 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10850 * block 10851 */ 10852 if (CHIP_IS_E1(sc)) { 10853 /* 10854 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10855 * to prevent from HC sending interrupts after we exit the function 10856 */ 10857 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10858 10859 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10860 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10861 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10862 } else { 10863 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10864 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10865 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10866 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10867 } 10868 10869 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10870 10871 /* flush all outstanding writes */ 10872 mb(); 10873 10874 REG_WR(sc, addr, val); 10875 if (REG_RD(sc, addr) != val) { 10876 BLOGE(sc, "proper val not read from HC IGU!\n"); 10877 } 10878} 10879 10880static void 10881bxe_igu_int_disable(struct bxe_softc *sc) 10882{ 10883 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10884 10885 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10886 IGU_PF_CONF_INT_LINE_EN | 10887 IGU_PF_CONF_ATTN_BIT_EN); 10888 10889 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10890 10891 /* flush all outstanding writes */ 10892 mb(); 10893 10894 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10895 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10896 BLOGE(sc, "proper val not read from IGU!\n"); 10897 } 10898} 10899 10900static void 10901bxe_int_disable(struct bxe_softc *sc) 10902{ 10903 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10904 bxe_hc_int_disable(sc); 10905 } else { 10906 bxe_igu_int_disable(sc); 10907 } 10908} 10909 10910static void 10911bxe_nic_init(struct bxe_softc *sc, 10912 int load_code) 10913{ 10914 int i; 10915 10916 for (i = 0; i < sc->num_queues; i++) { 10917 bxe_init_eth_fp(sc, i); 10918 } 10919 10920 rmb(); /* ensure status block indices were read */ 10921 10922 bxe_init_rx_rings(sc); 10923 bxe_init_tx_rings(sc); 10924 10925 if (IS_VF(sc)) { 10926 return; 10927 } 10928 10929 /* initialize MOD_ABS interrupts */ 10930 elink_init_mod_abs_int(sc, &sc->link_vars, 10931 sc->devinfo.chip_id, 10932 sc->devinfo.shmem_base, 10933 sc->devinfo.shmem2_base, 10934 SC_PORT(sc)); 10935 10936 bxe_init_def_sb(sc); 10937 bxe_update_dsb_idx(sc); 10938 bxe_init_sp_ring(sc); 10939 bxe_init_eq_ring(sc); 10940 bxe_init_internal(sc, load_code); 10941 bxe_pf_init(sc); 10942 bxe_stats_init(sc); 
10943 10944 /* flush all before enabling interrupts */ 10945 mb(); 10946 10947 bxe_int_enable(sc); 10948 10949 /* check for SPIO5 */ 10950 bxe_attn_int_deasserted0(sc, 10951 REG_RD(sc, 10952 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10953 SC_PORT(sc)*4)) & 10954 AEU_INPUTS_ATTN_BITS_SPIO5); 10955} 10956 10957static inline void 10958bxe_init_objs(struct bxe_softc *sc) 10959{ 10960 /* mcast rules must be added to tx if tx switching is enabled */ 10961 ecore_obj_type o_type = 10962 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10963 ECORE_OBJ_TYPE_RX; 10964 10965 /* RX_MODE controlling object */ 10966 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10967 10968 /* multicast configuration controlling object */ 10969 ecore_init_mcast_obj(sc, 10970 &sc->mcast_obj, 10971 sc->fp[0].cl_id, 10972 sc->fp[0].index, 10973 SC_FUNC(sc), 10974 SC_FUNC(sc), 10975 BXE_SP(sc, mcast_rdata), 10976 BXE_SP_MAPPING(sc, mcast_rdata), 10977 ECORE_FILTER_MCAST_PENDING, 10978 &sc->sp_state, 10979 o_type); 10980 10981 /* Setup CAM credit pools */ 10982 ecore_init_mac_credit_pool(sc, 10983 &sc->macs_pool, 10984 SC_FUNC(sc), 10985 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10986 VNICS_PER_PATH(sc)); 10987 10988 ecore_init_vlan_credit_pool(sc, 10989 &sc->vlans_pool, 10990 SC_ABS_FUNC(sc) >> 1, 10991 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10992 VNICS_PER_PATH(sc)); 10993 10994 /* RSS configuration object */ 10995 ecore_init_rss_config_obj(sc, 10996 &sc->rss_conf_obj, 10997 sc->fp[0].cl_id, 10998 sc->fp[0].index, 10999 SC_FUNC(sc), 11000 SC_FUNC(sc), 11001 BXE_SP(sc, rss_rdata), 11002 BXE_SP_MAPPING(sc, rss_rdata), 11003 ECORE_FILTER_RSS_CONF_PENDING, 11004 &sc->sp_state, ECORE_OBJ_TYPE_RX); 11005} 11006 11007/* 11008 * Initialize the function. This must be called before sending CLIENT_SETUP 11009 * for the first client. 
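 * Under the hood this sends the function START ramrod (ECORE_F_CMD_START)
 * and, because RAMROD_COMP_WAIT is set, blocks until the firmware has
 * completed it.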
11010 */ 11011static inline int 11012bxe_func_start(struct bxe_softc *sc) 11013{ 11014 struct ecore_func_state_params func_params = { NULL }; 11015 struct ecore_func_start_params *start_params = &func_params.params.start; 11016 11017 /* Prepare parameters for function state transitions */ 11018 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 11019 11020 func_params.f_obj = &sc->func_obj; 11021 func_params.cmd = ECORE_F_CMD_START; 11022 11023 /* Function parameters */ 11024 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 11025 start_params->sd_vlan_tag = OVLAN(sc); 11026 11027 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 11028 start_params->network_cos_mode = STATIC_COS; 11029 } else { /* CHIP_IS_E1X */ 11030 start_params->network_cos_mode = FW_WRR; 11031 } 11032 11033 start_params->gre_tunnel_mode = 0; 11034 start_params->gre_tunnel_rss = 0; 11035 11036 return (ecore_func_state_change(sc, &func_params)); 11037} 11038 11039static int 11040bxe_set_power_state(struct bxe_softc *sc, 11041 uint8_t state) 11042{ 11043 uint16_t pmcsr; 11044 11045 /* If there is no power capability, silently succeed */ 11046 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 11047 BLOGW(sc, "No power capability\n"); 11048 return (0); 11049 } 11050 11051 pmcsr = pci_read_config(sc->dev, 11052 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11053 2); 11054 11055 switch (state) { 11056 case PCI_PM_D0: 11057 pci_write_config(sc->dev, 11058 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11059 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 11060 11061 if (pmcsr & PCIM_PSTAT_DMASK) { 11062 /* delay required during transition out of D3hot */ 11063 DELAY(20000); 11064 } 11065 11066 break; 11067 11068 case PCI_PM_D3hot: 11069 /* XXX if there are other clients above don't shut down the power */ 11070 11071 /* don't shut down the power for emulation and FPGA */ 11072 if (CHIP_REV_IS_SLOW(sc)) { 11073 return (0); 11074 } 11075 11076 pmcsr &= ~PCIM_PSTAT_DMASK; 11077 pmcsr |= PCIM_PSTAT_D3; 11078 11079 if (sc->wol) { 11080 pmcsr |= PCIM_PSTAT_PMEENABLE; 11081 } 11082 11083 pci_write_config(sc->dev, 11084 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11085 pmcsr, 4); 11086 11087 /* 11088 * No more memory access after this point until device is brought back 11089 * to D0 state. 
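 * In other words, no BAR of the device may be touched again until the
 * device has been moved back via bxe_set_power_state(sc, PCI_PM_D0).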
11090 */ 11091 break; 11092 11093 default: 11094 BLOGE(sc, "Can't support PCI power state = %d\n", state); 11095 return (-1); 11096 } 11097 11098 return (0); 11099} 11100 11101 11102/* return TRUE if we succeeded in acquiring the lock */ 11103static uint8_t 11104bxe_trylock_hw_lock(struct bxe_softc *sc, 11105 uint32_t resource) 11106{ 11107 uint32_t lock_status; 11108 uint32_t resource_bit = (1 << resource); 11109 int func = SC_FUNC(sc); 11110 uint32_t hw_lock_control_reg; 11111 11112 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); 11113 11114 /* Validating that the resource is within range */ 11115 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 11116 BLOGD(sc, DBG_LOAD, 11117 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 11118 resource, HW_LOCK_MAX_RESOURCE_VALUE); 11119 return (FALSE); 11120 } 11121 11122 if (func <= 5) { 11123 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 11124 } else { 11125 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 11126 } 11127 11128 /* try to acquire the lock */ 11129 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 11130 lock_status = REG_RD(sc, hw_lock_control_reg); 11131 if (lock_status & resource_bit) { 11132 return (TRUE); 11133 } 11134 11135 BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource); 11136 11137 return (FALSE); 11138} 11139 11140/* 11141 * Get the recovery leader resource id according to the engine this function 11142 * belongs to. Currently only 2 engines are supported. 11143 */ 11144static int 11145bxe_get_leader_lock_resource(struct bxe_softc *sc) 11146{ 11147 if (SC_PATH(sc)) { 11148 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); 11149 } else { 11150 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); 11151 } 11152} 11153 11154/* try to acquire a leader lock for current engine */ 11155static uint8_t 11156bxe_trylock_leader_lock(struct bxe_softc *sc) 11157{ 11158 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 11159} 11160 11161static int 11162bxe_release_leader_lock(struct bxe_softc *sc) 11163{ 11164 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 11165} 11166 11167/* close gates #2, #3 and #4 */ 11168static void 11169bxe_set_234_gates(struct bxe_softc *sc, 11170 uint8_t close) 11171{ 11172 uint32_t val; 11173 11174 /* gates #2 and #4a are closed/opened for "not E1" only */ 11175 if (!CHIP_IS_E1(sc)) { 11176 /* #4 */ 11177 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 11178 /* #2 */ 11179 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 11180 } 11181 11182 /* #3 */ 11183 if (CHIP_IS_E1x(sc)) { 11184 /* prevent interrupts from HC on both ports */ 11185 val = REG_RD(sc, HC_REG_CONFIG_1); 11186 REG_WR(sc, HC_REG_CONFIG_1, 11187 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 11188 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 11189 11190 val = REG_RD(sc, HC_REG_CONFIG_0); 11191 REG_WR(sc, HC_REG_CONFIG_0, 11192 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 11193 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 11194 } else { 11195 /* Prevent incoming interrupts in IGU */ 11196 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 11197 11198 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 11199 (!close) ? 11200 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 11201 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 11202 } 11203 11204 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", 11205 close ? 
"closing" : "opening"); 11206 11207 wmb(); 11208} 11209 11210/* poll for pending writes bit, it should get cleared in no more than 1s */ 11211static int 11212bxe_er_poll_igu_vq(struct bxe_softc *sc) 11213{ 11214 uint32_t cnt = 1000; 11215 uint32_t pend_bits = 0; 11216 11217 do { 11218 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 11219 11220 if (pend_bits == 0) { 11221 break; 11222 } 11223 11224 DELAY(1000); 11225 } while (--cnt > 0); 11226 11227 if (cnt == 0) { 11228 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 11229 return (-1); 11230 } 11231 11232 return (0); 11233} 11234 11235#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 11236 11237static void 11238bxe_clp_reset_prep(struct bxe_softc *sc, 11239 uint32_t *magic_val) 11240{ 11241 /* Do some magic... */ 11242 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11243 *magic_val = val & SHARED_MF_CLP_MAGIC; 11244 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 11245} 11246 11247/* restore the value of the 'magic' bit */ 11248static void 11249bxe_clp_reset_done(struct bxe_softc *sc, 11250 uint32_t magic_val) 11251{ 11252 /* Restore the 'magic' bit value... */ 11253 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11254 MFCFG_WR(sc, shared_mf_config.clp_mb, 11255 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 11256} 11257 11258/* prepare for MCP reset, takes care of CLP configurations */ 11259static void 11260bxe_reset_mcp_prep(struct bxe_softc *sc, 11261 uint32_t *magic_val) 11262{ 11263 uint32_t shmem; 11264 uint32_t validity_offset; 11265 11266 /* set `magic' bit in order to save MF config */ 11267 if (!CHIP_IS_E1(sc)) { 11268 bxe_clp_reset_prep(sc, magic_val); 11269 } 11270 11271 /* get shmem offset */ 11272 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11273 validity_offset = 11274 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 11275 11276 /* Clear validity map flags */ 11277 if (shmem > 0) { 11278 REG_WR(sc, shmem + validity_offset, 0); 11279 } 11280} 11281 11282#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 11283#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 11284 11285static void 11286bxe_mcp_wait_one(struct bxe_softc *sc) 11287{ 11288 /* special handling for emulation and FPGA (10 times longer) */ 11289 if (CHIP_REV_IS_SLOW(sc)) { 11290 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 11291 } else { 11292 DELAY((MCP_ONE_TIMEOUT) * 1000); 11293 } 11294} 11295 11296/* initialize shmem_base and waits for validity signature to appear */ 11297static int 11298bxe_init_shmem(struct bxe_softc *sc) 11299{ 11300 int cnt = 0; 11301 uint32_t val = 0; 11302 11303 do { 11304 sc->devinfo.shmem_base = 11305 sc->link_params.shmem_base = 11306 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11307 11308 if (sc->devinfo.shmem_base) { 11309 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 11310 if (val & SHR_MEM_VALIDITY_MB) 11311 return (0); 11312 } 11313 11314 bxe_mcp_wait_one(sc); 11315 11316 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 11317 11318 BLOGE(sc, "BAD MCP validity signature\n"); 11319 11320 return (-1); 11321} 11322 11323static int 11324bxe_reset_mcp_comp(struct bxe_softc *sc, 11325 uint32_t magic_val) 11326{ 11327 int rc = bxe_init_shmem(sc); 11328 11329 /* Restore the `magic' bit value */ 11330 if (!CHIP_IS_E1(sc)) { 11331 bxe_clp_reset_done(sc, magic_val); 11332 } 11333 11334 return (rc); 11335} 11336 11337static void 11338bxe_pxp_prep(struct bxe_softc *sc) 11339{ 11340 if (!CHIP_IS_E1(sc)) { 11341 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 11342 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 11343 
wmb(); 11344 } 11345} 11346 11347/* 11348 * Reset the whole chip except for: 11349 * - PCIE core 11350 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 11351 * - IGU 11352 * - MISC (including AEU) 11353 * - GRC 11354 * - RBCN, RBCP 11355 */ 11356static void 11357bxe_process_kill_chip_reset(struct bxe_softc *sc, 11358 uint8_t global) 11359{ 11360 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 11361 uint32_t global_bits2, stay_reset2; 11362 11363 /* 11364 * Bits that have to be set in reset_mask2 if we want to reset 'global' 11365 * (per chip) blocks. 11366 */ 11367 global_bits2 = 11368 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 11369 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 11370 11371 /* 11372 * Don't reset the following blocks. 11373 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 11374 * reset, as in a 4-port device they might still be owned 11375 * by the MCP (there is only one leader per path). 11376 */ 11377 not_reset_mask1 = 11378 MISC_REGISTERS_RESET_REG_1_RST_HC | 11379 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 11380 MISC_REGISTERS_RESET_REG_1_RST_PXP; 11381 11382 not_reset_mask2 = 11383 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 11384 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 11385 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 11386 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 11387 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 11388 MISC_REGISTERS_RESET_REG_2_RST_GRC | 11389 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 11390 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 11391 MISC_REGISTERS_RESET_REG_2_RST_ATC | 11392 MISC_REGISTERS_RESET_REG_2_PGLC | 11393 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 11394 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 11395 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 11396 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 11397 MISC_REGISTERS_RESET_REG_2_UMAC0 | 11398 MISC_REGISTERS_RESET_REG_2_UMAC1; 11399 11400 /* 11401 * Keep the following blocks in reset: 11402 * - all xxMACs are handled by the elink code. 11403 */ 11404 stay_reset2 = 11405 MISC_REGISTERS_RESET_REG_2_XMAC | 11406 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 11407 11408 /* Full reset masks according to the chip */ 11409 reset_mask1 = 0xffffffff; 11410 11411 if (CHIP_IS_E1(sc)) 11412 reset_mask2 = 0xffff; 11413 else if (CHIP_IS_E1H(sc)) 11414 reset_mask2 = 0x1ffff; 11415 else if (CHIP_IS_E2(sc)) 11416 reset_mask2 = 0xfffff; 11417 else /* CHIP_IS_E3 */ 11418 reset_mask2 = 0x3ffffff; 11419 11420 /* Don't reset global blocks unless we need to */ 11421 if (!global) 11422 reset_mask2 &= ~global_bits2; 11423 11424 /* 11425 * In case of attention in the QM, we need to reset PXP 11426 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 11427 * because otherwise QM reset would release 'close the gates' shortly 11428 * before resetting the PXP, then the PSWRQ would send a write 11429 * request to PGLUE. Then when PXP is reset, PGLUE would try to 11430 * read the payload data from PSWWR, but PSWWR would not 11431 * respond. The write queue in PGLUE would get stuck, DMAE commands 11432 * would not return. Therefore it's important to reset the second 11433 * reset register (containing the 11434 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 11435 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 11436 * bit). 
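 *
 * Hence the order below: the write to MISC_REGISTERS_RESET_REG_2_CLEAR
 * is issued (and pushed with a barrier) before the write to
 * MISC_REGISTERS_RESET_REG_1_CLEAR.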
11437 */ 11438 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11439 reset_mask2 & (~not_reset_mask2)); 11440 11441 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11442 reset_mask1 & (~not_reset_mask1)); 11443 11444 mb(); 11445 wmb(); 11446 11447 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11448 reset_mask2 & (~stay_reset2)); 11449 11450 mb(); 11451 wmb(); 11452 11453 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11454 wmb(); 11455} 11456 11457static int 11458bxe_process_kill(struct bxe_softc *sc, 11459 uint8_t global) 11460{ 11461 int cnt = 1000; 11462 uint32_t val = 0; 11463 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11464 uint32_t tags_63_32 = 0; 11465 11466 /* Empty the Tetris buffer, wait for 1s */ 11467 do { 11468 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11469 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11470 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11471 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11472 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11473 if (CHIP_IS_E3(sc)) { 11474 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11475 } 11476 11477 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11478 ((port_is_idle_0 & 0x1) == 0x1) && 11479 ((port_is_idle_1 & 0x1) == 0x1) && 11480 (pgl_exp_rom2 == 0xffffffff) && 11481 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11482 break; 11483 DELAY(1000); 11484 } while (cnt-- > 0); 11485 11486 if (cnt <= 0) { 11487 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11488 "are still outstanding read requests after 1s! " 11489 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11490 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11491 sr_cnt, blk_cnt, port_is_idle_0, 11492 port_is_idle_1, pgl_exp_rom2); 11493 return (-1); 11494 } 11495 11496 mb(); 11497 11498 /* Close gates #2, #3 and #4 */ 11499 bxe_set_234_gates(sc, TRUE); 11500 11501 /* Poll for IGU VQs for 57712 and newer chips */ 11502 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11503 return (-1); 11504 } 11505 11506 /* XXX indicate that "process kill" is in progress to MCP */ 11507 11508 /* clear "unprepared" bit */ 11509 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11510 mb(); 11511 11512 /* Make sure all is written to the chip before the reset */ 11513 wmb(); 11514 11515 /* 11516 * Wait for 1ms to empty GLUE and PCI-E core queues, 11517 * PSWHST, GRC and PSWRD Tetris buffer. 
11518 */ 11519 DELAY(1000); 11520 11521 /* Prepare to chip reset: */ 11522 /* MCP */ 11523 if (global) { 11524 bxe_reset_mcp_prep(sc, &val); 11525 } 11526 11527 /* PXP */ 11528 bxe_pxp_prep(sc); 11529 mb(); 11530 11531 /* reset the chip */ 11532 bxe_process_kill_chip_reset(sc, global); 11533 mb(); 11534 11535 /* clear errors in PGB */ 11536 if (!CHIP_IS_E1(sc)) 11537 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11538 11539 /* Recover after reset: */ 11540 /* MCP */ 11541 if (global && bxe_reset_mcp_comp(sc, val)) { 11542 return (-1); 11543 } 11544 11545 /* XXX add resetting the NO_MCP mode DB here */ 11546 11547 /* Open the gates #2, #3 and #4 */ 11548 bxe_set_234_gates(sc, FALSE); 11549 11550 /* XXX 11551 * IGU/AEU preparation bring back the AEU/IGU to a reset state 11552 * re-enable attentions 11553 */ 11554 11555 return (0); 11556} 11557 11558static int 11559bxe_leader_reset(struct bxe_softc *sc) 11560{ 11561 int rc = 0; 11562 uint8_t global = bxe_reset_is_global(sc); 11563 uint32_t load_code; 11564 11565 /* 11566 * If not going to reset MCP, load "fake" driver to reset HW while 11567 * driver is owner of the HW. 11568 */ 11569 if (!global && !BXE_NOMCP(sc)) { 11570 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11571 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11572 if (!load_code) { 11573 BLOGE(sc, "MCP response failure, aborting\n"); 11574 rc = -1; 11575 goto exit_leader_reset; 11576 } 11577 11578 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11579 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11580 BLOGE(sc, "MCP unexpected response, aborting\n"); 11581 rc = -1; 11582 goto exit_leader_reset2; 11583 } 11584 11585 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11586 if (!load_code) { 11587 BLOGE(sc, "MCP response failure, aborting\n"); 11588 rc = -1; 11589 goto exit_leader_reset2; 11590 } 11591 } 11592 11593 /* try to recover after the failure */ 11594 if (bxe_process_kill(sc, global)) { 11595 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11596 rc = -1; 11597 goto exit_leader_reset2; 11598 } 11599 11600 /* 11601 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11602 * state. 11603 */ 11604 bxe_set_reset_done(sc); 11605 if (global) { 11606 bxe_clear_reset_global(sc); 11607 } 11608 11609exit_leader_reset2: 11610 11611 /* unload "fake driver" if it was loaded */ 11612 if (!global && !BXE_NOMCP(sc)) { 11613 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11614 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11615 } 11616 11617exit_leader_reset: 11618 11619 sc->is_leader = 0; 11620 bxe_release_leader_lock(sc); 11621 11622 mb(); 11623 return (rc); 11624} 11625 11626/* 11627 * prepare INIT transition, parameters configured: 11628 * - HC configuration 11629 * - Queue's CDU context 11630 */ 11631static void 11632bxe_pf_q_prep_init(struct bxe_softc *sc, 11633 struct bxe_fastpath *fp, 11634 struct ecore_queue_init_params *init_params) 11635{ 11636 uint8_t cos; 11637 int cxt_index, cxt_offset; 11638 11639 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11640 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11641 11642 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11643 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11644 11645 /* HC rate */ 11646 init_params->rx.hc_rate = 11647 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11648 init_params->tx.hc_rate = 11649 sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; 11650 11651 /* FW SB ID */ 11652 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11653 11654 /* CQ index among the SB indices */ 11655 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11656 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11657 11658 /* set maximum number of COSs supported by this queue */ 11659 init_params->max_cos = sc->max_cos; 11660 11661 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11662 fp->index, init_params->max_cos); 11663 11664 /* set the context pointers queue object */ 11665 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11666 /* XXX change index/cid here if ever support multiple tx CoS */ 11667 /* fp->txdata[cos]->cid */ 11668 cxt_index = fp->index / ILT_PAGE_CIDS; 11669 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11670 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11671 } 11672} 11673 11674/* set flags that are common for the Tx-only and not normal connections */ 11675static unsigned long 11676bxe_get_common_flags(struct bxe_softc *sc, 11677 struct bxe_fastpath *fp, 11678 uint8_t zero_stats) 11679{ 11680 unsigned long flags = 0; 11681 11682 /* PF driver will always initialize the Queue to an ACTIVE state */ 11683 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11684 11685 /* 11686 * tx only connections collect statistics (on the same index as the 11687 * parent connection). The statistics are zeroed when the parent 11688 * connection is initialized. 11689 */ 11690 11691 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11692 if (zero_stats) { 11693 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11694 } 11695 11696 /* 11697 * tx only connections can support tx-switching, though their 11698 * CoS-ness doesn't survive the loopback 11699 */ 11700 if (sc->flags & BXE_TX_SWITCHING) { 11701 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11702 } 11703 11704 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11705 11706 return (flags); 11707} 11708 11709static unsigned long 11710bxe_get_q_flags(struct bxe_softc *sc, 11711 struct bxe_fastpath *fp, 11712 uint8_t leading) 11713{ 11714 unsigned long flags = 0; 11715 11716 if (IS_MF_SD(sc)) { 11717 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11718 } 11719 11720 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11721 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11722 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11723#if 0 11724 if (fp->mode == TPA_MODE_GRO) 11725 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); 11726#endif 11727 } 11728 11729 if (leading) { 11730 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11731 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11732 } 11733 11734 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11735 11736#if 0 11737 /* configure silent vlan removal */ 11738 if (IS_MF_AFEX(sc)) { 11739 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); 11740 } 11741#endif 11742 11743 /* merge with common flags */ 11744 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11745} 11746 11747static void 11748bxe_pf_q_prep_general(struct bxe_softc *sc, 11749 struct bxe_fastpath *fp, 11750 struct ecore_general_setup_params *gen_init, 11751 uint8_t cos) 11752{ 11753 gen_init->stat_id = bxe_stats_id(fp); 11754 gen_init->spcl_id = fp->cl_id; 11755 gen_init->mtu = sc->mtu; 11756 gen_init->cos = cos; 11757} 11758 11759static void 11760bxe_pf_rx_q_prep(struct bxe_softc *sc, 11761 struct bxe_fastpath *fp, 11762 struct rxq_pause_params *pause, 11763 struct ecore_rxq_setup_params *rxq_init) 11764{ 11765 uint8_t max_sge = 0; 11766 uint16_t sge_sz = 
0; 11767 uint16_t tpa_agg_size = 0; 11768 11769 pause->sge_th_lo = SGE_TH_LO(sc); 11770 pause->sge_th_hi = SGE_TH_HI(sc); 11771 11772 /* validate SGE ring has enough to cross high threshold */ 11773 if (sc->dropless_fc && 11774 (pause->sge_th_hi + FW_PREFETCH_CNT) > 11775 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { 11776 BLOGW(sc, "sge ring threshold limit\n"); 11777 } 11778 11779 /* minimum max_aggregation_size is 2*MTU (two full buffers) */ 11780 tpa_agg_size = (2 * sc->mtu); 11781 if (tpa_agg_size < sc->max_aggregation_size) { 11782 tpa_agg_size = sc->max_aggregation_size; 11783 } 11784 11785 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; 11786 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 11787 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; 11788 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); 11789 11790 /* pause - not for e1 */ 11791 if (!CHIP_IS_E1(sc)) { 11792 pause->bd_th_lo = BD_TH_LO(sc); 11793 pause->bd_th_hi = BD_TH_HI(sc); 11794 11795 pause->rcq_th_lo = RCQ_TH_LO(sc); 11796 pause->rcq_th_hi = RCQ_TH_HI(sc); 11797 11798 /* validate rings have enough entries to cross high thresholds */ 11799 if (sc->dropless_fc && 11800 pause->bd_th_hi + FW_PREFETCH_CNT > 11801 sc->rx_ring_size) { 11802 BLOGW(sc, "rx bd ring threshold limit\n"); 11803 } 11804 11805 if (sc->dropless_fc && 11806 pause->rcq_th_hi + FW_PREFETCH_CNT > 11807 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { 11808 BLOGW(sc, "rcq ring threshold limit\n"); 11809 } 11810 11811 pause->pri_map = 1; 11812 } 11813 11814 /* rxq setup */ 11815 rxq_init->dscr_map = fp->rx_dma.paddr; 11816 rxq_init->sge_map = fp->rx_sge_dma.paddr; 11817 rxq_init->rcq_map = fp->rcq_dma.paddr; 11818 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); 11819 11820 /* 11821 * This should be the maximum number of data bytes that may be 11822 * placed on the BD (not including padding). 11823 */ 11824 rxq_init->buf_sz = (fp->rx_buf_size - 11825 IP_HEADER_ALIGNMENT_PADDING); 11826 11827 rxq_init->cl_qzone_id = fp->cl_qzone_id; 11828 rxq_init->tpa_agg_sz = tpa_agg_size; 11829 rxq_init->sge_buf_sz = sge_sz; 11830 rxq_init->max_sges_pkt = max_sge; 11831 rxq_init->rss_engine_id = SC_FUNC(sc); 11832 rxq_init->mcast_engine_id = SC_FUNC(sc); 11833 11834 /* 11835 * Maximum number of simultaneous TPA aggregations for this Queue. 11836 * For PF Clients it should be the maximum available number. 11837 * VF driver(s) may want to define it to a smaller value. 11838 */ 11839 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); 11840 11841 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; 11842 rxq_init->fw_sb_id = fp->fw_sb_id; 11843 11844 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11845 11846 /* 11847 * configure silent vlan removal 11848 * if multi function mode is afex, then mask default vlan 11849 */ 11850 if (IS_MF_AFEX(sc)) { 11851 rxq_init->silent_removal_value = 11852 sc->devinfo.mf_info.afex_def_vlan_tag; 11853 rxq_init->silent_removal_mask = EVL_VLID_MASK; 11854 } 11855} 11856 11857static void 11858bxe_pf_tx_q_prep(struct bxe_softc *sc, 11859 struct bxe_fastpath *fp, 11860 struct ecore_txq_setup_params *txq_init, 11861 uint8_t cos) 11862{ 11863 /* 11864 * XXX If multiple CoS is ever supported then each fastpath structure 11865 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
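 * For illustration only (txdata here is a hypothetical per-CoS array,
 * not a field this driver defines today), the descriptor mapping set
 * just below would then become something like: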
11866 * fp->txdata[cos]->tx_dma.paddr; 11867 */ 11868 txq_init->dscr_map = fp->tx_dma.paddr; 11869 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11870 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11871 txq_init->fw_sb_id = fp->fw_sb_id; 11872 11873 /* 11874 * set the TSS leading client id for TX classification to the 11875 * leading RSS client id 11876 */ 11877 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11878} 11879 11880/* 11881 * This function performs 2 steps in a queue state machine: 11882 * 1) RESET->INIT 11883 * 2) INIT->SETUP 11884 */ 11885static int 11886bxe_setup_queue(struct bxe_softc *sc, 11887 struct bxe_fastpath *fp, 11888 uint8_t leading) 11889{ 11890 struct ecore_queue_state_params q_params = { NULL }; 11891 struct ecore_queue_setup_params *setup_params = 11892 &q_params.params.setup; 11893#if 0 11894 struct ecore_queue_setup_tx_only_params *tx_only_params = 11895 &q_params.params.tx_only; 11896 uint8_t tx_index; 11897#endif 11898 int rc; 11899 11900 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11901 11902 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11903 11904 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11905 11906 /* we want to wait for completion in this context */ 11907 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11908 11909 /* prepare the INIT parameters */ 11910 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11911 11912 /* Set the command */ 11913 q_params.cmd = ECORE_Q_CMD_INIT; 11914 11915 /* Change the state to INIT */ 11916 rc = ecore_queue_state_change(sc, &q_params); 11917 if (rc) { 11918 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11919 return (rc); 11920 } 11921 11922 BLOGD(sc, DBG_LOAD, "init complete\n"); 11923 11924 /* now move the Queue to the SETUP state */ 11925 memset(setup_params, 0, sizeof(*setup_params)); 11926 11927 /* set Queue flags */ 11928 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11929 11930 /* set general SETUP parameters */ 11931 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11932 FIRST_TX_COS_INDEX); 11933 11934 bxe_pf_rx_q_prep(sc, fp, 11935 &setup_params->pause_params, 11936 &setup_params->rxq_params); 11937 11938 bxe_pf_tx_q_prep(sc, fp, 11939 &setup_params->txq_params, 11940 FIRST_TX_COS_INDEX); 11941 11942 /* Set the command */ 11943 q_params.cmd = ECORE_Q_CMD_SETUP; 11944 11945 /* change the state to SETUP */ 11946 rc = ecore_queue_state_change(sc, &q_params); 11947 if (rc) { 11948 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11949 return (rc); 11950 } 11951 11952#if 0 11953 /* loop through the relevant tx-only indices */ 11954 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11955 tx_index < sc->max_cos; 11956 tx_index++) { 11957 /* prepare and send tx-only ramrod */ 11958 rc = bxe_setup_tx_only(sc, fp, &q_params, 11959 tx_only_params, tx_index, leading); 11960 if (rc) { 11961 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11962 fp->index, tx_index); 11963 return (rc); 11964 } 11965 } 11966#endif 11967 11968 return (rc); 11969} 11970 11971static int 11972bxe_setup_leading(struct bxe_softc *sc) 11973{ 11974 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11975} 11976 11977static int 11978bxe_config_rss_pf(struct bxe_softc *sc, 11979 struct ecore_rss_config_obj *rss_obj, 11980 uint8_t config_hash) 11981{ 11982 struct ecore_config_rss_params params = { NULL }; 11983 int i; 11984 11985 /* 11986 * Although RSS is meaningless when there is a single HW queue we 11987 * still need it enabled in order to have HW Rx hash generated.
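 *
 * Rough sketch of what is programmed below, using only the names visible
 * in this function: regular RSS mode, TCP/IPv4 and TCP/IPv6 hashing (plus
 * UDP when rss_obj->udp_rss_v4/v6 are set), the client-id indirection
 * table copied from rss_obj->ind_table, and, when config_hash is set, a
 * fresh random hash key drawn from arc4random().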
11988 */ 11989 11990 params.rss_obj = rss_obj; 11991 11992 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11993 11994 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11995 11996 /* RSS configuration */ 11997 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11998 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11999 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 12000 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 12001 if (rss_obj->udp_rss_v4) { 12002 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 12003 } 12004 if (rss_obj->udp_rss_v6) { 12005 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 12006 } 12007 12008 /* Hash bits */ 12009 params.rss_result_mask = MULTI_MASK; 12010 12011 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 12012 12013 if (config_hash) { 12014 /* RSS keys */ 12015 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 12016 params.rss_key[i] = arc4random(); 12017 } 12018 12019 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 12020 } 12021 12022 return (ecore_config_rss(sc, &params)); 12023} 12024 12025static int 12026bxe_config_rss_eth(struct bxe_softc *sc, 12027 uint8_t config_hash) 12028{ 12029 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 12030} 12031 12032static int 12033bxe_init_rss_pf(struct bxe_softc *sc) 12034{ 12035 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 12036 int i; 12037 12038 /* 12039 * Prepare the initial contents of the indirection table if 12040 * RSS is enabled 12041 */ 12042 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 12043 sc->rss_conf_obj.ind_table[i] = 12044 (sc->fp->cl_id + (i % num_eth_queues)); 12045 } 12046 12047 if (sc->udp_rss) { 12048 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12049 } 12050 12051 /* 12052 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12053 * per-port, so if explicit configuration is needed, do it only 12054 * for a PMF. 12055 * 12056 * For 57712 and newer it's a per-function configuration. 12057 */ 12058 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12059} 12060 12061static int 12062bxe_set_mac_one(struct bxe_softc *sc, 12063 uint8_t *mac, 12064 struct ecore_vlan_mac_obj *obj, 12065 uint8_t set, 12066 int mac_type, 12067 unsigned long *ramrod_flags) 12068{ 12069 struct ecore_vlan_mac_ramrod_params ramrod_param; 12070 int rc; 12071 12072 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12073 12074 /* fill in general parameters */ 12075 ramrod_param.vlan_mac_obj = obj; 12076 ramrod_param.ramrod_flags = *ramrod_flags; 12077 12078 /* fill a user request section if needed */ 12079 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12080 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12081 12082 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12083 12084 /* Set the command: ADD or DEL */ 12085 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12086 ECORE_VLAN_MAC_DEL; 12087 } 12088 12089 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12090 12091 if (rc == ECORE_EXISTS) { 12092 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12093 /* do not treat adding same MAC as error */ 12094 rc = 0; 12095 } else if (rc < 0) { 12096 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 12097 } 12098 12099 return (rc); 12100} 12101 12102static int 12103bxe_set_eth_mac(struct bxe_softc *sc, 12104 uint8_t set) 12105{ 12106 unsigned long ramrod_flags = 0; 12107 12108 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12109 12110 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12111 12112 /* Eth MAC is set on RSS leading client (fp[0]) */ 12113 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12114 &sc->sp_objs->mac_obj, 12115 set, ECORE_ETH_MAC, &ramrod_flags)); 12116} 12117 12118#if 0 12119static void 12120bxe_update_max_mf_config(struct bxe_softc *sc, 12121 uint32_t value) 12122{ 12123 /* load old values */ 12124 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12125 12126 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12127 /* leave all but MAX value */ 12128 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12129 12130 /* set new MAX value */ 12131 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12132 FUNC_MF_CFG_MAX_BW_MASK); 12133 12134 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12135 } 12136} 12137#endif 12138 12139static int 12140bxe_get_cur_phy_idx(struct bxe_softc *sc) 12141{ 12142 uint32_t sel_phy_idx = 0; 12143 12144 if (sc->link_params.num_phys <= 1) { 12145 return (ELINK_INT_PHY); 12146 } 12147 12148 if (sc->link_vars.link_up) { 12149 sel_phy_idx = ELINK_EXT_PHY1; 12150 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12151 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12152 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12153 ELINK_SUPPORTED_FIBRE)) 12154 sel_phy_idx = ELINK_EXT_PHY2; 12155 } else { 12156 switch (elink_phy_selection(&sc->link_params)) { 12157 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12158 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12159 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12160 sel_phy_idx = ELINK_EXT_PHY1; 12161 break; 12162 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12163 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12164 sel_phy_idx = ELINK_EXT_PHY2; 12165 break; 12166 } 12167 } 12168 12169 return (sel_phy_idx); 12170} 12171 12172static int 12173bxe_get_link_cfg_idx(struct bxe_softc *sc) 12174{ 12175 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12176 12177 /* 12178 * The selected activated PHY is always after swapping (in case PHY 12179 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12180 * the configuration 12181 */ 12182 12183 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12184 if (sel_phy_idx == ELINK_EXT_PHY1) 12185 sel_phy_idx = ELINK_EXT_PHY2; 12186 else if (sel_phy_idx == ELINK_EXT_PHY2) 12187 sel_phy_idx = ELINK_EXT_PHY1; 12188 } 12189 12190 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12191} 12192 12193static void 12194bxe_set_requested_fc(struct bxe_softc *sc) 12195{ 12196 /* 12197 * Initialize link parameters structure variables 12198 * It is recommended to turn off RX FC for jumbo frames 12199 * for better performance 12200 */ 12201 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12202 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12203 } else { 12204 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12205 } 12206} 12207 12208static void 12209bxe_calc_fc_adv(struct bxe_softc *sc) 12210{ 12211 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12212 switch (sc->link_vars.ieee_fc & 12213 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12214 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12215 default: 12216 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12217 ADVERTISED_Pause); 12218 break; 12219 12220 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12221 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12222 ADVERTISED_Pause); 12223 break; 12224 12225 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12226 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12227 break; 12228 } 12229} 12230 12231static uint16_t 12232bxe_get_mf_speed(struct bxe_softc *sc) 12233{ 12234 uint16_t line_speed = sc->link_vars.line_speed; 12235 if (IS_MF(sc)) { 12236 uint16_t maxCfg = 12237 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12238 12239 /* calculate the current MAX line speed limit for the MF devices */ 12240 if (IS_MF_SI(sc)) { 12241 line_speed = (line_speed * maxCfg) / 100; 12242 } else { /* SD mode */ 12243 uint16_t vn_max_rate = maxCfg * 100; 12244 12245 if (vn_max_rate < line_speed) { 12246 line_speed = vn_max_rate; 12247 } 12248 } 12249 } 12250 12251 return (line_speed); 12252} 12253 12254static void 12255bxe_fill_report_data(struct bxe_softc *sc, 12256 struct bxe_link_report_data *data) 12257{ 12258 uint16_t line_speed = bxe_get_mf_speed(sc); 12259 12260 memset(data, 0, sizeof(*data)); 12261 12262 /* fill the report data with the effective line speed */ 12263 data->line_speed = line_speed; 12264 12265 /* Link is down */ 12266 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12267 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12268 } 12269 12270 /* Full DUPLEX */ 12271 if (sc->link_vars.duplex == DUPLEX_FULL) { 12272 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12273 } 12274 12275 /* Rx Flow Control is ON */ 12276 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12277 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12278 } 12279 12280 /* Tx Flow Control is ON */ 12281 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12282 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12283 } 12284} 12285 12286/* report link status to OS, should be called under phy_lock */ 12287static void 12288bxe_link_report_locked(struct bxe_softc *sc) 12289{ 12290 struct bxe_link_report_data cur_data; 12291 12292 /* reread mf_cfg */ 12293 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12294 bxe_read_mf_cfg(sc); 12295 } 12296 12297 /* Read the current link report info */ 12298 
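    /*
     * Note on the logic below: the memcmp() against last_reported_link
     * plus the two LINK_DOWN tests mean any given state, including a
     * repeated link-down, is reported at most once until it changes.
     */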
bxe_fill_report_data(sc, &cur_data); 12299 12300 /* Don't report link down or exactly the same link status twice */ 12301 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12302 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12303 &sc->last_reported_link.link_report_flags) && 12304 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12305 &cur_data.link_report_flags))) { 12306 return; 12307 } 12308 12309 sc->link_cnt++; 12310 12311 /* report new link params and remember the state for the next time */ 12312 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12313 12314 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12315 &cur_data.link_report_flags)) { 12316 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 12317 BLOGI(sc, "NIC Link is Down\n"); 12318 } else { 12319 const char *duplex; 12320 const char *flow; 12321 12322 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12323 &cur_data.link_report_flags)) { 12324 duplex = "full"; 12325 } else { 12326 duplex = "half"; 12327 } 12328 12329 /* 12330 * Handle the FC at the end so that only these flags would be 12331 * possibly set. This way we may easily check if there is no FC 12332 * enabled. 12333 */ 12334 if (cur_data.link_report_flags) { 12335 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12336 &cur_data.link_report_flags) && 12337 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12338 &cur_data.link_report_flags)) { 12339 flow = "ON - receive & transmit"; 12340 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12341 &cur_data.link_report_flags) && 12342 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12343 &cur_data.link_report_flags)) { 12344 flow = "ON - receive"; 12345 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12346 &cur_data.link_report_flags) && 12347 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12348 &cur_data.link_report_flags)) { 12349 flow = "ON - transmit"; 12350 } else { 12351 flow = "none"; /* possible? 
*/ 12352 } 12353 } else { 12354 flow = "none"; 12355 } 12356 12357 if_link_state_change(sc->ifp, LINK_STATE_UP); 12358 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12359 cur_data.line_speed, duplex, flow); 12360 } 12361} 12362 12363static void 12364bxe_link_report(struct bxe_softc *sc) 12365{ 12366 bxe_acquire_phy_lock(sc); 12367 bxe_link_report_locked(sc); 12368 bxe_release_phy_lock(sc); 12369} 12370 12371static void 12372bxe_link_status_update(struct bxe_softc *sc) 12373{ 12374 if (sc->state != BXE_STATE_OPEN) { 12375 return; 12376 } 12377 12378#if 0 12379 /* read updated dcb configuration */ 12380 if (IS_PF(sc)) 12381 bxe_dcbx_pmf_update(sc); 12382#endif 12383 12384 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12385 elink_link_status_update(&sc->link_params, &sc->link_vars); 12386 } else { 12387 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12388 ELINK_SUPPORTED_10baseT_Full | 12389 ELINK_SUPPORTED_100baseT_Half | 12390 ELINK_SUPPORTED_100baseT_Full | 12391 ELINK_SUPPORTED_1000baseT_Full | 12392 ELINK_SUPPORTED_2500baseX_Full | 12393 ELINK_SUPPORTED_10000baseT_Full | 12394 ELINK_SUPPORTED_TP | 12395 ELINK_SUPPORTED_FIBRE | 12396 ELINK_SUPPORTED_Autoneg | 12397 ELINK_SUPPORTED_Pause | 12398 ELINK_SUPPORTED_Asym_Pause); 12399 sc->port.advertising[0] = sc->port.supported[0]; 12400 12401 sc->link_params.sc = sc; 12402 sc->link_params.port = SC_PORT(sc); 12403 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12404 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12405 sc->link_params.req_line_speed[0] = SPEED_10000; 12406 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12407 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12408 12409 if (CHIP_REV_IS_FPGA(sc)) { 12410 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12411 sc->link_vars.line_speed = ELINK_SPEED_1000; 12412 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12413 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12414 } else { 12415 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12416 sc->link_vars.line_speed = ELINK_SPEED_10000; 12417 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12418 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12419 } 12420 12421 sc->link_vars.link_up = 1; 12422 12423 sc->link_vars.duplex = DUPLEX_FULL; 12424 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12425 12426 if (IS_PF(sc)) { 12427 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12428 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12429 bxe_link_report(sc); 12430 } 12431 } 12432 12433 if (IS_PF(sc)) { 12434 if (sc->link_vars.link_up) { 12435 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12436 } else { 12437 bxe_stats_handle(sc, STATS_EVENT_STOP); 12438 } 12439 bxe_link_report(sc); 12440 } else { 12441 bxe_link_report(sc); 12442 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12443 } 12444} 12445 12446static int 12447bxe_initial_phy_init(struct bxe_softc *sc, 12448 int load_mode) 12449{ 12450 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12451 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12452 struct elink_params *lp = &sc->link_params; 12453 12454 bxe_set_requested_fc(sc); 12455 12456 if (CHIP_REV_IS_SLOW(sc)) { 12457 uint32_t bond = CHIP_BOND_ID(sc); 12458 uint32_t feat = 0; 12459 12460 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12461 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12462 } else if (bond & 0x4) { 12463 if (CHIP_IS_E3(sc)) { 12464 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12465 } else { 12466 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12467 } 12468 } else if (bond & 0x8) 
{ 12469 if (CHIP_IS_E3(sc)) { 12470 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12471 } else { 12472 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12473 } 12474 } 12475 12476 /* disable EMAC for E3 and above */ 12477 if (bond & 0x2) { 12478 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12479 } 12480 12481 sc->link_params.feature_config_flags |= feat; 12482 } 12483 12484 bxe_acquire_phy_lock(sc); 12485 12486 if (load_mode == LOAD_DIAG) { 12487 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12488 /* Prefer doing PHY loopback at 10G speed, if possible */ 12489 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12490 if (lp->speed_cap_mask[cfg_idx] & 12491 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12492 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12493 } else { 12494 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12495 } 12496 } 12497 } 12498 12499 if (load_mode == LOAD_LOOPBACK_EXT) { 12500 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12501 } 12502 12503 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12504 12505 bxe_release_phy_lock(sc); 12506 12507 bxe_calc_fc_adv(sc); 12508 12509 if (sc->link_vars.link_up) { 12510 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12511 bxe_link_report(sc); 12512 } 12513 12514 if (!CHIP_REV_IS_SLOW(sc)) { 12515 bxe_periodic_start(sc); 12516 } 12517 12518 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12519 return (rc); 12520} 12521 12522/* must be called under IF_ADDR_LOCK */ 12523 12524static int 12525bxe_set_mc_list(struct bxe_softc *sc) 12526{ 12527 struct ecore_mcast_ramrod_params rparam = { NULL }; 12528 int rc = 0; 12529 int mc_count = 0; 12530 int mcnt, i; 12531 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; 12532 unsigned char *mta; 12533 if_t ifp = sc->ifp; 12534 12535 mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ 12536 if (!mc_count) 12537 return (0); 12538 12539 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * 12540 mc_count, M_DEVBUF, M_NOWAIT); 12541 12542 if(mta == NULL) { 12543 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12544 return (-1); 12545 } 12546 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); 12547 12548 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); 12549 mc_mac_start = mc_mac; 12550 12551 if (!mc_mac) { 12552 free(mta, M_DEVBUF); 12553 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12554 return (-1); 12555 } 12556 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 12557 12558 /* mta and mcnt not expected to be different */ 12559 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 12560 12561 12562 rparam.mcast_obj = &sc->mcast_obj; 12563 ECORE_LIST_INIT(&rparam.mcast_list); 12564 12565 for(i=0; i< mcnt; i++) { 12566 12567 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); 12568 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); 12569 12570 BLOGD(sc, DBG_LOAD, 12571 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12572 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12573 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12574 12575 mc_mac++; 12576 } 12577 rparam.mcast_list_len = mc_count; 12578 12579 BXE_MCAST_LOCK(sc); 12580 12581 /* first, clear all configured multicast MACs */ 12582 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12583 if (rc < 0) { 12584 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 12585 BXE_MCAST_UNLOCK(sc); 12586 free(mc_mac_start, M_DEVBUF); 12587 free(mta, M_DEVBUF); 12588 return (rc); 12589 } 12590 12591 /* Now add the new MACs */ 12592 rc = ecore_config_mcast(sc, 
&rparam, ECORE_MCAST_CMD_ADD); 12593 if (rc < 0) { 12594 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12595 } 12596 12597 BXE_MCAST_UNLOCK(sc); 12598 12599 free(mc_mac_start, M_DEVBUF); 12600 free(mta, M_DEVBUF); 12601 12602 return (rc); 12603} 12604 12605static int 12606bxe_set_uc_list(struct bxe_softc *sc) 12607{ 12608 if_t ifp = sc->ifp; 12609 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12610 struct ifaddr *ifa; 12611 unsigned long ramrod_flags = 0; 12612 int rc; 12613 12614#if __FreeBSD_version < 800000 12615 IF_ADDR_LOCK(ifp); 12616#else 12617 if_addr_rlock(ifp); 12618#endif 12619 12620 /* first schedule a cleanup of the old configuration */ 12621 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12622 if (rc < 0) { 12623 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12624#if __FreeBSD_version < 800000 12625 IF_ADDR_UNLOCK(ifp); 12626#else 12627 if_addr_runlock(ifp); 12628#endif 12629 return (rc); 12630 } 12631 12632 ifa = if_getifaddr(ifp); /* XXX Is this structure */ 12633 while (ifa) { 12634 if (ifa->ifa_addr->sa_family != AF_LINK) { 12635 ifa = TAILQ_NEXT(ifa, ifa_link); 12636 continue; 12637 } 12638 12639 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12640 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12641 if (rc == -EEXIST) { 12642 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12643 /* do not treat adding same MAC as an error */ 12644 rc = 0; 12645 } else if (rc < 0) { 12646 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12647#if __FreeBSD_version < 800000 12648 IF_ADDR_UNLOCK(ifp); 12649#else 12650 if_addr_runlock(ifp); 12651#endif 12652 return (rc); 12653 } 12654 12655 ifa = TAILQ_NEXT(ifa, ifa_link); 12656 } 12657 12658#if __FreeBSD_version < 800000 12659 IF_ADDR_UNLOCK(ifp); 12660#else 12661 if_addr_runlock(ifp); 12662#endif 12663 12664 /* Execute the pending commands */ 12665 bit_set(&ramrod_flags, RAMROD_CONT); 12666 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12667 ECORE_UC_LIST_MAC, &ramrod_flags)); 12668} 12669 12670static void 12671bxe_set_rx_mode(struct bxe_softc *sc) 12672{ 12673 if_t ifp = sc->ifp; 12674 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12675 12676 if (sc->state != BXE_STATE_OPEN) { 12677 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12678 return; 12679 } 12680 12681 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); 12682 12683 if (if_getflags(ifp) & IFF_PROMISC) { 12684 rx_mode = BXE_RX_MODE_PROMISC; 12685 } else if ((if_getflags(ifp) & IFF_ALLMULTI) || 12686 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && 12687 CHIP_IS_E1(sc))) { 12688 rx_mode = BXE_RX_MODE_ALLMULTI; 12689 } else { 12690 if (IS_PF(sc)) { 12691 /* some multicasts */ 12692 if (bxe_set_mc_list(sc) < 0) { 12693 rx_mode = BXE_RX_MODE_ALLMULTI; 12694 } 12695 if (bxe_set_uc_list(sc) < 0) { 12696 rx_mode = BXE_RX_MODE_PROMISC; 12697 } 12698 } 12699#if 0 12700 else { 12701 /* 12702 * Configuring mcast to a VF involves sleeping (when we 12703 * wait for the PF's response).
Since this function is 12704 * called from a non sleepable context we must schedule 12705 * a work item for this purpose 12706 */ 12707 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12708 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12709 } 12710#endif 12711 } 12712 12713 sc->rx_mode = rx_mode; 12714 12715 /* schedule the rx_mode command */ 12716 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12717 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12718 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12719 return; 12720 } 12721 12722 if (IS_PF(sc)) { 12723 bxe_set_storm_rx_mode(sc); 12724 } 12725#if 0 12726 else { 12727 /* 12728 * Configuring mcast to a VF involves sleeping (when we 12729 * wait for the PF's response). Since this function is 12730 * called from a non sleepable context we must schedule 12731 * a work item for this purpose 12732 */ 12733 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12734 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12735 } 12736#endif 12737 12738} 12739 12740 12741/* update flags in shmem */ 12742static void 12743bxe_update_drv_flags(struct bxe_softc *sc, 12744 uint32_t flags, 12745 uint32_t set) 12746{ 12747 uint32_t drv_flags; 12748 12749 if (SHMEM2_HAS(sc, drv_flags)) { 12750 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12751 drv_flags = SHMEM2_RD(sc, drv_flags); 12752 12753 if (set) { 12754 SET_FLAGS(drv_flags, flags); 12755 } else { 12756 RESET_FLAGS(drv_flags, flags); 12757 } 12758 12759 SHMEM2_WR(sc, drv_flags, drv_flags); 12760 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12761 12762 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12763 } 12764} 12765 12766/* periodic timer callout routine, only runs when the interface is up */ 12767 12768static void 12769bxe_periodic_callout_func(void *xsc) 12770{ 12771 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12772 int i; 12773 12774 if (!BXE_CORE_TRYLOCK(sc)) { 12775 /* just bail and try again next time */ 12776 12777 if ((sc->state == BXE_STATE_OPEN) && 12778 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12779 /* schedule the next periodic callout */ 12780 callout_reset(&sc->periodic_callout, hz, 12781 bxe_periodic_callout_func, sc); 12782 } 12783 12784 return; 12785 } 12786 12787 if ((sc->state != BXE_STATE_OPEN) || 12788 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12789 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12790 BXE_CORE_UNLOCK(sc); 12791 return; 12792 } 12793 12794 /* Check for TX timeouts on any fastpath. */ 12795 FOR_EACH_QUEUE(sc, i) { 12796 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12797 /* Ruh-Roh, chip was reset! */ 12798 break; 12799 } 12800 } 12801 12802 if (!CHIP_REV_IS_SLOW(sc)) { 12803 /* 12804 * This barrier is needed to ensure the ordering between the writing 12805 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12806 * the reading here. 
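 *
 * (Put differently: without mb() the load of sc->port.pmf below could be
 * reordered ahead of a store made by those paths, and a newly-designated
 * PMF could skip an elink_period_func() pass.)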
12807 */ 12808 mb(); 12809 if (sc->port.pmf) { 12810 bxe_acquire_phy_lock(sc); 12811 elink_period_func(&sc->link_params, &sc->link_vars); 12812 bxe_release_phy_lock(sc); 12813 } 12814 } 12815 12816 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12817 int mb_idx = SC_FW_MB_IDX(sc); 12818 uint32_t drv_pulse; 12819 uint32_t mcp_pulse; 12820 12821 ++sc->fw_drv_pulse_wr_seq; 12822 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12823 12824 drv_pulse = sc->fw_drv_pulse_wr_seq; 12825 bxe_drv_pulse(sc); 12826 12827 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12828 MCP_PULSE_SEQ_MASK); 12829 12830 /* 12831 * The delta between driver pulse and mcp response should 12832 * be 1 (before mcp response) or 0 (after mcp response). 12833 */ 12834 if ((drv_pulse != mcp_pulse) && 12835 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12836 /* someone lost a heartbeat... */ 12837 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12838 drv_pulse, mcp_pulse); 12839 } 12840 } 12841 12842 /* state is BXE_STATE_OPEN */ 12843 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12844 12845#if 0 12846 /* sample VF bulletin board for new posts from PF */ 12847 if (IS_VF(sc)) { 12848 bxe_sample_bulletin(sc); 12849 } 12850#endif 12851 12852 BXE_CORE_UNLOCK(sc); 12853 12854 if ((sc->state == BXE_STATE_OPEN) && 12855 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12856 /* schedule the next periodic callout */ 12857 callout_reset(&sc->periodic_callout, hz, 12858 bxe_periodic_callout_func, sc); 12859 } 12860} 12861 12862static void 12863bxe_periodic_start(struct bxe_softc *sc) 12864{ 12865 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12866 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12867} 12868 12869static void 12870bxe_periodic_stop(struct bxe_softc *sc) 12871{ 12872 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12873 callout_drain(&sc->periodic_callout); 12874} 12875 12876/* start the controller */ 12877static __noinline int 12878bxe_nic_load(struct bxe_softc *sc, 12879 int load_mode) 12880{ 12881 uint32_t val; 12882 int load_code = 0; 12883 int i, rc = 0; 12884 12885 BXE_CORE_LOCK_ASSERT(sc); 12886 12887 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12888 12889 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12890 12891 if (IS_PF(sc)) { 12892 /* must be called before memory allocation and HW init */ 12893 bxe_ilt_set_info(sc); 12894 } 12895 12896 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12897 12898 bxe_set_fp_rx_buf_size(sc); 12899 12900 if (bxe_alloc_fp_buffers(sc) != 0) { 12901 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12902 sc->state = BXE_STATE_CLOSED; 12903 rc = ENOMEM; 12904 goto bxe_nic_load_error0; 12905 } 12906 12907 if (bxe_alloc_mem(sc) != 0) { 12908 sc->state = BXE_STATE_CLOSED; 12909 rc = ENOMEM; 12910 goto bxe_nic_load_error0; 12911 } 12912 12913 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12914 sc->state = BXE_STATE_CLOSED; 12915 rc = ENOMEM; 12916 goto bxe_nic_load_error0; 12917 } 12918 12919 if (IS_PF(sc)) { 12920 /* set pf load just before approaching the MCP */ 12921 bxe_set_pf_load(sc); 12922 12923 /* if MCP exists send load request and analyze response */ 12924 if (!BXE_NOMCP(sc)) { 12925 /* attempt to load pf */ 12926 if (bxe_nic_load_request(sc, &load_code) != 0) { 12927 sc->state = BXE_STATE_CLOSED; 12928 rc = ENXIO; 12929 goto bxe_nic_load_error1; 12930 } 12931 12932 /* what did the MCP say? 
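 * (Presumably one of the FW_MSG_CODE_DRV_LOAD_* responses;
 * bxe_nic_load_analyze_req() below fails on anything unexpected and the
 * load is aborted.)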
*/ 12933 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12934 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12935 sc->state = BXE_STATE_CLOSED; 12936 rc = ENXIO; 12937 goto bxe_nic_load_error2; 12938 } 12939 } else { 12940 BLOGI(sc, "Device has no MCP!\n"); 12941 load_code = bxe_nic_load_no_mcp(sc); 12942 } 12943 12944 /* mark PMF if applicable */ 12945 bxe_nic_load_pmf(sc, load_code); 12946 12947 /* Init Function state controlling object */ 12948 bxe_init_func_obj(sc); 12949 12950 /* Initialize HW */ 12951 if (bxe_init_hw(sc, load_code) != 0) { 12952 BLOGE(sc, "HW init failed\n"); 12953 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12954 sc->state = BXE_STATE_CLOSED; 12955 rc = ENXIO; 12956 goto bxe_nic_load_error2; 12957 } 12958 } 12959 12960 /* set ALWAYS_ALIVE bit in shmem */ 12961 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12962 bxe_drv_pulse(sc); 12963 sc->flags |= BXE_NO_PULSE; 12964 12965 /* attach interrupts */ 12966 if (bxe_interrupt_attach(sc) != 0) { 12967 sc->state = BXE_STATE_CLOSED; 12968 rc = ENXIO; 12969 goto bxe_nic_load_error2; 12970 } 12971 12972 bxe_nic_init(sc, load_code); 12973 12974 /* Init per-function objects */ 12975 if (IS_PF(sc)) { 12976 bxe_init_objs(sc); 12977 // XXX bxe_iov_nic_init(sc); 12978 12979 /* set AFEX default VLAN tag to an invalid value */ 12980 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12981 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12982 12983 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12984 rc = bxe_func_start(sc); 12985 if (rc) { 12986 BLOGE(sc, "Function start failed!\n"); 12987 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12988 sc->state = BXE_STATE_ERROR; 12989 goto bxe_nic_load_error3; 12990 } 12991 12992 /* send LOAD_DONE command to MCP */ 12993 if (!BXE_NOMCP(sc)) { 12994 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12995 if (!load_code) { 12996 BLOGE(sc, "MCP response failure, aborting\n"); 12997 sc->state = BXE_STATE_ERROR; 12998 rc = ENXIO; 12999 goto bxe_nic_load_error3; 13000 } 13001 } 13002 13003 rc = bxe_setup_leading(sc); 13004 if (rc) { 13005 BLOGE(sc, "Setup leading failed!\n"); 13006 sc->state = BXE_STATE_ERROR; 13007 goto bxe_nic_load_error3; 13008 } 13009 13010 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 13011 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 13012 if (rc) { 13013 BLOGE(sc, "Queue(%d) setup failed\n", i); 13014 sc->state = BXE_STATE_ERROR; 13015 goto bxe_nic_load_error3; 13016 } 13017 } 13018 13019 rc = bxe_init_rss_pf(sc); 13020 if (rc) { 13021 BLOGE(sc, "PF RSS init failed\n"); 13022 sc->state = BXE_STATE_ERROR; 13023 goto bxe_nic_load_error3; 13024 } 13025 } 13026 /* XXX VF */ 13027#if 0 13028 else { /* VF */ 13029 FOR_EACH_ETH_QUEUE(sc, i) { 13030 rc = bxe_vfpf_setup_q(sc, i); 13031 if (rc) { 13032 BLOGE(sc, "Queue(%d) setup failed\n", i); 13033 sc->state = BXE_STATE_ERROR; 13034 goto bxe_nic_load_error3; 13035 } 13036 } 13037 } 13038#endif 13039 13040 /* now when Clients are configured we are ready to work */ 13041 sc->state = BXE_STATE_OPEN; 13042 13043 /* Configure a ucast MAC */ 13044 if (IS_PF(sc)) { 13045 rc = bxe_set_eth_mac(sc, TRUE); 13046 } 13047#if 0 13048 else { /* IS_VF(sc) */ 13049 rc = bxe_vfpf_set_mac(sc); 13050 } 13051#endif 13052 if (rc) { 13053 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13054 sc->state = BXE_STATE_ERROR; 13055 goto bxe_nic_load_error3; 13056 } 13057 13058#if 0 13059 if (IS_PF(sc) && sc->pending_max) { 13060 /* for AFEX */ 13061 bxe_update_max_mf_config(sc, sc->pending_max); 13062 sc->pending_max = 0; 13063 } 13064#endif 13065 13066 if 
(sc->port.pmf) { 13067 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13068 if (rc) { 13069 sc->state = BXE_STATE_ERROR; 13070 goto bxe_nic_load_error3; 13071 } 13072 } 13073 13074 sc->link_params.feature_config_flags &= 13075 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13076 13077 /* start fast path */ 13078 13079 /* Initialize Rx filter */ 13080 bxe_set_rx_mode(sc); 13081 13082 /* start the Tx */ 13083 switch (/* XXX load_mode */LOAD_OPEN) { 13084 case LOAD_NORMAL: 13085 case LOAD_OPEN: 13086 break; 13087 13088 case LOAD_DIAG: 13089 case LOAD_LOOPBACK_EXT: 13090 sc->state = BXE_STATE_DIAG; 13091 break; 13092 13093 default: 13094 break; 13095 } 13096 13097 if (sc->port.pmf) { 13098 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13099 } else { 13100 bxe_link_status_update(sc); 13101 } 13102 13103 /* start the periodic timer callout */ 13104 bxe_periodic_start(sc); 13105 13106 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13107 /* mark driver is loaded in shmem2 */ 13108 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13109 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13110 (val | 13111 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13112 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13113 } 13114 13115 /* wait for all pending SP commands to complete */ 13116 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13117 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13118 bxe_periodic_stop(sc); 13119 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13120 return (ENXIO); 13121 } 13122 13123#if 0 13124 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13125 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13126 bxe_dcbx_init(sc, FALSE); 13127 } 13128#endif 13129 13130 /* Tell the stack the driver is running! */ 13131 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 13132 13133 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13134 13135 return (0); 13136 13137bxe_nic_load_error3: 13138 13139 if (IS_PF(sc)) { 13140 bxe_int_disable_sync(sc, 1); 13141 13142 /* clean out queued objects */ 13143 bxe_squeeze_objects(sc); 13144 } 13145 13146 bxe_interrupt_detach(sc); 13147 13148bxe_nic_load_error2: 13149 13150 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13151 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13152 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13153 } 13154 13155 sc->port.pmf = 0; 13156 13157bxe_nic_load_error1: 13158 13159 /* clear pf_load status, as it was already set */ 13160 if (IS_PF(sc)) { 13161 bxe_clear_pf_load(sc); 13162 } 13163 13164bxe_nic_load_error0: 13165 13166 bxe_free_fw_stats_mem(sc); 13167 bxe_free_fp_buffers(sc); 13168 bxe_free_mem(sc); 13169 13170 return (rc); 13171} 13172 13173static int 13174bxe_init_locked(struct bxe_softc *sc) 13175{ 13176 int other_engine = SC_PATH(sc) ? 0 : 1; 13177 uint8_t other_load_status, load_status; 13178 uint8_t global = FALSE; 13179 int rc; 13180 13181 BXE_CORE_LOCK_ASSERT(sc); 13182 13183 /* check if the driver is already running */ 13184 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 13185 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13186 return (0); 13187 } 13188 13189 bxe_set_power_state(sc, PCI_PM_D0); 13190 13191 /* 13192 * If parity occurred during the unload, then attentions and/or 13193 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 13194 * loaded on the current engine to complete the recovery. Parity recovery 13195 * is only relevant for PF driver.
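 *
 * Flow below: the first function to take the leader lock runs
 * bxe_leader_reset(); if that fails the driver drops to
 * BXE_RECOVERY_FAILED and asks the user to retry or power cycle.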
13196 */ 13197 if (IS_PF(sc)) { 13198 other_load_status = bxe_get_load_status(sc, other_engine); 13199 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13200 13201 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13202 bxe_chk_parity_attn(sc, &global, TRUE)) { 13203 do { 13204 /* 13205 * If there are attentions and they are in global blocks, set 13206 * the GLOBAL_RESET bit regardless whether it will be this 13207 * function that will complete the recovery or not. 13208 */ 13209 if (global) { 13210 bxe_set_reset_global(sc); 13211 } 13212 13213 /* 13214 * Only the first function on the current engine should try 13215 * to recover in open. In case of attentions in global blocks 13216 * only the first in the chip should try to recover. 13217 */ 13218 if ((!load_status && (!global || !other_load_status)) && 13219 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13220 BLOGI(sc, "Recovered during init\n"); 13221 break; 13222 } 13223 13224 /* recovery has failed... */ 13225 bxe_set_power_state(sc, PCI_PM_D3hot); 13226 sc->recovery_state = BXE_RECOVERY_FAILED; 13227 13228 BLOGE(sc, "Recovery flow hasn't properly " 13229 "completed yet, try again later. " 13230 "If you still see this message after a " 13231 "few retries then power cycle is required.\n"); 13232 13233 rc = ENXIO; 13234 goto bxe_init_locked_done; 13235 } while (0); 13236 } 13237 } 13238 13239 sc->recovery_state = BXE_RECOVERY_DONE; 13240 13241 rc = bxe_nic_load(sc, LOAD_OPEN); 13242 13243bxe_init_locked_done: 13244 13245 if (rc) { 13246 /* Tell the stack the driver is NOT running! */ 13247 BLOGE(sc, "Initialization failed, " 13248 "stack notified driver is NOT running!\n"); 13249 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 13250 } 13251 13252 return (rc); 13253} 13254 13255static int 13256bxe_stop_locked(struct bxe_softc *sc) 13257{ 13258 BXE_CORE_LOCK_ASSERT(sc); 13259 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13260} 13261 13262/* 13263 * Handles controller initialization when called from an unlocked routine. 13264 * ifconfig calls this function. 13265 * 13266 * Returns: 13267 * void 13268 */ 13269static void 13270bxe_init(void *xsc) 13271{ 13272 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13273 13274 BXE_CORE_LOCK(sc); 13275 bxe_init_locked(sc); 13276 BXE_CORE_UNLOCK(sc); 13277} 13278 13279static int 13280bxe_init_ifnet(struct bxe_softc *sc) 13281{ 13282 if_t ifp; 13283 int capabilities; 13284 13285 /* ifconfig entrypoint for media type/status reporting */ 13286 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13287 bxe_ifmedia_update, 13288 bxe_ifmedia_status); 13289 13290 /* set the default interface values */ 13291 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13292 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13293 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13294 13295 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? 
*/ 13296 13297 /* allocate the ifnet structure */ 13298 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 13299 BLOGE(sc, "Interface allocation failed!\n"); 13300 return (ENXIO); 13301 } 13302 13303 if_setsoftc(ifp, sc); 13304 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13305 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 13306 if_setioctlfn(ifp, bxe_ioctl); 13307 if_setstartfn(ifp, bxe_tx_start); 13308 if_setgetcounterfn(ifp, bxe_get_counter); 13309#if __FreeBSD_version >= 800000 13310 if_settransmitfn(ifp, bxe_tx_mq_start); 13311 if_setqflushfn(ifp, bxe_mq_flush); 13312#endif 13313#ifdef FreeBSD8_0 13314 if_settimer(ifp, 0); 13315#endif 13316 if_setinitfn(ifp, bxe_init); 13317 if_setmtu(ifp, sc->mtu); 13318 if_sethwassist(ifp, (CSUM_IP | 13319 CSUM_TCP | 13320 CSUM_UDP | 13321 CSUM_TSO | 13322 CSUM_TCP_IPV6 | 13323 CSUM_UDP_IPV6)); 13324 13325 capabilities = 13326#if __FreeBSD_version < 700000 13327 (IFCAP_VLAN_MTU | 13328 IFCAP_VLAN_HWTAGGING | 13329 IFCAP_HWCSUM | 13330 IFCAP_JUMBO_MTU | 13331 IFCAP_LRO); 13332#else 13333 (IFCAP_VLAN_MTU | 13334 IFCAP_VLAN_HWTAGGING | 13335 IFCAP_VLAN_HWTSO | 13336 IFCAP_VLAN_HWFILTER | 13337 IFCAP_VLAN_HWCSUM | 13338 IFCAP_HWCSUM | 13339 IFCAP_JUMBO_MTU | 13340 IFCAP_LRO | 13341 IFCAP_TSO4 | 13342 IFCAP_TSO6 | 13343 IFCAP_WOL_MAGIC); 13344#endif 13345 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 13346 if_setbaudrate(ifp, IF_Gbps(10)); 13347/* XXX */ 13348 if_setsendqlen(ifp, sc->tx_ring_size); 13349 if_setsendqready(ifp); 13350/* XXX */ 13351 13352 sc->ifp = ifp; 13353 13354 /* attach to the Ethernet interface list */ 13355 ether_ifattach(ifp, sc->link_params.mac_addr); 13356 13357 return (0); 13358} 13359 13360static void 13361bxe_deallocate_bars(struct bxe_softc *sc) 13362{ 13363 int i; 13364 13365 for (i = 0; i < MAX_BARS; i++) { 13366 if (sc->bar[i].resource != NULL) { 13367 bus_release_resource(sc->dev, 13368 SYS_RES_MEMORY, 13369 sc->bar[i].rid, 13370 sc->bar[i].resource); 13371 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13372 i, PCIR_BAR(i)); 13373 } 13374 } 13375} 13376 13377static int 13378bxe_allocate_bars(struct bxe_softc *sc) 13379{ 13380 u_int flags; 13381 int i; 13382 13383 memset(sc->bar, 0, sizeof(sc->bar)); 13384 13385 for (i = 0; i < MAX_BARS; i++) { 13386 13387 /* memory resources reside at BARs 0, 2, 4 */ 13388 /* Run `pciconf -lb` to see mappings */ 13389 if ((i != 0) && (i != 2) && (i != 4)) { 13390 continue; 13391 } 13392 13393 sc->bar[i].rid = PCIR_BAR(i); 13394 13395 flags = RF_ACTIVE; 13396 if (i == 0) { 13397 flags |= RF_SHAREABLE; 13398 } 13399 13400 if ((sc->bar[i].resource = 13401 bus_alloc_resource_any(sc->dev, 13402 SYS_RES_MEMORY, 13403 &sc->bar[i].rid, 13404 flags)) == NULL) { 13405#if 0 13406 /* BAR4 doesn't exist for E1 */ 13407 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13408 i, PCIR_BAR(i)); 13409#endif 13410 return (0); 13411 } 13412 13413 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13414 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13415 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13416 13417 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13418 i, PCIR_BAR(i), 13419 (void *)rman_get_start(sc->bar[i].resource), 13420 (void *)rman_get_end(sc->bar[i].resource), 13421 rman_get_size(sc->bar[i].resource), 13422 (void *)sc->bar[i].kva); 13423 } 13424 13425 return (0); 13426} 13427 13428static void 13429bxe_get_function_num(struct bxe_softc *sc) 13430{ 13431 uint32_t 
val = 0; 13432 13433 /* 13434 * Read the ME register to get the function number. The ME register 13435 * holds the relative-function number and absolute-function number. The 13436 * absolute-function number appears only in E2 and above. Before that 13437 * these bits always contained zero, therefore we cannot blindly use them. 13438 */ 13439 13440 val = REG_RD(sc, BAR_ME_REGISTER); 13441 13442 sc->pfunc_rel = 13443 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13444 sc->path_id = 13445 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13446 13447 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13448 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13449 } else { 13450 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13451 } 13452 13453 BLOGD(sc, DBG_LOAD, 13454 "Relative function %d, Absolute function %d, Path %d\n", 13455 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13456} 13457 13458static uint32_t 13459bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13460{ 13461 uint32_t shmem2_size; 13462 uint32_t offset; 13463 uint32_t mf_cfg_offset_value; 13464 13465 /* Non 57712 */ 13466 offset = (SHMEM_RD(sc, func_mb) + 13467 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13468 13469 /* 57712 plus */ 13470 if (sc->devinfo.shmem2_base != 0) { 13471 shmem2_size = SHMEM2_RD(sc, size); 13472 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13473 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13474 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13475 offset = mf_cfg_offset_value; 13476 } 13477 } 13478 } 13479 13480 return (offset); 13481} 13482 13483static uint32_t 13484bxe_pcie_capability_read(struct bxe_softc *sc, 13485 int reg, 13486 int width) 13487{ 13488 int pcie_reg; 13489 13490 /* ensure PCIe capability is enabled */ 13491 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13492 if (pcie_reg != 0) { 13493 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13494 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13495 } 13496 } 13497 13498 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13499 13500 return (0); 13501} 13502 13503static uint8_t 13504bxe_is_pcie_pending(struct bxe_softc *sc) 13505{ 13506 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13507 PCIM_EXP_STA_TRANSACTION_PND); 13508} 13509 13510/* 13511 * Walk the PCI capabilities list for the device to find what features are 13512 * supported. These capabilities may be enabled/disabled by firmware so it's 13513 * best to walk the list rather than make assumptions. 13514 */ 13515static void 13516bxe_probe_pci_caps(struct bxe_softc *sc) 13517{ 13518 uint16_t link_status; 13519 int reg; 13520 13521 /* check if PCI Power Management is enabled */ 13522 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13523 if (reg != 0) { 13524 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13525 13526 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13527 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13528 } 13529 } 13530 13531 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13532 13533 /* handle PCIe 2.0 workarounds for 57710 */ 13534 if (CHIP_IS_E1(sc)) { 13535 /* workaround for 57710 errata E4_57710_27462 */ 13536 sc->devinfo.pcie_link_speed = 13537 (REG_RD(sc, 0x3d04) & (1 << 24)) ?
2 : 1; 13538 13539 /* workaround for 57710 errata E4_57710_27488 */ 13540 sc->devinfo.pcie_link_width = 13541 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13542 if (sc->devinfo.pcie_link_speed > 1) { 13543 sc->devinfo.pcie_link_width = 13544 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13545 } 13546 } else { 13547 sc->devinfo.pcie_link_speed = 13548 (link_status & PCIM_LINK_STA_SPEED); 13549 sc->devinfo.pcie_link_width = 13550 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13551 } 13552 13553 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13554 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13555 13556 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13557 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13558 13559 /* check if MSI capability is enabled */ 13560 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13561 if (reg != 0) { 13562 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13563 13564 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13565 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13566 } 13567 } 13568 13569 /* check if MSI-X capability is enabled */ 13570 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13571 if (reg != 0) { 13572 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13573 13574 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13575 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13576 } 13577 } 13578} 13579 13580static int 13581bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13582{ 13583 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13584 uint32_t val; 13585 13586 /* get the outer vlan if we're in switch-dependent mode */ 13587 13588 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13589 mf_info->ext_id = (uint16_t)val; 13590 13591 mf_info->multi_vnics_mode = 1; 13592 13593 if (!VALID_OVLAN(mf_info->ext_id)) { 13594 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13595 return (1); 13596 } 13597 13598 /* get the capabilities */ 13599 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13600 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13601 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13602 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13603 FUNC_MF_CFG_PROTOCOL_FCOE) { 13604 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13605 } else { 13606 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13607 } 13608 13609 mf_info->vnics_per_port = 13610 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13611 13612 return (0); 13613} 13614 13615static uint32_t 13616bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13617{ 13618 uint32_t retval = 0; 13619 uint32_t val; 13620 13621 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13622 13623 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13624 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13625 retval |= MF_PROTO_SUPPORT_ETHERNET; 13626 } 13627 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13628 retval |= MF_PROTO_SUPPORT_ISCSI; 13629 } 13630 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13631 retval |= MF_PROTO_SUPPORT_FCOE; 13632 } 13633 } 13634 13635 return (retval); 13636} 13637 13638static int 13639bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13640{ 13641 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13642 uint32_t val; 13643 13644 /* 13645 * There is no outer vlan if we're in switch-independent mode. 13646 * If the mac is valid then assume multi-function.
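 * (Concretely: multi_vnics_mode below is derived from any bit being set
 * under MACP_FUNC_CFG_FLAGS_MASK in the function's ext config.)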
13647 */ 13648 13649 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13650 13651 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13652 13653 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13654 13655 mf_info->vnics_per_port = 13656 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13657 13658 return (0); 13659} 13660 13661static int 13662bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13663{ 13664 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13665 uint32_t e1hov_tag; 13666 uint32_t func_config; 13667 uint32_t niv_config; 13668 13669 mf_info->multi_vnics_mode = 1; 13670 13671 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13672 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13673 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13674 13675 mf_info->ext_id = 13676 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13677 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13678 13679 mf_info->default_vlan = 13680 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13681 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13682 13683 mf_info->niv_allowed_priorities = 13684 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13685 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13686 13687 mf_info->niv_default_cos = 13688 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13689 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13690 13691 mf_info->afex_vlan_mode = 13692 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13693 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13694 13695 mf_info->niv_mba_enabled = 13696 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13697 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13698 13699 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13700 13701 mf_info->vnics_per_port = 13702 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13703 13704 return (0); 13705} 13706 13707static int 13708bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13709{ 13710 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13711 uint32_t mf_cfg1; 13712 uint32_t mf_cfg2; 13713 uint32_t ovlan1; 13714 uint32_t ovlan2; 13715 uint8_t i, j; 13716 13717 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13718 SC_PORT(sc)); 13719 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13720 mf_info->mf_config[SC_VN(sc)]); 13721 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13722 mf_info->multi_vnics_mode); 13723 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13724 mf_info->vnics_per_port); 13725 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13726 mf_info->ext_id); 13727 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13728 mf_info->min_bw[0], mf_info->min_bw[1], 13729 mf_info->min_bw[2], mf_info->min_bw[3]); 13730 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13731 mf_info->max_bw[0], mf_info->max_bw[1], 13732 mf_info->max_bw[2], mf_info->max_bw[3]); 13733 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13734 sc->mac_addr_str); 13735 13736 /* various MF mode sanity checks... 
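 * performed below: the function must not be hidden, vnics_per_port and
 * multi_vnics_mode must agree, and in SD mode every non-hidden function
 * needs a valid outer VLAN that is unique on its port.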
*/ 13737 13738 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13739 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13740 SC_PORT(sc)); 13741 return (1); 13742 } 13743 13744 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13745 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13746 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13747 return (1); 13748 } 13749 13750 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13751 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13752 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13753 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13754 SC_VN(sc), OVLAN(sc)); 13755 return (1); 13756 } 13757 13758 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13759 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13760 mf_info->multi_vnics_mode, OVLAN(sc)); 13761 return (1); 13762 } 13763 13764 /* 13765 * Verify all functions are either MF or SF mode. If MF, make 13766 * sure that all non-hidden functions have a valid ovlan. If SF, 13767 * make sure that all non-hidden functions have an invalid ovlan. 13768 */ 13769 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13770 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13771 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13772 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13773 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13774 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13775 BLOGE(sc, "mf_mode=SD function %d MF config " 13776 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13777 i, mf_info->multi_vnics_mode, ovlan1); 13778 return (1); 13779 } 13780 } 13781 13782 /* Verify all funcs on the same port each have a different ovlan. */ 13783 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13784 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13785 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13786 /* iterate from the next function on the port to the max func */ 13787 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13788 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13789 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13790 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13791 VALID_OVLAN(ovlan1) && 13792 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13793 VALID_OVLAN(ovlan2) && 13794 (ovlan1 == ovlan2)) { 13795 BLOGE(sc, "mf_mode=SD functions %d and %d " 13796 "have the same ovlan (%d)\n", 13797 i, j, ovlan1); 13798 return (1); 13799 } 13800 } 13801 } 13802 } /* MULTI_FUNCTION_SD */ 13803 13804 return (0); 13805} 13806 13807static int 13808bxe_get_mf_cfg_info(struct bxe_softc *sc) 13809{ 13810 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13811 uint32_t val, mac_upper; 13812 uint8_t i, vnic; 13813 13814 /* initialize mf_info defaults */ 13815 mf_info->vnics_per_port = 1; 13816 mf_info->multi_vnics_mode = FALSE; 13817 mf_info->path_has_ovlan = FALSE; 13818 mf_info->mf_mode = SINGLE_FUNCTION; 13819 13820 if (!CHIP_IS_MF_CAP(sc)) { 13821 return (0); 13822 } 13823 13824 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13825 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13826 return (1); 13827 } 13828 13829 /* get the MF mode (switch dependent / independent / single-function) */ 13830 13831 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13832 13833 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13834 { 13835 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13836 13837 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13838 13839 /* check for legal upper MAC bytes */ 13840 if (mac_upper != 
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13841 mf_info->mf_mode = MULTI_FUNCTION_SI; 13842 } else { 13843 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13844 } 13845 13846 break; 13847 13848 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13849 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13850 13851 /* get outer vlan configuration */ 13852 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13853 13854 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13855 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13856 mf_info->mf_mode = MULTI_FUNCTION_SD; 13857 } else { 13858 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13859 } 13860 13861 break; 13862 13863 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13864 13865 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13866 return (0); 13867 13868 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13869 13870 /* 13871 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13872 * and the MAC address is valid. 13873 */ 13874 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13875 13876 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13877 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13878 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13879 } else { 13880 BLOGE(sc, "Invalid config for AFEX mode\n"); 13881 } 13882 13883 break; 13884 13885 default: 13886 13887 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13888 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13889 13890 return (1); 13891 } 13892 13893 /* set path mf_mode (which could be different than function mf_mode) */ 13894 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13895 mf_info->path_has_ovlan = TRUE; 13896 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13897 /* 13898 * Decide on path multi vnics mode. If we're not in MF mode and in 13899 * 4-port mode, this is good enough to check vnic-0 of the other port 13900 * on the same path 13901 */ 13902 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13903 uint8_t other_port = !(PORT_ID(sc) & 1); 13904 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13905 13906 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13907 13908 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
1 : 0; 13909 } 13910 } 13911 13912 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13913 /* invalid MF config */ 13914 if (SC_VN(sc) >= 1) { 13915 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13916 return (1); 13917 } 13918 13919 return (0); 13920 } 13921 13922 /* get the MF configuration */ 13923 mf_info->mf_config[SC_VN(sc)] = 13924 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13925 13926 switch(mf_info->mf_mode) 13927 { 13928 case MULTI_FUNCTION_SD: 13929 13930 bxe_get_shmem_mf_cfg_info_sd(sc); 13931 break; 13932 13933 case MULTI_FUNCTION_SI: 13934 13935 bxe_get_shmem_mf_cfg_info_si(sc); 13936 break; 13937 13938 case MULTI_FUNCTION_AFEX: 13939 13940 bxe_get_shmem_mf_cfg_info_niv(sc); 13941 break; 13942 13943 default: 13944 13945 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13946 mf_info->mf_mode); 13947 return (1); 13948 } 13949 13950 /* get the congestion management parameters */ 13951 13952 vnic = 0; 13953 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13954 /* get min/max bw */ 13955 val = MFCFG_RD(sc, func_mf_config[i].config); 13956 mf_info->min_bw[vnic] = 13957 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13958 mf_info->max_bw[vnic] = 13959 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13960 vnic++; 13961 } 13962 13963 return (bxe_check_valid_mf_cfg(sc)); 13964} 13965 13966static int 13967bxe_get_shmem_info(struct bxe_softc *sc) 13968{ 13969 int port; 13970 uint32_t mac_hi, mac_lo, val; 13971 13972 port = SC_PORT(sc); 13973 mac_hi = mac_lo = 0; 13974 13975 sc->link_params.sc = sc; 13976 sc->link_params.port = port; 13977 13978 /* get the hardware config info */ 13979 sc->devinfo.hw_config = 13980 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13981 sc->devinfo.hw_config2 = 13982 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13983 13984 sc->link_params.hw_led_mode = 13985 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13986 SHARED_HW_CFG_LED_MODE_SHIFT); 13987 13988 /* get the port feature config */ 13989 sc->port.config = 13990 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13991 13992 /* get the link params */ 13993 sc->link_params.speed_cap_mask[0] = 13994 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13995 sc->link_params.speed_cap_mask[1] = 13996 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13997 13998 /* get the lane config */ 13999 sc->link_params.lane_config = 14000 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 14001 14002 /* get the link config */ 14003 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 14004 sc->port.link_config[ELINK_INT_PHY] = val; 14005 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 14006 sc->port.link_config[ELINK_EXT_PHY1] = 14007 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 14008 14009 /* get the override preemphasis flag and enable it or turn it off */ 14010 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 14011 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 14012 sc->link_params.feature_config_flags |= 14013 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14014 } else { 14015 sc->link_params.feature_config_flags &= 14016 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14017 } 14018 14019 /* get the initial value of the link params */ 14020 sc->link_params.multi_phy_config = 14021 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 14022 14023 /* get external phy info */ 14024 sc->port.ext_phy_config = 14025 SHMEM_RD(sc, 
dev_info.port_hw_config[port].external_phy_config); 14026 14027 /* get the multifunction configuration */ 14028 bxe_get_mf_cfg_info(sc); 14029 14030 /* get the mac address */ 14031 if (IS_MF(sc)) { 14032 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14033 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14034 } else { 14035 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14036 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14037 } 14038 14039 if ((mac_lo == 0) && (mac_hi == 0)) { 14040 *sc->mac_addr_str = 0; 14041 BLOGE(sc, "No Ethernet address programmed!\n"); 14042 } else { 14043 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14044 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14045 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14046 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14047 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14048 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14049 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14050 "%02x:%02x:%02x:%02x:%02x:%02x", 14051 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14052 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14053 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14054 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14055 } 14056 14057#if 0 14058 if (!IS_MF(sc) && 14059 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14060 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14061 sc->flags |= BXE_NO_ISCSI; 14062 } 14063 if (!IS_MF(sc) && 14064 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14065 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14066 sc->flags |= BXE_NO_FCOE_FLAG; 14067 } 14068#endif 14069 14070 return (0); 14071} 14072 14073static void 14074bxe_get_tunable_params(struct bxe_softc *sc) 14075{ 14076 /* sanity checks */ 14077 14078 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14079 (bxe_interrupt_mode != INTR_MODE_MSI) && 14080 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14081 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14082 bxe_interrupt_mode = INTR_MODE_MSIX; 14083 } 14084 14085 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14086 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14087 bxe_queue_count = 0; 14088 } 14089 14090 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14091 if (bxe_max_rx_bufs == 0) { 14092 bxe_max_rx_bufs = RX_BD_USABLE; 14093 } else { 14094 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14095 bxe_max_rx_bufs = 2048; 14096 } 14097 } 14098 14099 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14100 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14101 bxe_hc_rx_ticks = 25; 14102 } 14103 14104 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14105 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14106 bxe_hc_tx_ticks = 50; 14107 } 14108 14109 if (bxe_max_aggregation_size == 0) { 14110 bxe_max_aggregation_size = TPA_AGG_SIZE; 14111 } 14112 14113 if (bxe_max_aggregation_size > 0xffff) { 14114 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14115 bxe_max_aggregation_size); 14116 bxe_max_aggregation_size = TPA_AGG_SIZE; 14117 } 14118 14119 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14120 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14121 bxe_mrrs = -1; 14122 } 14123 14124 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14125 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 14126 
bxe_autogreeen = 0; 14127 } 14128 14129 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14130 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14131 bxe_udp_rss = 0; 14132 } 14133 14134 /* pull in user settings */ 14135 14136 sc->interrupt_mode = bxe_interrupt_mode; 14137 sc->max_rx_bufs = bxe_max_rx_bufs; 14138 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14139 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14140 sc->max_aggregation_size = bxe_max_aggregation_size; 14141 sc->mrrs = bxe_mrrs; 14142 sc->autogreeen = bxe_autogreeen; 14143 sc->udp_rss = bxe_udp_rss; 14144 14145 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14146 sc->num_queues = 1; 14147 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14148 sc->num_queues = 14149 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14150 MAX_RSS_CHAINS); 14151 if (sc->num_queues > mp_ncpus) { 14152 sc->num_queues = mp_ncpus; 14153 } 14154 } 14155 14156 BLOGD(sc, DBG_LOAD, 14157 "User Config: " 14158 "debug=0x%lx " 14159 "interrupt_mode=%d " 14160 "queue_count=%d " 14161 "hc_rx_ticks=%d " 14162 "hc_tx_ticks=%d " 14163 "rx_budget=%d " 14164 "max_aggregation_size=%d " 14165 "mrrs=%d " 14166 "autogreeen=%d " 14167 "udp_rss=%d\n", 14168 bxe_debug, 14169 sc->interrupt_mode, 14170 sc->num_queues, 14171 sc->hc_rx_ticks, 14172 sc->hc_tx_ticks, 14173 bxe_rx_budget, 14174 sc->max_aggregation_size, 14175 sc->mrrs, 14176 sc->autogreeen, 14177 sc->udp_rss); 14178} 14179 14180static void 14181bxe_media_detect(struct bxe_softc *sc) 14182{ 14183 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14184 switch (sc->link_params.phy[phy_idx].media_type) { 14185 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14186 case ELINK_ETH_PHY_XFP_FIBER: 14187 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14188 sc->media = IFM_10G_SR; 14189 break; 14190 case ELINK_ETH_PHY_SFP_1G_FIBER: 14191 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14192 sc->media = IFM_1000_SX; 14193 break; 14194 case ELINK_ETH_PHY_KR: 14195 case ELINK_ETH_PHY_CX4: 14196 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14197 sc->media = IFM_10G_CX4; 14198 break; 14199 case ELINK_ETH_PHY_DA_TWINAX: 14200 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14201 sc->media = IFM_10G_TWINAX; 14202 break; 14203 case ELINK_ETH_PHY_BASE_T: 14204 if (sc->link_params.speed_cap_mask[0] & 14205 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14206 BLOGI(sc, "Found 10GBase-T media.\n"); 14207 sc->media = IFM_10G_T; 14208 } else { 14209 BLOGI(sc, "Found 1000Base-T media.\n"); 14210 sc->media = IFM_1000_T; 14211 } 14212 break; 14213 case ELINK_ETH_PHY_NOT_PRESENT: 14214 BLOGI(sc, "Media not present.\n"); 14215 sc->media = 0; 14216 break; 14217 case ELINK_ETH_PHY_UNSPECIFIED: 14218 default: 14219 BLOGI(sc, "Unknown media!\n"); 14220 sc->media = 0; 14221 break; 14222 } 14223} 14224 14225#define GET_FIELD(value, fname) \ 14226 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14227#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14228#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14229 14230static int 14231bxe_get_igu_cam_info(struct bxe_softc *sc) 14232{ 14233 int pfid = SC_FUNC(sc); 14234 int igu_sb_id; 14235 uint32_t val; 14236 uint8_t fid, igu_sb_cnt = 0; 14237 14238 sc->igu_base_sb = 0xff; 14239 14240 if (CHIP_INT_MODE_IS_BC(sc)) { 14241 int vn = SC_VN(sc); 14242 igu_sb_cnt = sc->igu_sb_cnt; 14243 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14244 FP_SB_MAX_E1x); 14245 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14246 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 14247 return (0); 14248 } 14249 14250 /* IGU in normal mode - read CAM */ 14251 for (igu_sb_id = 0; 14252 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 14253 igu_sb_id++) { 14254 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 14255 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 14256 continue; 14257 } 14258 fid = IGU_FID(val); 14259 if ((fid & IGU_FID_ENCODE_IS_PF)) { 14260 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 14261 continue; 14262 } 14263 if (IGU_VEC(val) == 0) { 14264 /* default status block */ 14265 sc->igu_dsb_id = igu_sb_id; 14266 } else { 14267 if (sc->igu_base_sb == 0xff) { 14268 sc->igu_base_sb = igu_sb_id; 14269 } 14270 igu_sb_cnt++; 14271 } 14272 } 14273 } 14274 14275 /* 14276 * Due to new PF resource allocation by MFW T7.4 and above, it's possible 14277 * that the number of CAM entries will not equal the value advertised in 14278 * PCI. The driver should use the minimum of the two as the actual status 14279 * block count. 14280 */ 14281 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 14282 14283 if (igu_sb_cnt == 0) { 14284 BLOGE(sc, "CAM configuration error\n"); 14285 return (-1); 14286 } 14287 14288 return (0); 14289} 14290 14291/* 14292 * Gather various information from the device config space, the device itself, 14293 * shmem, and the user input. 14294 */ 14295static int 14296bxe_get_device_info(struct bxe_softc *sc) 14297{ 14298 uint32_t val; 14299 int rc; 14300 14301 /* Get the data for the device */ 14302 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 14303 sc->devinfo.device_id = pci_get_device(sc->dev); 14304 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 14305 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 14306 14307 /* get the chip revision (chip metal comes from pci config space) */ 14308 sc->devinfo.chip_id = 14309 sc->link_params.chip_id = 14310 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 14311 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 14312 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 14313 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 14314 14315 /* force 57811 according to MISC register */ 14316 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 14317 if (CHIP_IS_57810(sc)) { 14318 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 14319 (sc->devinfo.chip_id & 0x0000ffff)); 14320 } else if (CHIP_IS_57810_MF(sc)) { 14321 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 14322 (sc->devinfo.chip_id & 0x0000ffff)); 14323 } 14324 sc->devinfo.chip_id |= 0x1; 14325 } 14326 14327 BLOGD(sc, DBG_LOAD, 14328 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 14329 sc->devinfo.chip_id, 14330 ((sc->devinfo.chip_id >> 16) & 0xffff), 14331 ((sc->devinfo.chip_id >> 12) & 0xf), 14332 ((sc->devinfo.chip_id >> 4) & 0xff), 14333 ((sc->devinfo.chip_id >> 0) & 0xf)); 14334 14335 val = (REG_RD(sc, 0x2874) & 0x55); 14336 if ((sc->devinfo.chip_id & 0x1) || 14337 (CHIP_IS_E1(sc) && val) || 14338 (CHIP_IS_E1H(sc) && (val == 0x55))) { 14339 sc->flags |= BXE_ONE_PORT_FLAG; 14340 BLOGD(sc, DBG_LOAD, "single port device\n"); 14341 } 14342 14343 /* set the doorbell size */ 14344 sc->doorbell_size = (1 << BXE_DB_SHIFT); 14345 14346 /* determine whether the device is in 2 port or 4 port mode */ 14347 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ 14348 if (CHIP_IS_E2E3(sc)) { 14349 /* 14350 * Read port4mode_en_ovwr[0]: 14351 * If 1, four port mode is in port4mode_en_ovwr[1]. 14352 * If 0, four port mode is in port4mode_en[0]. 
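 * Equivalently, as an illustrative sketch only (RD() standing in for
 * REG_RD(sc, ...)):
 *     ovwr = RD(MISC_REG_PORT4MODE_EN_OVWR);
 *     four_port_mode = (ovwr & 0x1) ? ((ovwr >> 1) & 0x1)
 *                                   : RD(MISC_REG_PORT4MODE_EN);
 * which is what the reads below compute into 'val'.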
14353 */ 14354 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14355 if (val & 1) { 14356 val = ((val >> 1) & 1); 14357 } else { 14358 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14359 } 14360 14361 sc->devinfo.chip_port_mode = 14362 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14363 14364 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14365 } 14366 14367 /* get the function and path info for the device */ 14368 bxe_get_function_num(sc); 14369 14370 /* get the shared memory base address */ 14371 sc->devinfo.shmem_base = 14372 sc->link_params.shmem_base = 14373 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14374 sc->devinfo.shmem2_base = 14375 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 14376 MISC_REG_GENERIC_CR_0)); 14377 14378 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", 14379 sc->devinfo.shmem_base, sc->devinfo.shmem2_base); 14380 14381 if (!sc->devinfo.shmem_base) { 14382 /* this should ONLY prevent upcoming shmem reads */ 14383 BLOGI(sc, "MCP not active\n"); 14384 sc->flags |= BXE_NO_MCP_FLAG; 14385 return (0); 14386 } 14387 14388 /* make sure the shared memory contents are valid */ 14389 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 14390 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 14391 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 14392 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); 14393 return (0); 14394 } 14395 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); 14396 14397 /* get the bootcode version */ 14398 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 14399 snprintf(sc->devinfo.bc_ver_str, 14400 sizeof(sc->devinfo.bc_ver_str), 14401 "%d.%d.%d", 14402 ((sc->devinfo.bc_ver >> 24) & 0xff), 14403 ((sc->devinfo.bc_ver >> 16) & 0xff), 14404 ((sc->devinfo.bc_ver >> 8) & 0xff)); 14405 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); 14406 14407 /* get the bootcode shmem address */ 14408 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); 14409 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); 14410 14411 /* clean indirect addresses as they're not used */ 14412 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 14413 if (IS_PF(sc)) { 14414 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 14415 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 14416 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 14417 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 14418 if (CHIP_IS_E1x(sc)) { 14419 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 14420 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 14421 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 14422 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 14423 } 14424 14425 /* 14426 * Enable internal target-read (in case we are probed after PF 14427 * FLR). Must be done prior to any BAR read access. 
Only for 14428 * 57712 and up 14429 */ 14430 if (!CHIP_IS_E1x(sc)) { 14431 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 14432 } 14433 } 14434 14435 /* get the nvram size */ 14436 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 14437 sc->devinfo.flash_size = 14438 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 14439 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); 14440 14441 /* get PCI capabilities */ 14442 bxe_probe_pci_caps(sc); 14443 14444 bxe_set_power_state(sc, PCI_PM_D0); 14445 14446 /* get various configuration parameters from shmem */ 14447 bxe_get_shmem_info(sc); 14448 14449 if (sc->devinfo.pcie_msix_cap_reg != 0) { 14450 val = pci_read_config(sc->dev, 14451 (sc->devinfo.pcie_msix_cap_reg + 14452 PCIR_MSIX_CTRL), 14453 2); 14454 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); 14455 } else { 14456 sc->igu_sb_cnt = 1; 14457 } 14458 14459 sc->igu_base_addr = BAR_IGU_INTMEM; 14460 14461 /* initialize IGU parameters */ 14462 if (CHIP_IS_E1x(sc)) { 14463 sc->devinfo.int_block = INT_BLOCK_HC; 14464 sc->igu_dsb_id = DEF_SB_IGU_ID; 14465 sc->igu_base_sb = 0; 14466 } else { 14467 sc->devinfo.int_block = INT_BLOCK_IGU; 14468 14469 /* do not allow device reset during IGU info processing */ 14470 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14471 14472 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 14473 14474 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14475 int tout = 5000; 14476 14477 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); 14478 14479 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 14480 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 14481 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 14482 14483 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14484 tout--; 14485 DELAY(1000); 14486 } 14487 14488 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14489 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); 14490 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14491 return (-1); 14492 } 14493 } 14494 14495 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14496 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); 14497 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 14498 } else { 14499 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); 14500 } 14501 14502 rc = bxe_get_igu_cam_info(sc); 14503 14504 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14505 14506 if (rc) { 14507 return (rc); 14508 } 14509 } 14510 14511 /* 14512 * Get base FW non-default (fast path) status block ID. This value is 14513 * used to initialize the fw_sb_id saved on the fp/queue structure to 14514 * determine the id used by the FW. 14515 */ 14516 if (CHIP_IS_E1x(sc)) { 14517 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 14518 } else { 14519 /* 14520 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 14521 * the same queue are indicated on the same IGU SB). So we prefer 14522 * FW and IGU SBs to be the same value. 
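 * bxe_get_igu_cam_info() above has already located the first IGU SB
 * owned by this PF in the CAM, so base_fw_ndsb can simply mirror
 * igu_base_sb here.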
14523 */ 14524 sc->base_fw_ndsb = sc->igu_base_sb; 14525 } 14526 14527 BLOGD(sc, DBG_LOAD, 14528 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14529 sc->igu_dsb_id, sc->igu_base_sb, 14530 sc->igu_sb_cnt, sc->base_fw_ndsb); 14531 14532 elink_phy_probe(&sc->link_params); 14533 14534 return (0); 14535} 14536 14537static void 14538bxe_link_settings_supported(struct bxe_softc *sc, 14539 uint32_t switch_cfg) 14540{ 14541 uint32_t cfg_size = 0; 14542 uint32_t idx; 14543 uint8_t port = SC_PORT(sc); 14544 14545 /* aggregation of supported attributes of all external phys */ 14546 sc->port.supported[0] = 0; 14547 sc->port.supported[1] = 0; 14548 14549 switch (sc->link_params.num_phys) { 14550 case 1: 14551 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14552 cfg_size = 1; 14553 break; 14554 case 2: 14555 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14556 cfg_size = 1; 14557 break; 14558 case 3: 14559 if (sc->link_params.multi_phy_config & 14560 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14561 sc->port.supported[1] = 14562 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14563 sc->port.supported[0] = 14564 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14565 } else { 14566 sc->port.supported[0] = 14567 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14568 sc->port.supported[1] = 14569 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14570 } 14571 cfg_size = 2; 14572 break; 14573 } 14574 14575 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14576 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14577 SHMEM_RD(sc, 14578 dev_info.port_hw_config[port].external_phy_config), 14579 SHMEM_RD(sc, 14580 dev_info.port_hw_config[port].external_phy_config2)); 14581 return; 14582 } 14583 14584 if (CHIP_IS_E3(sc)) 14585 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14586 else { 14587 switch (switch_cfg) { 14588 case ELINK_SWITCH_CFG_1G: 14589 sc->port.phy_addr = 14590 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14591 break; 14592 case ELINK_SWITCH_CFG_10G: 14593 sc->port.phy_addr = 14594 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14595 break; 14596 default: 14597 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14598 sc->port.link_config[0]); 14599 return; 14600 } 14601 } 14602 14603 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14604 14605 /* mask what we support according to speed_cap_mask per configuration */ 14606 for (idx = 0; idx < cfg_size; idx++) { 14607 if (!(sc->link_params.speed_cap_mask[idx] & 14608 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14609 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14610 } 14611 14612 if (!(sc->link_params.speed_cap_mask[idx] & 14613 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14614 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14615 } 14616 14617 if (!(sc->link_params.speed_cap_mask[idx] & 14618 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14619 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14620 } 14621 14622 if (!(sc->link_params.speed_cap_mask[idx] & 14623 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14624 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14625 } 14626 14627 if (!(sc->link_params.speed_cap_mask[idx] & 14628 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14629 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14630 } 14631 14632 if (!(sc->link_params.speed_cap_mask[idx] & 14633 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14634 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14635 } 14636 14637 if (!(sc->link_params.speed_cap_mask[idx] & 14638 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14639 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14640 } 14641 14642 if (!(sc->link_params.speed_cap_mask[idx] & 14643 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14644 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14645 } 14646 } 14647 14648 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14649 sc->port.supported[0], sc->port.supported[1]); 14650} 14651 14652static void 14653bxe_link_settings_requested(struct bxe_softc *sc) 14654{ 14655 uint32_t link_config; 14656 uint32_t idx; 14657 uint32_t cfg_size = 0; 14658 14659 sc->port.advertising[0] = 0; 14660 sc->port.advertising[1] = 0; 14661 14662 switch (sc->link_params.num_phys) { 14663 case 1: 14664 case 2: 14665 cfg_size = 1; 14666 break; 14667 case 3: 14668 cfg_size = 2; 14669 break; 14670 } 14671 14672 for (idx = 0; idx < cfg_size; idx++) { 14673 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14674 link_config = sc->port.link_config[idx]; 14675 14676 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14677 case PORT_FEATURE_LINK_SPEED_AUTO: 14678 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14679 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14680 sc->port.advertising[idx] |= sc->port.supported[idx]; 14681 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14682 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14683 sc->port.advertising[idx] |= 14684 (ELINK_SUPPORTED_100baseT_Half | 14685 ELINK_SUPPORTED_100baseT_Full); 14686 } else { 14687 /* force 10G, no AN */ 14688 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14689 sc->port.advertising[idx] |= 14690 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14691 continue; 14692 } 14693 break; 14694 14695 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14696 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14697 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14698 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14699 ADVERTISED_TP); 14700 } else { 14701 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14702 "speed_cap_mask=0x%08x\n", 14703 link_config, sc->link_params.speed_cap_mask[idx]); 14704 return; 14705 } 14706 break; 14707 14708 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14709 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14710 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14711 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14712 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14713 ADVERTISED_TP); 14714 } else { 14715 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14716 "speed_cap_mask=0x%08x\n", 14717 link_config, sc->link_params.speed_cap_mask[idx]); 14718 return; 14719 } 14720 break; 14721 14722 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14723 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14724 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14725 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14726 ADVERTISED_TP); 14727 } else { 14728 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14729 "speed_cap_mask=0x%08x\n", 14730 link_config, sc->link_params.speed_cap_mask[idx]); 14731 return; 14732 } 14733 break; 14734 14735 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14736 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14737 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14738 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14739 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14740 ADVERTISED_TP); 14741 } else { 14742 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14743 "speed_cap_mask=0x%08x\n", 14744 link_config, sc->link_params.speed_cap_mask[idx]); 14745 return; 14746 } 14747 break; 14748 14749 case PORT_FEATURE_LINK_SPEED_1G: 14750 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14751 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14752 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14753 ADVERTISED_TP); 14754 } else { 14755 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14756 "speed_cap_mask=0x%08x\n", 14757 link_config, sc->link_params.speed_cap_mask[idx]); 14758 return; 14759 } 14760 break; 14761 14762 case PORT_FEATURE_LINK_SPEED_2_5G: 14763 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14764 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14765 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14766 ADVERTISED_TP); 14767 } else { 14768 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14769 "speed_cap_mask=0x%08x\n", 14770 link_config, sc->link_params.speed_cap_mask[idx]); 14771 return; 14772 } 14773 break; 14774 14775 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14776 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14777 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14778 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14779 ADVERTISED_FIBRE); 14780 } else { 14781 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14782 "speed_cap_mask=0x%08x\n", 14783 link_config, sc->link_params.speed_cap_mask[idx]); 14784 return; 14785 } 14786 break; 14787 14788 case PORT_FEATURE_LINK_SPEED_20G: 14789 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14790 break; 14791 14792 default: 14793 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14794 "speed_cap_mask=0x%08x\n", 14795 link_config, sc->link_params.speed_cap_mask[idx]); 14796 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14797 sc->port.advertising[idx] = sc->port.supported[idx]; 14798 break; 14799 } 14800 14801 sc->link_params.req_flow_ctrl[idx] = 14802 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14803 14804 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14805 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14806 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14807 } else { 14808 bxe_set_requested_fc(sc); 14809 } 14810 } 14811 14812 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14813 "req_flow_ctrl=0x%x advertising=0x%x\n", 14814 sc->link_params.req_line_speed[idx], 14815 sc->link_params.req_duplex[idx], 14816 sc->link_params.req_flow_ctrl[idx], 14817 sc->port.advertising[idx]); 14818 } 14819} 14820 14821static void 14822bxe_get_phy_info(struct bxe_softc *sc) 14823{ 14824 uint8_t port = SC_PORT(sc); 14825 uint32_t config = sc->port.config; 14826 uint32_t eee_mode; 14827 14828 /* shmem data already read in bxe_get_shmem_info() */ 14829 14830 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14831 "link_config0=0x%08x\n", 14832 sc->link_params.lane_config, 14833 sc->link_params.speed_cap_mask[0], 14834 sc->port.link_config[0]); 14835 14836 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14837 bxe_link_settings_requested(sc); 14838 14839 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14840 sc->link_params.feature_config_flags |= 14841 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14842 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14843 
sc->link_params.feature_config_flags &= 14844 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14845 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14846 sc->link_params.feature_config_flags |= 14847 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14848 } 14849 14850 /* configure link feature according to nvram value */ 14851 eee_mode = 14852 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14853 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14854 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14855 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14856 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14857 ELINK_EEE_MODE_ENABLE_LPI | 14858 ELINK_EEE_MODE_OUTPUT_TIME); 14859 } else { 14860 sc->link_params.eee_mode = 0; 14861 } 14862 14863 /* get the media type */ 14864 bxe_media_detect(sc); 14865} 14866 14867static void 14868bxe_get_params(struct bxe_softc *sc) 14869{ 14870 /* get user tunable params */ 14871 bxe_get_tunable_params(sc); 14872 14873 /* select the RX and TX ring sizes */ 14874 sc->tx_ring_size = TX_BD_USABLE; 14875 sc->rx_ring_size = RX_BD_USABLE; 14876 14877 /* XXX disable WoL */ 14878 sc->wol = 0; 14879} 14880 14881static void 14882bxe_set_modes_bitmap(struct bxe_softc *sc) 14883{ 14884 uint32_t flags = 0; 14885 14886 if (CHIP_REV_IS_FPGA(sc)) { 14887 SET_FLAGS(flags, MODE_FPGA); 14888 } else if (CHIP_REV_IS_EMUL(sc)) { 14889 SET_FLAGS(flags, MODE_EMUL); 14890 } else { 14891 SET_FLAGS(flags, MODE_ASIC); 14892 } 14893 14894 if (CHIP_IS_MODE_4_PORT(sc)) { 14895 SET_FLAGS(flags, MODE_PORT4); 14896 } else { 14897 SET_FLAGS(flags, MODE_PORT2); 14898 } 14899 14900 if (CHIP_IS_E2(sc)) { 14901 SET_FLAGS(flags, MODE_E2); 14902 } else if (CHIP_IS_E3(sc)) { 14903 SET_FLAGS(flags, MODE_E3); 14904 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14905 SET_FLAGS(flags, MODE_E3_A0); 14906 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14907 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14908 } 14909 } 14910 14911 if (IS_MF(sc)) { 14912 SET_FLAGS(flags, MODE_MF); 14913 switch (sc->devinfo.mf_info.mf_mode) { 14914 case MULTI_FUNCTION_SD: 14915 SET_FLAGS(flags, MODE_MF_SD); 14916 break; 14917 case MULTI_FUNCTION_SI: 14918 SET_FLAGS(flags, MODE_MF_SI); 14919 break; 14920 case MULTI_FUNCTION_AFEX: 14921 SET_FLAGS(flags, MODE_MF_AFEX); 14922 break; 14923 } 14924 } else { 14925 SET_FLAGS(flags, MODE_SF); 14926 } 14927 14928#if defined(__LITTLE_ENDIAN) 14929 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14930#else /* __BIG_ENDIAN */ 14931 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14932#endif 14933 14934 INIT_MODE_FLAGS(sc) = flags; 14935} 14936 14937static int 14938bxe_alloc_hsi_mem(struct bxe_softc *sc) 14939{ 14940 struct bxe_fastpath *fp; 14941 bus_addr_t busaddr; 14942 int max_agg_queues; 14943 int max_segments; 14944 bus_size_t max_size; 14945 bus_size_t max_seg_size; 14946 char buf[32]; 14947 int rc; 14948 int i, j; 14949 14950 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */ 14951 14952 /* allocate the parent bus DMA tag */ 14953 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 14954 1, /* alignment */ 14955 0, /* boundary limit */ 14956 BUS_SPACE_MAXADDR, /* restricted low */ 14957 BUS_SPACE_MAXADDR, /* restricted hi */ 14958 NULL, /* addr filter() */ 14959 NULL, /* addr filter() arg */ 14960 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14961 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14962 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14963 0, /* flags */ 14964 NULL, /* lock() */ 14965 NULL, /* lock() arg */ 14966 &sc->parent_dma_tag); /* returned dma tag */ 14967 if (rc 
!= 0) { 14968 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14969 return (1); 14970 } 14971 14972 /************************/ 14973 /* DEFAULT STATUS BLOCK */ 14974 /************************/ 14975 14976 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14977 &sc->def_sb_dma, "default status block") != 0) { 14978 /* XXX */ 14979 bus_dma_tag_destroy(sc->parent_dma_tag); 14980 return (1); 14981 } 14982 14983 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14984 14985 /***************/ 14986 /* EVENT QUEUE */ 14987 /***************/ 14988 14989 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14990 &sc->eq_dma, "event queue") != 0) { 14991 /* XXX */ 14992 bxe_dma_free(sc, &sc->def_sb_dma); 14993 sc->def_sb = NULL; 14994 bus_dma_tag_destroy(sc->parent_dma_tag); 14995 return (1); 14996 } 14997 14998 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14999 15000 /*************/ 15001 /* SLOW PATH */ 15002 /*************/ 15003 15004 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 15005 &sc->sp_dma, "slow path") != 0) { 15006 /* XXX */ 15007 bxe_dma_free(sc, &sc->eq_dma); 15008 sc->eq = NULL; 15009 bxe_dma_free(sc, &sc->def_sb_dma); 15010 sc->def_sb = NULL; 15011 bus_dma_tag_destroy(sc->parent_dma_tag); 15012 return (1); 15013 } 15014 15015 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 15016 15017 /*******************/ 15018 /* SLOW PATH QUEUE */ 15019 /*******************/ 15020 15021 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15022 &sc->spq_dma, "slow path queue") != 0) { 15023 /* XXX */ 15024 bxe_dma_free(sc, &sc->sp_dma); 15025 sc->sp = NULL; 15026 bxe_dma_free(sc, &sc->eq_dma); 15027 sc->eq = NULL; 15028 bxe_dma_free(sc, &sc->def_sb_dma); 15029 sc->def_sb = NULL; 15030 bus_dma_tag_destroy(sc->parent_dma_tag); 15031 return (1); 15032 } 15033 15034 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15035 15036 /***************************/ 15037 /* FW DECOMPRESSION BUFFER */ 15038 /***************************/ 15039 15040 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15041 "fw decompression buffer") != 0) { 15042 /* XXX */ 15043 bxe_dma_free(sc, &sc->spq_dma); 15044 sc->spq = NULL; 15045 bxe_dma_free(sc, &sc->sp_dma); 15046 sc->sp = NULL; 15047 bxe_dma_free(sc, &sc->eq_dma); 15048 sc->eq = NULL; 15049 bxe_dma_free(sc, &sc->def_sb_dma); 15050 sc->def_sb = NULL; 15051 bus_dma_tag_destroy(sc->parent_dma_tag); 15052 return (1); 15053 } 15054 15055 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15056 15057 if ((sc->gz_strm = 15058 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15059 /* XXX */ 15060 bxe_dma_free(sc, &sc->gz_buf_dma); 15061 sc->gz_buf = NULL; 15062 bxe_dma_free(sc, &sc->spq_dma); 15063 sc->spq = NULL; 15064 bxe_dma_free(sc, &sc->sp_dma); 15065 sc->sp = NULL; 15066 bxe_dma_free(sc, &sc->eq_dma); 15067 sc->eq = NULL; 15068 bxe_dma_free(sc, &sc->def_sb_dma); 15069 sc->def_sb = NULL; 15070 bus_dma_tag_destroy(sc->parent_dma_tag); 15071 return (1); 15072 } 15073 15074 /*************/ 15075 /* FASTPATHS */ 15076 /*************/ 15077 15078 /* allocate DMA memory for each fastpath structure */ 15079 for (i = 0; i < sc->num_queues; i++) { 15080 fp = &sc->fp[i]; 15081 fp->sc = sc; 15082 fp->index = i; 15083 15084 /*******************/ 15085 /* FP STATUS BLOCK */ 15086 /*******************/ 15087 15088 snprintf(buf, sizeof(buf), "fp %d status block", i); 15089 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15090 &fp->sb_dma, buf) != 0) { 15091 /* XXX unwind and free previous fastpath allocations */ 15092 BLOGE(sc, "Failed to alloc %s\n", 
buf); 15093 return (1); 15094 } else { 15095 if (CHIP_IS_E2E3(sc)) { 15096 fp->status_block.e2_sb = 15097 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15098 } else { 15099 fp->status_block.e1x_sb = 15100 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15101 } 15102 } 15103 15104 /******************/ 15105 /* FP TX BD CHAIN */ 15106 /******************/ 15107 15108 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15109 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15110 &fp->tx_dma, buf) != 0) { 15111 /* XXX unwind and free previous fastpath allocations */ 15112 BLOGE(sc, "Failed to alloc %s\n", buf); 15113 return (1); 15114 } else { 15115 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15116 } 15117 15118 /* link together the tx bd chain pages */ 15119 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15120 /* index into the tx bd chain array to last entry per page */ 15121 struct eth_tx_next_bd *tx_next_bd = 15122 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15123 /* point to the next page and wrap from last page */ 15124 busaddr = (fp->tx_dma.paddr + 15125 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15126 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15127 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15128 } 15129 15130 /******************/ 15131 /* FP RX BD CHAIN */ 15132 /******************/ 15133 15134 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15135 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15136 &fp->rx_dma, buf) != 0) { 15137 /* XXX unwind and free previous fastpath allocations */ 15138 BLOGE(sc, "Failed to alloc %s\n", buf); 15139 return (1); 15140 } else { 15141 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15142 } 15143 15144 /* link together the rx bd chain pages */ 15145 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15146 /* index into the rx bd chain array to last entry per page */ 15147 struct eth_rx_bd *rx_bd = 15148 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15149 /* point to the next page and wrap from last page */ 15150 busaddr = (fp->rx_dma.paddr + 15151 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15152 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15153 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15154 } 15155 15156 /*******************/ 15157 /* FP RX RCQ CHAIN */ 15158 /*******************/ 15159 15160 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15161 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15162 &fp->rcq_dma, buf) != 0) { 15163 /* XXX unwind and free previous fastpath allocations */ 15164 BLOGE(sc, "Failed to alloc %s\n", buf); 15165 return (1); 15166 } else { 15167 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15168 } 15169 15170 /* link together the rcq chain pages */ 15171 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15172 /* index into the rcq chain array to last entry per page */ 15173 struct eth_rx_cqe_next_page *rx_cqe_next = 15174 (struct eth_rx_cqe_next_page *) 15175 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15176 /* point to the next page and wrap from last page */ 15177 busaddr = (fp->rcq_dma.paddr + 15178 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15179 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15180 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15181 } 15182 15183 /*******************/ 15184 /* FP RX SGE CHAIN */ 15185 /*******************/ 15186 15187 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15188 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15189 &fp->rx_sge_dma, buf) != 0) { 15190 /* XXX unwind and free previous fastpath allocations */ 
15191 BLOGE(sc, "Failed to alloc %s\n", buf); 15192 return (1); 15193 } else { 15194 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15195 } 15196 15197 /* link together the sge chain pages */ 15198 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15199 /* index into the sge chain array to last entry per page */ 15200 struct eth_rx_sge *rx_sge = 15201 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15202 /* point to the next page and wrap from last page */ 15203 busaddr = (fp->rx_sge_dma.paddr + 15204 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15205 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15206 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15207 } 15208 15209 /***********************/ 15210 /* FP TX MBUF DMA MAPS */ 15211 /***********************/ 15212 15213 /* set required sizes before mapping to conserve resources */ 15214 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 15215 max_size = BXE_TSO_MAX_SIZE; 15216 max_segments = BXE_TSO_MAX_SEGMENTS; 15217 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15218 } else { 15219 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15220 max_segments = BXE_MAX_SEGMENTS; 15221 max_seg_size = MCLBYTES; 15222 } 15223 15224 /* create a dma tag for the tx mbufs */ 15225 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15226 1, /* alignment */ 15227 0, /* boundary limit */ 15228 BUS_SPACE_MAXADDR, /* restricted low */ 15229 BUS_SPACE_MAXADDR, /* restricted hi */ 15230 NULL, /* addr filter() */ 15231 NULL, /* addr filter() arg */ 15232 max_size, /* max map size */ 15233 max_segments, /* num discontinuous */ 15234 max_seg_size, /* max seg size */ 15235 0, /* flags */ 15236 NULL, /* lock() */ 15237 NULL, /* lock() arg */ 15238 &fp->tx_mbuf_tag); /* returned dma tag */ 15239 if (rc != 0) { 15240 /* XXX unwind and free previous fastpath allocations */ 15241 BLOGE(sc, "Failed to create dma tag for " 15242 "'fp %d tx mbufs' (%d)\n", 15243 i, rc); 15244 return (1); 15245 } 15246 15247 /* create dma maps for each of the tx mbuf clusters */ 15248 for (j = 0; j < TX_BD_TOTAL; j++) { 15249 if (bus_dmamap_create(fp->tx_mbuf_tag, 15250 BUS_DMA_NOWAIT, 15251 &fp->tx_mbuf_chain[j].m_map)) { 15252 /* XXX unwind and free previous fastpath allocations */ 15253 BLOGE(sc, "Failed to create dma map for " 15254 "'fp %d tx mbuf %d' (%d)\n", 15255 i, j, rc); 15256 return (1); 15257 } 15258 } 15259 15260 /***********************/ 15261 /* FP RX MBUF DMA MAPS */ 15262 /***********************/ 15263 15264 /* create a dma tag for the rx mbufs */ 15265 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15266 1, /* alignment */ 15267 0, /* boundary limit */ 15268 BUS_SPACE_MAXADDR, /* restricted low */ 15269 BUS_SPACE_MAXADDR, /* restricted hi */ 15270 NULL, /* addr filter() */ 15271 NULL, /* addr filter() arg */ 15272 MJUM9BYTES, /* max map size */ 15273 1, /* num discontinuous */ 15274 MJUM9BYTES, /* max seg size */ 15275 0, /* flags */ 15276 NULL, /* lock() */ 15277 NULL, /* lock() arg */ 15278 &fp->rx_mbuf_tag); /* returned dma tag */ 15279 if (rc != 0) { 15280 /* XXX unwind and free previous fastpath allocations */ 15281 BLOGE(sc, "Failed to create dma tag for " 15282 "'fp %d rx mbufs' (%d)\n", 15283 i, rc); 15284 return (1); 15285 } 15286 15287 /* create dma maps for each of the rx mbuf clusters */ 15288 for (j = 0; j < RX_BD_TOTAL; j++) { 15289 if (bus_dmamap_create(fp->rx_mbuf_tag, 15290 BUS_DMA_NOWAIT, 15291 &fp->rx_mbuf_chain[j].m_map)) { 15292 /* XXX unwind and free previous fastpath allocations */ 15293 BLOGE(sc, "Failed to create dma map for " 
15294 "'fp %d rx mbuf %d' (%d)\n", 15295 i, j, rc); 15296 return (1); 15297 } 15298 } 15299 15300 /* create dma map for the spare rx mbuf cluster */ 15301 if (bus_dmamap_create(fp->rx_mbuf_tag, 15302 BUS_DMA_NOWAIT, 15303 &fp->rx_mbuf_spare_map)) { 15304 /* XXX unwind and free previous fastpath allocations */ 15305 BLOGE(sc, "Failed to create dma map for " 15306 "'fp %d spare rx mbuf' (%d)\n", 15307 i, rc); 15308 return (1); 15309 } 15310 15311 /***************************/ 15312 /* FP RX SGE MBUF DMA MAPS */ 15313 /***************************/ 15314 15315 /* create a dma tag for the rx sge mbufs */ 15316 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15317 1, /* alignment */ 15318 0, /* boundary limit */ 15319 BUS_SPACE_MAXADDR, /* restricted low */ 15320 BUS_SPACE_MAXADDR, /* restricted hi */ 15321 NULL, /* addr filter() */ 15322 NULL, /* addr filter() arg */ 15323 BCM_PAGE_SIZE, /* max map size */ 15324 1, /* num discontinuous */ 15325 BCM_PAGE_SIZE, /* max seg size */ 15326 0, /* flags */ 15327 NULL, /* lock() */ 15328 NULL, /* lock() arg */ 15329 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15330 if (rc != 0) { 15331 /* XXX unwind and free previous fastpath allocations */ 15332 BLOGE(sc, "Failed to create dma tag for " 15333 "'fp %d rx sge mbufs' (%d)\n", 15334 i, rc); 15335 return (1); 15336 } 15337 15338 /* create dma maps for the rx sge mbuf clusters */ 15339 for (j = 0; j < RX_SGE_TOTAL; j++) { 15340 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15341 BUS_DMA_NOWAIT, 15342 &fp->rx_sge_mbuf_chain[j].m_map)) { 15343 /* XXX unwind and free previous fastpath allocations */ 15344 BLOGE(sc, "Failed to create dma map for " 15345 "'fp %d rx sge mbuf %d' (%d)\n", 15346 i, j, rc); 15347 return (1); 15348 } 15349 } 15350 15351 /* create dma map for the spare rx sge mbuf cluster */ 15352 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15353 BUS_DMA_NOWAIT, 15354 &fp->rx_sge_mbuf_spare_map)) { 15355 /* XXX unwind and free previous fastpath allocations */ 15356 BLOGE(sc, "Failed to create dma map for " 15357 "'fp %d spare rx sge mbuf' (%d)\n", 15358 i, rc); 15359 return (1); 15360 } 15361 15362 /***************************/ 15363 /* FP RX TPA MBUF DMA MAPS */ 15364 /***************************/ 15365 15366 /* create dma maps for the rx tpa mbuf clusters */ 15367 max_agg_queues = MAX_AGG_QS(sc); 15368 15369 for (j = 0; j < max_agg_queues; j++) { 15370 if (bus_dmamap_create(fp->rx_mbuf_tag, 15371 BUS_DMA_NOWAIT, 15372 &fp->rx_tpa_info[j].bd.m_map)) { 15373 /* XXX unwind and free previous fastpath allocations */ 15374 BLOGE(sc, "Failed to create dma map for " 15375 "'fp %d rx tpa mbuf %d' (%d)\n", 15376 i, j, rc); 15377 return (1); 15378 } 15379 } 15380 15381 /* create dma map for the spare rx tpa mbuf cluster */ 15382 if (bus_dmamap_create(fp->rx_mbuf_tag, 15383 BUS_DMA_NOWAIT, 15384 &fp->rx_tpa_info_mbuf_spare_map)) { 15385 /* XXX unwind and free previous fastpath allocations */ 15386 BLOGE(sc, "Failed to create dma map for " 15387 "'fp %d spare rx tpa mbuf' (%d)\n", 15388 i, rc); 15389 return (1); 15390 } 15391 15392 bxe_init_sge_ring_bit_mask(fp); 15393 } 15394 15395 return (0); 15396} 15397 15398static void 15399bxe_free_hsi_mem(struct bxe_softc *sc) 15400{ 15401 struct bxe_fastpath *fp; 15402 int max_agg_queues; 15403 int i, j; 15404 15405 if (sc->parent_dma_tag == NULL) { 15406 return; /* assume nothing was allocated */ 15407 } 15408 15409 for (i = 0; i < sc->num_queues; i++) { 15410 fp = &sc->fp[i]; 15411 15412 /*******************/ 15413 /* FP STATUS BLOCK */ 15414 
/*******************/ 15415 15416 bxe_dma_free(sc, &fp->sb_dma); 15417 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15418 15419 /******************/ 15420 /* FP TX BD CHAIN */ 15421 /******************/ 15422 15423 bxe_dma_free(sc, &fp->tx_dma); 15424 fp->tx_chain = NULL; 15425 15426 /******************/ 15427 /* FP RX BD CHAIN */ 15428 /******************/ 15429 15430 bxe_dma_free(sc, &fp->rx_dma); 15431 fp->rx_chain = NULL; 15432 15433 /*******************/ 15434 /* FP RX RCQ CHAIN */ 15435 /*******************/ 15436 15437 bxe_dma_free(sc, &fp->rcq_dma); 15438 fp->rcq_chain = NULL; 15439 15440 /*******************/ 15441 /* FP RX SGE CHAIN */ 15442 /*******************/ 15443 15444 bxe_dma_free(sc, &fp->rx_sge_dma); 15445 fp->rx_sge_chain = NULL; 15446 15447 /***********************/ 15448 /* FP TX MBUF DMA MAPS */ 15449 /***********************/ 15450 15451 if (fp->tx_mbuf_tag != NULL) { 15452 for (j = 0; j < TX_BD_TOTAL; j++) { 15453 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15454 bus_dmamap_unload(fp->tx_mbuf_tag, 15455 fp->tx_mbuf_chain[j].m_map); 15456 bus_dmamap_destroy(fp->tx_mbuf_tag, 15457 fp->tx_mbuf_chain[j].m_map); 15458 } 15459 } 15460 15461 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15462 fp->tx_mbuf_tag = NULL; 15463 } 15464 15465 /***********************/ 15466 /* FP RX MBUF DMA MAPS */ 15467 /***********************/ 15468 15469 if (fp->rx_mbuf_tag != NULL) { 15470 for (j = 0; j < RX_BD_TOTAL; j++) { 15471 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15472 bus_dmamap_unload(fp->rx_mbuf_tag, 15473 fp->rx_mbuf_chain[j].m_map); 15474 bus_dmamap_destroy(fp->rx_mbuf_tag, 15475 fp->rx_mbuf_chain[j].m_map); 15476 } 15477 } 15478 15479 if (fp->rx_mbuf_spare_map != NULL) { 15480 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15481 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15482 } 15483 15484 /***************************/ 15485 /* FP RX TPA MBUF DMA MAPS */ 15486 /***************************/ 15487 15488 max_agg_queues = MAX_AGG_QS(sc); 15489 15490 for (j = 0; j < max_agg_queues; j++) { 15491 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15492 bus_dmamap_unload(fp->rx_mbuf_tag, 15493 fp->rx_tpa_info[j].bd.m_map); 15494 bus_dmamap_destroy(fp->rx_mbuf_tag, 15495 fp->rx_tpa_info[j].bd.m_map); 15496 } 15497 } 15498 15499 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15500 bus_dmamap_unload(fp->rx_mbuf_tag, 15501 fp->rx_tpa_info_mbuf_spare_map); 15502 bus_dmamap_destroy(fp->rx_mbuf_tag, 15503 fp->rx_tpa_info_mbuf_spare_map); 15504 } 15505 15506 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15507 fp->rx_mbuf_tag = NULL; 15508 } 15509 15510 /***************************/ 15511 /* FP RX SGE MBUF DMA MAPS */ 15512 /***************************/ 15513 15514 if (fp->rx_sge_mbuf_tag != NULL) { 15515 for (j = 0; j < RX_SGE_TOTAL; j++) { 15516 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15517 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15518 fp->rx_sge_mbuf_chain[j].m_map); 15519 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15520 fp->rx_sge_mbuf_chain[j].m_map); 15521 } 15522 } 15523 15524 if (fp->rx_sge_mbuf_spare_map != NULL) { 15525 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15526 fp->rx_sge_mbuf_spare_map); 15527 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15528 fp->rx_sge_mbuf_spare_map); 15529 } 15530 15531 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15532 fp->rx_sge_mbuf_tag = NULL; 15533 } 15534 } 15535 15536 /***************************/ 15537 /* FW DECOMPRESSION BUFFER */ 15538 /***************************/ 15539 15540 bxe_dma_free(sc, &sc->gz_buf_dma); 15541 sc->gz_buf = NULL; 
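/* free(9) is a no-op on a NULL pointer, so gz_strm needs no check here;
 * the teardown below releases the slow path resources in the reverse
 * order of their allocation in bxe_alloc_hsi_mem() */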
    free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;

    /*************/
    /* SLOW PATH */
    /*************/

    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;

    /***************/
    /* EVENT QUEUE */
    /***************/

    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;

    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
}

/*
 * A previous driver DMAE transaction may have occurred when the pre-boot
 * stage ended and boot began. This would invalidate the addresses of the
 * transaction, resulting in the was-error bit being set in the PCI block and
 * causing all hw-to-host PCIe transactions to time out. If this happened we
 * want to clear the interrupt which detected this from the pglueb and the
 * was-done bit.
 */
static void
bxe_prev_interrupted_dmae(struct bxe_softc *sc)
{
    uint32_t val;

    if (!CHIP_IS_E1x(sc)) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
            BLOGD(sc, DBG_LOAD,
                  "Clearing 'was-error' bit that was set in pglueb");
            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
        }
    }
}

static int
bxe_prev_mcp_done(struct bxe_softc *sc)
{
    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
    if (!rc) {
        BLOGE(sc, "MCP response failure, aborting\n");
        return (-1);
    }

    return (0);
}

static struct bxe_prev_list_node *
bxe_prev_path_get_entry(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;

    LIST_FOREACH(tmp, &bxe_prev_list, node) {
        if ((sc->pcie_bus == tmp->bus) &&
            (sc->pcie_device == tmp->slot) &&
            (SC_PATH(sc) == tmp->path)) {
            return (tmp);
        }
    }

    return (NULL);
}

static uint8_t
bxe_prev_is_path_marked(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;
    int rc = FALSE;

    mtx_lock(&bxe_prev_mtx);

    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was marked by AER\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            rc = TRUE;
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was already cleaned from previous drivers\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        }
    }

    mtx_unlock(&bxe_prev_mtx);

    return (rc);
}

static int
bxe_prev_mark_path(struct bxe_softc *sc,
                   uint8_t after_undi)
{
    struct bxe_prev_list_node *tmp;

    mtx_lock(&bxe_prev_mtx);

    /* Check whether the entry for this path already exists */
    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (!tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Re-marking AER in path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            BLOGD(sc, DBG_LOAD,
                  "Removing AER indication from path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
            tmp->aer
= 0; 15675 } 15676 15677 mtx_unlock(&bxe_prev_mtx); 15678 return (0); 15679 } 15680 15681 mtx_unlock(&bxe_prev_mtx); 15682 15683 /* Create an entry for this path and add it */ 15684 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15685 (M_NOWAIT | M_ZERO)); 15686 if (!tmp) { 15687 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15688 return (-1); 15689 } 15690 15691 tmp->bus = sc->pcie_bus; 15692 tmp->slot = sc->pcie_device; 15693 tmp->path = SC_PATH(sc); 15694 tmp->aer = 0; 15695 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15696 15697 mtx_lock(&bxe_prev_mtx); 15698 15699 BLOGD(sc, DBG_LOAD, 15700 "Marked path %d/%d/%d - finished previous unload\n", 15701 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15702 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15703 15704 mtx_unlock(&bxe_prev_mtx); 15705 15706 return (0); 15707} 15708 15709static int 15710bxe_do_flr(struct bxe_softc *sc) 15711{ 15712 int i; 15713 15714 /* only E2 and onwards support FLR */ 15715 if (CHIP_IS_E1x(sc)) { 15716 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15717 return (-1); 15718 } 15719 15720 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15721 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15722 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15723 sc->devinfo.bc_ver); 15724 return (-1); 15725 } 15726 15727 /* Wait for Transaction Pending bit clean */ 15728 for (i = 0; i < 4; i++) { 15729 if (i) { 15730 DELAY(((1 << (i - 1)) * 100) * 1000); 15731 } 15732 15733 if (!bxe_is_pcie_pending(sc)) { 15734 goto clear; 15735 } 15736 } 15737 15738 BLOGE(sc, "PCIE transaction is not cleared, " 15739 "proceeding with reset anyway\n"); 15740 15741clear: 15742 15743 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15744 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15745 15746 return (0); 15747} 15748 15749struct bxe_mac_vals { 15750 uint32_t xmac_addr; 15751 uint32_t xmac_val; 15752 uint32_t emac_addr; 15753 uint32_t emac_val; 15754 uint32_t umac_addr; 15755 uint32_t umac_val; 15756 uint32_t bmac_addr; 15757 uint32_t bmac_val[2]; 15758}; 15759 15760static void 15761bxe_prev_unload_close_mac(struct bxe_softc *sc, 15762 struct bxe_mac_vals *vals) 15763{ 15764 uint32_t val, base_addr, offset, mask, reset_reg; 15765 uint8_t mac_stopped = FALSE; 15766 uint8_t port = SC_PORT(sc); 15767 uint32_t wb_data[2]; 15768 15769 /* reset addresses as they also mark which values were changed */ 15770 vals->bmac_addr = 0; 15771 vals->umac_addr = 0; 15772 vals->xmac_addr = 0; 15773 vals->emac_addr = 0; 15774 15775 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15776 15777 if (!CHIP_IS_E3(sc)) { 15778 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15779 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15780 if ((mask & reset_reg) && val) { 15781 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15782 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15783 : NIG_REG_INGRESS_BMAC0_MEM; 15784 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15785 : BIGMAC_REGISTER_BMAC_CONTROL; 15786 15787 /* 15788 * use rd/wr since we cannot use dmae. This is safe 15789 * since MCP won't access the bus due to the request 15790 * to unload, and no function on the path can be 15791 * loaded at this time. 
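 * (Presumably the DMAE block has also not been initialized this early in
 * the previous-unload flow, which is why plain REG_RD/REG_WR must be used.)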
15792 */ 15793 wb_data[0] = REG_RD(sc, base_addr + offset); 15794 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15795 vals->bmac_addr = base_addr + offset; 15796 vals->bmac_val[0] = wb_data[0]; 15797 vals->bmac_val[1] = wb_data[1]; 15798 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15799 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15800 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15801 } 15802 15803 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15804 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15805 vals->emac_val = REG_RD(sc, vals->emac_addr); 15806 REG_WR(sc, vals->emac_addr, 0); 15807 mac_stopped = TRUE; 15808 } else { 15809 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15810 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15811 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15812 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15813 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15814 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15815 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15816 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15817 REG_WR(sc, vals->xmac_addr, 0); 15818 mac_stopped = TRUE; 15819 } 15820 15821 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15822 if (mask & reset_reg) { 15823 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15824 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15825 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15826 vals->umac_val = REG_RD(sc, vals->umac_addr); 15827 REG_WR(sc, vals->umac_addr, 0); 15828 mac_stopped = TRUE; 15829 } 15830 } 15831 15832 if (mac_stopped) { 15833 DELAY(20000); 15834 } 15835} 15836 15837#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15838#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15839#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15840#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15841 15842static void 15843bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15844 uint8_t port, 15845 uint8_t inc) 15846{ 15847 uint16_t rcq, bd; 15848 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15849 15850 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15851 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15852 15853 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15854 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15855 15856 BLOGD(sc, DBG_LOAD, 15857 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15858 port, bd, rcq); 15859} 15860 15861static int 15862bxe_prev_unload_common(struct bxe_softc *sc) 15863{ 15864 uint32_t reset_reg, tmp_reg = 0, rc; 15865 uint8_t prev_undi = FALSE; 15866 struct bxe_mac_vals mac_vals; 15867 uint32_t timer_count = 1000; 15868 uint32_t prev_brb; 15869 15870 /* 15871 * It is possible a previous function received 'common' answer, 15872 * but hasn't loaded yet, therefore creating a scenario of 15873 * multiple functions receiving 'common' on the same path. 
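 * The path marking below ensures that only the first such function performs
 * the full common cleanup; any later caller finds the path already marked
 * and just completes the MCP handshake via bxe_prev_mcp_done().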
15874 */ 15875 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15876 15877 memset(&mac_vals, 0, sizeof(mac_vals)); 15878 15879 if (bxe_prev_is_path_marked(sc)) { 15880 return (bxe_prev_mcp_done(sc)); 15881 } 15882 15883 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15884 15885 /* Reset should be performed after BRB is emptied */ 15886 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15887 /* Close the MAC Rx to prevent BRB from filling up */ 15888 bxe_prev_unload_close_mac(sc, &mac_vals); 15889 15890 /* close LLH filters towards the BRB */ 15891 elink_set_rx_filter(&sc->link_params, 0); 15892 15893 /* 15894 * Check if the UNDI driver was previously loaded. 15895 * UNDI driver initializes CID offset for normal bell to 0x7 15896 */ 15897 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15898 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15899 if (tmp_reg == 0x7) { 15900 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15901 prev_undi = TRUE; 15902 /* clear the UNDI indication */ 15903 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15904 /* clear possible idle check errors */ 15905 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15906 } 15907 } 15908 15909 /* wait until BRB is empty */ 15910 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15911 while (timer_count) { 15912 prev_brb = tmp_reg; 15913 15914 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15915 if (!tmp_reg) { 15916 break; 15917 } 15918 15919 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15920 15921 /* reset timer as long as BRB actually gets emptied */ 15922 if (prev_brb > tmp_reg) { 15923 timer_count = 1000; 15924 } else { 15925 timer_count--; 15926 } 15927 15928 /* If UNDI resides in memory, manually increment it */ 15929 if (prev_undi) { 15930 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15931 } 15932 15933 DELAY(10); 15934 } 15935 15936 if (!timer_count) { 15937 BLOGE(sc, "Failed to empty BRB\n"); 15938 } 15939 } 15940 15941 /* No packets are in the pipeline, path is ready for reset */ 15942 bxe_reset_common(sc); 15943 15944 if (mac_vals.xmac_addr) { 15945 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15946 } 15947 if (mac_vals.umac_addr) { 15948 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15949 } 15950 if (mac_vals.emac_addr) { 15951 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15952 } 15953 if (mac_vals.bmac_addr) { 15954 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15955 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15956 } 15957 15958 rc = bxe_prev_mark_path(sc, prev_undi); 15959 if (rc) { 15960 bxe_prev_mcp_done(sc); 15961 return (rc); 15962 } 15963 15964 return (bxe_prev_mcp_done(sc)); 15965} 15966 15967static int 15968bxe_prev_unload_uncommon(struct bxe_softc *sc) 15969{ 15970 int rc; 15971 15972 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15973 15974 /* Test if previous unload process was already finished for this path */ 15975 if (bxe_prev_is_path_marked(sc)) { 15976 return (bxe_prev_mcp_done(sc)); 15977 } 15978 15979 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15980 15981 /* 15982 * If function has FLR capabilities, and existing FW version matches 15983 * the one required, then FLR will be sufficient to clean any residue 15984 * left by previous driver 15985 */ 15986 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15987 if (!rc) { 15988 /* fw version is good */ 15989 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15990 rc = bxe_do_flr(sc); 15991 } 15992 15993 if (!rc) { 15994 /* FLR was performed */ 15995 BLOGD(sc, DBG_LOAD, 
"FLR successful\n"); 15996 return (0); 15997 } 15998 15999 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 16000 16001 /* Close the MCP request, return failure*/ 16002 rc = bxe_prev_mcp_done(sc); 16003 if (!rc) { 16004 rc = BXE_PREV_WAIT_NEEDED; 16005 } 16006 16007 return (rc); 16008} 16009 16010static int 16011bxe_prev_unload(struct bxe_softc *sc) 16012{ 16013 int time_counter = 10; 16014 uint32_t fw, hw_lock_reg, hw_lock_val; 16015 uint32_t rc = 0; 16016 16017 /* 16018 * Clear HW from errors which may have resulted from an interrupted 16019 * DMAE transaction. 16020 */ 16021 bxe_prev_interrupted_dmae(sc); 16022 16023 /* Release previously held locks */ 16024 hw_lock_reg = 16025 (SC_FUNC(sc) <= 5) ? 16026 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 16027 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 16028 16029 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 16030 if (hw_lock_val) { 16031 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 16032 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 16033 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 16034 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 16035 } 16036 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 16037 REG_WR(sc, hw_lock_reg, 0xffffffff); 16038 } else { 16039 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 16040 } 16041 16042 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 16043 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 16044 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 16045 } 16046 16047 do { 16048 /* Lock MCP using an unload request */ 16049 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 16050 if (!fw) { 16051 BLOGE(sc, "MCP response failure, aborting\n"); 16052 rc = -1; 16053 break; 16054 } 16055 16056 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 16057 rc = bxe_prev_unload_common(sc); 16058 break; 16059 } 16060 16061 /* non-common reply from MCP night require looping */ 16062 rc = bxe_prev_unload_uncommon(sc); 16063 if (rc != BXE_PREV_WAIT_NEEDED) { 16064 break; 16065 } 16066 16067 DELAY(20000); 16068 } while (--time_counter); 16069 16070 if (!time_counter || rc) { 16071 BLOGE(sc, "Failed to unload previous driver!\n"); 16072 rc = -1; 16073 } 16074 16075 return (rc); 16076} 16077 16078void 16079bxe_dcbx_set_state(struct bxe_softc *sc, 16080 uint8_t dcb_on, 16081 uint32_t dcbx_enabled) 16082{ 16083 if (!CHIP_IS_E1x(sc)) { 16084 sc->dcb_state = dcb_on; 16085 sc->dcbx_enabled = dcbx_enabled; 16086 } else { 16087 sc->dcb_state = FALSE; 16088 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 16089 } 16090 BLOGD(sc, DBG_LOAD, 16091 "DCB state [%s:%s]\n", 16092 dcb_on ? "ON" : "OFF", 16093 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 16094 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 16095 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
16096 "on-chip with negotiation" : "invalid"); 16097} 16098 16099/* must be called after sriov-enable */ 16100static int 16101bxe_set_qm_cid_count(struct bxe_softc *sc) 16102{ 16103 int cid_count = BXE_L2_MAX_CID(sc); 16104 16105 if (IS_SRIOV(sc)) { 16106 cid_count += BXE_VF_CIDS; 16107 } 16108 16109 if (CNIC_SUPPORT(sc)) { 16110 cid_count += CNIC_CID_MAX; 16111 } 16112 16113 return (roundup(cid_count, QM_CID_ROUND)); 16114} 16115 16116static void 16117bxe_init_multi_cos(struct bxe_softc *sc) 16118{ 16119 int pri, cos; 16120 16121 uint32_t pri_map = 0; /* XXX change to user config */ 16122 16123 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16124 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16125 if (cos < sc->max_cos) { 16126 sc->prio_to_cos[pri] = cos; 16127 } else { 16128 BLOGW(sc, "Invalid COS %d for priority %d " 16129 "(max COS is %d), setting to 0\n", 16130 cos, pri, (sc->max_cos - 1)); 16131 sc->prio_to_cos[pri] = 0; 16132 } 16133 } 16134} 16135 16136static int 16137bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16138{ 16139 struct bxe_softc *sc; 16140 int error, result; 16141 16142 result = 0; 16143 error = sysctl_handle_int(oidp, &result, 0, req); 16144 16145 if (error || !req->newptr) { 16146 return (error); 16147 } 16148 16149 if (result == 1) { 16150 uint32_t temp; 16151 sc = (struct bxe_softc *)arg1; 16152 16153 BLOGI(sc, "... dumping driver state ...\n"); 16154 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 16155 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 16156 } 16157 16158 return (error); 16159} 16160 16161static int 16162bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS) 16163{ 16164 struct bxe_softc *sc; 16165 int error, result; 16166 16167 result = 0; 16168 error = sysctl_handle_int(oidp, &result, 0, req); 16169 16170 if (error || !req->newptr) { 16171 return (error); 16172 } 16173 16174 if (result == 1) { 16175 sc = (struct bxe_softc *)arg1; 16176 16177 BLOGI(sc, "... grcdump start ...\n"); 16178 bxe_grc_dump(sc); 16179 BLOGI(sc, "... 
grcdump done ...\n"); 16180 } 16181 16182 return (error); 16183} 16184 16185static int 16186bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16187{ 16188 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16189 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16190 uint32_t *offset; 16191 uint64_t value = 0; 16192 int index = (int)arg2; 16193 16194 if (index >= BXE_NUM_ETH_STATS) { 16195 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16196 return (-1); 16197 } 16198 16199 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16200 16201 switch (bxe_eth_stats_arr[index].size) { 16202 case 4: 16203 value = (uint64_t)*offset; 16204 break; 16205 case 8: 16206 value = HILO_U64(*offset, *(offset + 1)); 16207 break; 16208 default: 16209 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16210 index, bxe_eth_stats_arr[index].size); 16211 return (-1); 16212 } 16213 16214 return (sysctl_handle_64(oidp, &value, 0, req)); 16215} 16216 16217static int 16218bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16219{ 16220 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16221 uint32_t *eth_stats; 16222 uint32_t *offset; 16223 uint64_t value = 0; 16224 uint32_t q_stat = (uint32_t)arg2; 16225 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16226 uint32_t index = (q_stat & 0xffff); 16227 16228 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16229 16230 if (index >= BXE_NUM_ETH_Q_STATS) { 16231 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16232 return (-1); 16233 } 16234 16235 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16236 16237 switch (bxe_eth_q_stats_arr[index].size) { 16238 case 4: 16239 value = (uint64_t)*offset; 16240 break; 16241 case 8: 16242 value = HILO_U64(*offset, *(offset + 1)); 16243 break; 16244 default: 16245 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16246 index, bxe_eth_q_stats_arr[index].size); 16247 return (-1); 16248 } 16249 16250 return (sysctl_handle_64(oidp, &value, 0, req)); 16251} 16252 16253static void 16254bxe_add_sysctls(struct bxe_softc *sc) 16255{ 16256 struct sysctl_ctx_list *ctx; 16257 struct sysctl_oid_list *children; 16258 struct sysctl_oid *queue_top, *queue; 16259 struct sysctl_oid_list *queue_top_children, *queue_children; 16260 char queue_num_buf[32]; 16261 uint32_t q_stat; 16262 int i, j; 16263 16264 ctx = device_get_sysctl_ctx(sc->dev); 16265 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16266 16267 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16268 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16269 "version"); 16270 16271 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16272 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16273 "bootcode version"); 16274 16275 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16276 BCM_5710_FW_MAJOR_VERSION, 16277 BCM_5710_FW_MINOR_VERSION, 16278 BCM_5710_FW_REVISION_VERSION, 16279 BCM_5710_FW_ENGINEERING_VERSION); 16280 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16281 CTLFLAG_RD, sc->fw_ver_str, 0, 16282 "firmware version"); 16283 16284 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16285 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16286 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16287 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16288 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? 
"MF-AFEX" : 16289 "Unknown")); 16290 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16291 CTLFLAG_RD, sc->mf_mode_str, 0, 16292 "multifunction mode"); 16293 16294 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16295 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16296 "multifunction vnics per port"); 16297 16298 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16299 CTLFLAG_RD, sc->mac_addr_str, 0, 16300 "mac address"); 16301 16302 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16303 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16304 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16305 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : 16306 "???GT/s"), 16307 sc->devinfo.pcie_link_width); 16308 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16309 CTLFLAG_RD, sc->pci_link_str, 0, 16310 "pci link status"); 16311 16312 sc->debug = bxe_debug; 16313 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16314 CTLFLAG_RW, &sc->debug, 16315 "debug logging mode"); 16316 16317 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump", 16318 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16319 bxe_sysctl_trigger_grcdump, "IU", 16320 "set by driver when a grcdump is needed"); 16321 16322 sc->grcdump_done = 0; 16323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", 16324 CTLFLAG_RW, &sc->grcdump_done, 0, 16325 "set by driver when grcdump is done"); 16326 16327 sc->rx_budget = bxe_rx_budget; 16328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16329 CTLFLAG_RW, &sc->rx_budget, 0, 16330 "rx processing budget"); 16331 16332 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16333 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16334 bxe_sysctl_state, "IU", "dump driver state"); 16335 16336 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16337 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16338 bxe_eth_stats_arr[i].string, 16339 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16340 bxe_sysctl_eth_stat, "LU", 16341 bxe_eth_stats_arr[i].string); 16342 } 16343 16344 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16345 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16346 CTLFLAG_RD, NULL, "queue"); 16347 queue_top_children = SYSCTL_CHILDREN(queue_top); 16348 16349 for (i = 0; i < sc->num_queues; i++) { 16350 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16351 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16352 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16353 queue_num_buf, CTLFLAG_RD, NULL, 16354 "single queue"); 16355 queue_children = SYSCTL_CHILDREN(queue); 16356 16357 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16358 q_stat = ((i << 16) | j); 16359 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16360 bxe_eth_q_stats_arr[j].string, 16361 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16362 bxe_sysctl_eth_q_stat, "LU", 16363 bxe_eth_q_stats_arr[j].string); 16364 } 16365 } 16366} 16367 16368/* 16369 * Device attach function. 16370 * 16371 * Allocates device resources, performs secondary chip identification, and 16372 * initializes driver instance variables. This function is called from driver 16373 * load after a successful probe. 
16374 * 16375 * Returns: 16376 * 0 = Success, >0 = Failure 16377 */ 16378static int 16379bxe_attach(device_t dev) 16380{ 16381 struct bxe_softc *sc; 16382 16383 sc = device_get_softc(dev); 16384 16385 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16386 16387 sc->state = BXE_STATE_CLOSED; 16388 16389 sc->dev = dev; 16390 sc->unit = device_get_unit(dev); 16391 16392 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16393 16394 sc->pcie_bus = pci_get_bus(dev); 16395 sc->pcie_device = pci_get_slot(dev); 16396 sc->pcie_func = pci_get_function(dev); 16397 16398 /* enable bus master capability */ 16399 pci_enable_busmaster(dev); 16400 16401 /* get the BARs */ 16402 if (bxe_allocate_bars(sc) != 0) { 16403 return (ENXIO); 16404 } 16405 16406 /* initialize the mutexes */ 16407 bxe_init_mutexes(sc); 16408 16409 /* prepare the periodic callout */ 16410 callout_init(&sc->periodic_callout, 0); 16411 16412 /* prepare the chip taskqueue */ 16413 sc->chip_tq_flags = CHIP_TQ_NONE; 16414 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16415 "bxe%d_chip_tq", sc->unit); 16416 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16417 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16418 taskqueue_thread_enqueue, 16419 &sc->chip_tq); 16420 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16421 "%s", sc->chip_tq_name); 16422 16423 /* get device info and set params */ 16424 if (bxe_get_device_info(sc) != 0) { 16425 BLOGE(sc, "getting device info\n"); 16426 bxe_deallocate_bars(sc); 16427 pci_disable_busmaster(dev); 16428 return (ENXIO); 16429 } 16430 16431 /* get final misc params */ 16432 bxe_get_params(sc); 16433 16434 /* set the default MTU (changed via ifconfig) */ 16435 sc->mtu = ETHERMTU; 16436 16437 bxe_set_modes_bitmap(sc); 16438 16439 /* XXX 16440 * If in AFEX mode and the function is configured for FCoE 16441 * then bail... no L2 allowed. 
16442 */ 16443 16444 /* get phy settings from shmem and 'and' against admin settings */ 16445 bxe_get_phy_info(sc); 16446 16447 /* initialize the FreeBSD ifnet interface */ 16448 if (bxe_init_ifnet(sc) != 0) { 16449 bxe_release_mutexes(sc); 16450 bxe_deallocate_bars(sc); 16451 pci_disable_busmaster(dev); 16452 return (ENXIO); 16453 } 16454 16455 if (bxe_add_cdev(sc) != 0) { 16456 if (sc->ifp != NULL) { 16457 ether_ifdetach(sc->ifp); 16458 } 16459 ifmedia_removeall(&sc->ifmedia); 16460 bxe_release_mutexes(sc); 16461 bxe_deallocate_bars(sc); 16462 pci_disable_busmaster(dev); 16463 return (ENXIO); 16464 } 16465 16466 /* allocate device interrupts */ 16467 if (bxe_interrupt_alloc(sc) != 0) { 16468 bxe_del_cdev(sc); 16469 if (sc->ifp != NULL) { 16470 ether_ifdetach(sc->ifp); 16471 } 16472 ifmedia_removeall(&sc->ifmedia); 16473 bxe_release_mutexes(sc); 16474 bxe_deallocate_bars(sc); 16475 pci_disable_busmaster(dev); 16476 return (ENXIO); 16477 } 16478 16479 /* allocate ilt */ 16480 if (bxe_alloc_ilt_mem(sc) != 0) { 16481 bxe_interrupt_free(sc); 16482 bxe_del_cdev(sc); 16483 if (sc->ifp != NULL) { 16484 ether_ifdetach(sc->ifp); 16485 } 16486 ifmedia_removeall(&sc->ifmedia); 16487 bxe_release_mutexes(sc); 16488 bxe_deallocate_bars(sc); 16489 pci_disable_busmaster(dev); 16490 return (ENXIO); 16491 } 16492 16493 /* allocate the host hardware/software hsi structures */ 16494 if (bxe_alloc_hsi_mem(sc) != 0) { 16495 bxe_free_ilt_mem(sc); 16496 bxe_interrupt_free(sc); 16497 bxe_del_cdev(sc); 16498 if (sc->ifp != NULL) { 16499 ether_ifdetach(sc->ifp); 16500 } 16501 ifmedia_removeall(&sc->ifmedia); 16502 bxe_release_mutexes(sc); 16503 bxe_deallocate_bars(sc); 16504 pci_disable_busmaster(dev); 16505 return (ENXIO); 16506 } 16507 16508 /* need to reset chip if UNDI was active */ 16509 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16510 /* init fw_seq */ 16511 sc->fw_seq = 16512 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16513 DRV_MSG_SEQ_NUMBER_MASK); 16514 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16515 bxe_prev_unload(sc); 16516 } 16517 16518#if 1 16519 /* XXX */ 16520 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16521#else 16522 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16523 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16524 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16525 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16526 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16527 bxe_dcbx_init_params(sc); 16528 } else { 16529 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16530 } 16531#endif 16532 16533 /* calculate qm_cid_count */ 16534 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16535 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16536 16537 sc->max_cos = 1; 16538 bxe_init_multi_cos(sc); 16539 16540 bxe_add_sysctls(sc); 16541 16542 return (0); 16543} 16544 16545/* 16546 * Device detach function. 16547 * 16548 * Stops the controller, resets the controller, and releases resources. 
16549 * 16550 * Returns: 16551 * 0 = Success, >0 = Failure 16552 */ 16553static int 16554bxe_detach(device_t dev) 16555{ 16556 struct bxe_softc *sc; 16557 if_t ifp; 16558 16559 sc = device_get_softc(dev); 16560 16561 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16562 16563 ifp = sc->ifp; 16564 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16565 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16566 return(EBUSY); 16567 } 16568 16569 bxe_del_cdev(sc); 16570 16571 /* stop the periodic callout */ 16572 bxe_periodic_stop(sc); 16573 16574 /* stop the chip taskqueue */ 16575 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16576 if (sc->chip_tq) { 16577 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16578 taskqueue_free(sc->chip_tq); 16579 sc->chip_tq = NULL; 16580 } 16581 16582 /* stop and reset the controller if it was open */ 16583 if (sc->state != BXE_STATE_CLOSED) { 16584 BXE_CORE_LOCK(sc); 16585 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16586 BXE_CORE_UNLOCK(sc); 16587 } 16588 16589 /* release the network interface */ 16590 if (ifp != NULL) { 16591 ether_ifdetach(ifp); 16592 } 16593 ifmedia_removeall(&sc->ifmedia); 16594 16595 /* XXX do the following based on driver state... */ 16596 16597 /* free the host hardware/software hsi structures */ 16598 bxe_free_hsi_mem(sc); 16599 16600 /* free ilt */ 16601 bxe_free_ilt_mem(sc); 16602 16603 /* release the interrupts */ 16604 bxe_interrupt_free(sc); 16605 16606 /* Release the mutexes*/ 16607 bxe_release_mutexes(sc); 16608 16609 /* Release the PCIe BAR mapped memory */ 16610 bxe_deallocate_bars(sc); 16611 16612 /* Release the FreeBSD interface. */ 16613 if (sc->ifp != NULL) { 16614 if_free(sc->ifp); 16615 } 16616 16617 pci_disable_busmaster(dev); 16618 16619 return (0); 16620} 16621 16622/* 16623 * Device shutdown function. 16624 * 16625 * Stops and resets the controller. 16626 * 16627 * Returns: 16628 * Nothing 16629 */ 16630static int 16631bxe_shutdown(device_t dev) 16632{ 16633 struct bxe_softc *sc; 16634 16635 sc = device_get_softc(dev); 16636 16637 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16638 16639 /* stop the periodic callout */ 16640 bxe_periodic_stop(sc); 16641 16642 BXE_CORE_LOCK(sc); 16643 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16644 BXE_CORE_UNLOCK(sc); 16645 16646 return (0); 16647} 16648 16649void 16650bxe_igu_ack_sb(struct bxe_softc *sc, 16651 uint8_t igu_sb_id, 16652 uint8_t segment, 16653 uint16_t index, 16654 uint8_t op, 16655 uint8_t update) 16656{ 16657 uint32_t igu_addr = sc->igu_base_addr; 16658 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16659 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16660} 16661 16662static void 16663bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16664 uint8_t func, 16665 uint8_t idu_sb_id, 16666 uint8_t is_pf) 16667{ 16668 uint32_t data, ctl, cnt = 100; 16669 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16670 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16671 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16672 uint32_t sb_bit = 1 << (idu_sb_id%32); 16673 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16674 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16675 16676 /* Not supported in BC mode */ 16677 if (CHIP_INT_MODE_IS_BC(sc)) { 16678 return; 16679 } 16680 16681 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16682 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16683 IGU_REGULAR_CLEANUP_SET | 16684 IGU_REGULAR_BCLEANUP); 16685 16686 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16687 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16688 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16689 16690 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16691 data, igu_addr_data); 16692 REG_WR(sc, igu_addr_data, data); 16693 16694 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16695 BUS_SPACE_BARRIER_WRITE); 16696 mb(); 16697 16698 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16699 ctl, igu_addr_ctl); 16700 REG_WR(sc, igu_addr_ctl, ctl); 16701 16702 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16703 BUS_SPACE_BARRIER_WRITE); 16704 mb(); 16705 16706 /* wait for clean up to finish */ 16707 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16708 DELAY(20000); 16709 } 16710 16711 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16712 BLOGD(sc, DBG_LOAD, 16713 "Unable to finish IGU cleanup: " 16714 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16715 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16716 } 16717} 16718 16719static void 16720bxe_igu_clear_sb(struct bxe_softc *sc, 16721 uint8_t idu_sb_id) 16722{ 16723 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16724} 16725 16726 16727 16728 16729 16730 16731 16732/*******************/ 16733/* ECORE CALLBACKS */ 16734/*******************/ 16735 16736static void 16737bxe_reset_common(struct bxe_softc *sc) 16738{ 16739 uint32_t val = 0x1400; 16740 16741 /* reset_common */ 16742 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16743 16744 if (CHIP_IS_E3(sc)) { 16745 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16746 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16747 } 16748 16749 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16750} 16751 16752static void 16753bxe_common_init_phy(struct bxe_softc *sc) 16754{ 16755 uint32_t shmem_base[2]; 16756 uint32_t shmem2_base[2]; 16757 16758 /* Avoid common init in case MFW supports LFA */ 16759 if (SHMEM2_RD(sc, size) > 16760 (uint32_t)offsetof(struct shmem2_region, 16761 lfa_host_addr[SC_PORT(sc)])) { 16762 return; 16763 } 16764 16765 shmem_base[0] = sc->devinfo.shmem_base; 16766 shmem2_base[0] = sc->devinfo.shmem2_base; 16767 16768 if (!CHIP_IS_E1x(sc)) { 16769 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16770 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16771 } 16772 16773 bxe_acquire_phy_lock(sc); 16774 elink_common_init_phy(sc, shmem_base, shmem2_base, 16775 sc->devinfo.chip_id, 0); 16776 bxe_release_phy_lock(sc); 16777} 16778 16779static void 16780bxe_pf_disable(struct bxe_softc *sc) 16781{ 16782 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16783 16784 val &= ~IGU_PF_CONF_FUNC_EN; 16785 16786 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16787 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16788 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16789} 16790 16791static void 16792bxe_init_pxp(struct bxe_softc *sc) 16793{ 16794 uint16_t devctl; 16795 int r_order, w_order; 16796 16797 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16798 16799 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", 
devctl); 16800 16801 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16802 16803 if (sc->mrrs == -1) { 16804 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16805 } else { 16806 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16807 r_order = sc->mrrs; 16808 } 16809 16810 ecore_init_pxp_arb(sc, r_order, w_order); 16811} 16812 16813static uint32_t 16814bxe_get_pretend_reg(struct bxe_softc *sc) 16815{ 16816 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16817 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16818 return (base + (SC_ABS_FUNC(sc)) * stride); 16819} 16820 16821/* 16822 * Called only on E1H or E2. 16823 * When pretending to be PF, the pretend value is the function number 0..7. 16824 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16825 * combination. 16826 */ 16827static int 16828bxe_pretend_func(struct bxe_softc *sc, 16829 uint16_t pretend_func_val) 16830{ 16831 uint32_t pretend_reg; 16832 16833 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16834 return (-1); 16835 } 16836 16837 /* get my own pretend register */ 16838 pretend_reg = bxe_get_pretend_reg(sc); 16839 REG_WR(sc, pretend_reg, pretend_func_val); 16840 REG_RD(sc, pretend_reg); 16841 return (0); 16842} 16843 16844static void 16845bxe_iov_init_dmae(struct bxe_softc *sc) 16846{ 16847 return; 16848#if 0 16849 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16850 16851 if (!IS_SRIOV(sc)) { 16852 return; 16853 } 16854 16855 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16856#endif 16857} 16858 16859#if 0 16860static int 16861bxe_iov_init_ilt(struct bxe_softc *sc, 16862 uint16_t line) 16863{ 16864 return (line); 16865#if 0 16866 int i; 16867 struct ecore_ilt* ilt = sc->ilt; 16868 16869 if (!IS_SRIOV(sc)) { 16870 return (line); 16871 } 16872 16873 /* set vfs ilt lines */ 16874 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16875 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16876 ilt->lines[line+i].page = hw_cxt->addr; 16877 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16878 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16879 } 16880 return (line+i); 16881#endif 16882} 16883#endif 16884 16885static void 16886bxe_iov_init_dq(struct bxe_softc *sc) 16887{ 16888 return; 16889#if 0 16890 if (!IS_SRIOV(sc)) { 16891 return; 16892 } 16893 16894 /* Set the DQ such that the CID reflect the abs_vfid */ 16895 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16896 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16897 16898 /* 16899 * Set VFs starting CID. If its > 0 the preceding CIDs are belong to 16900 * the PF L2 queues 16901 */ 16902 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16903 16904 /* The VF window size is the log2 of the max number of CIDs per VF */ 16905 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16906 16907 /* 16908 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16909 * the Pf doorbell size although the 2 are independent. 
 */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

    /*
     * No security checks for now -
     * configure single rule (out of 16) mask = 0x1, value = 0x0,
     * CID range 0 - 0x1ffff
     */
    REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
    REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

    /* set the number of VF allowed doorbells to the full DQ range */
    REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

    /* set the VF doorbell threshold */
    REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
#endif
}

/* send a NIG loopback debug packet */
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
    uint32_t wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20; /* SOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/*
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets.
 */
static int
bxe_int_mem_test(struct bxe_softc *sc)
{
    int factor;
    int count, i;
    uint32_t val = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        factor = 120;
    } else if (CHIP_REV_IS_EMUL(sc)) {
        factor = 200;
    } else {
        factor = 1;
    }

    /* disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bxe_lb_pckt(sc);

    /* TODO: do I reset the NIG statistics? */

    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0x10) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x10) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-1);
    }

    /* wait until PRS register shows 1 packet */
    count = (1000 * factor);
    while (count) {
        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        if (val == 1) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x1) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
        return (-2);
    }

    /* Reset and init BRB, PRS */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);

    /* Disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++) {
        bxe_lb_pckt(sc);
    }

    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = (1000 * factor);
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0xb0) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0xb0) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-3);
    }

    /* Wait until PRS register shows 2 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 2) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* Write 1 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait until PRS register shows 3 packets */
    DELAY(10000 * factor);

    /* Verify the PRS register now shows 3 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 3) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++) {
        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
    }

    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BLOGE(sc, "clear of NIG failed\n");
        return (-4);
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(sc)) {
        /* set NIC mode */
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
    }

    /* Enable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);

    return (0);
17106} 17107 17108static void 17109bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17110{ 17111 int is_required; 17112 uint32_t val; 17113 int port; 17114 17115 is_required = 0; 17116 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17117 SHARED_HW_CFG_FAN_FAILURE_MASK); 17118 17119 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17120 is_required = 1; 17121 } 17122 /* 17123 * The fan failure mechanism is usually related to the PHY type since 17124 * the power consumption of the board is affected by the PHY. Currently, 17125 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17126 */ 17127 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17128 for (port = PORT_0; port < PORT_MAX; port++) { 17129 is_required |= elink_fan_failure_det_req(sc, 17130 sc->devinfo.shmem_base, 17131 sc->devinfo.shmem2_base, 17132 port); 17133 } 17134 } 17135 17136 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17137 17138 if (is_required == 0) { 17139 return; 17140 } 17141 17142 /* Fan failure is indicated by SPIO 5 */ 17143 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17144 17145 /* set to active low mode */ 17146 val = REG_RD(sc, MISC_REG_SPIO_INT); 17147 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17148 REG_WR(sc, MISC_REG_SPIO_INT, val); 17149 17150 /* enable interrupt to signal the IGU */ 17151 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17152 val |= MISC_SPIO_SPIO5; 17153 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17154} 17155 17156static void 17157bxe_enable_blocks_attention(struct bxe_softc *sc) 17158{ 17159 uint32_t val; 17160 17161 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17162 if (!CHIP_IS_E1x(sc)) { 17163 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17164 } else { 17165 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17166 } 17167 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17168 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17169 /* 17170 * mask read length error interrupts in brb for parser 17171 * (parsing unit and 'checksum and crc' unit) 17172 * these errors are legal (PU reads fixed length and CAC can cause 17173 * read length error on truncated packets) 17174 */ 17175 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17176 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17177 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17178 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17179 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17180 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17181/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17182/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17183 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17184 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17185 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17186/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17187/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17188 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17189 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17190 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17191 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17192/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17193/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17194 17195 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17196 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17197 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17198 if (!CHIP_IS_E1x(sc)) { 17199 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17200 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17201 } 17202 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17203 17204 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17205 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17206 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 17207/* 
REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17208 17209 if (!CHIP_IS_E1x(sc)) { 17210 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17211 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17212 } 17213 17214 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17215 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17216/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17217 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17218} 17219 17220/** 17221 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17222 * 17223 * @sc: driver handle 17224 */ 17225static int 17226bxe_init_hw_common(struct bxe_softc *sc) 17227{ 17228 uint8_t abs_func_id; 17229 uint32_t val; 17230 17231 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17232 SC_ABS_FUNC(sc)); 17233 17234 /* 17235 * take the RESET lock to protect undi_unload flow from accessing 17236 * registers while we are resetting the chip 17237 */ 17238 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17239 17240 bxe_reset_common(sc); 17241 17242 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17243 17244 val = 0xfffc; 17245 if (CHIP_IS_E3(sc)) { 17246 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17247 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17248 } 17249 17250 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17251 17252 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17253 17254 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17255 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17256 17257 if (!CHIP_IS_E1x(sc)) { 17258 /* 17259 * 4-port mode or 2-port mode we need to turn off master-enable for 17260 * everyone. After that we turn it back on for self. So, we disregard 17261 * multi-function, and always disable all functions on the given path, 17262 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17263 */ 17264 for (abs_func_id = SC_PATH(sc); 17265 abs_func_id < (E2_FUNC_MAX * 2); 17266 abs_func_id += 2) { 17267 if (abs_func_id == SC_ABS_FUNC(sc)) { 17268 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17269 continue; 17270 } 17271 17272 bxe_pretend_func(sc, abs_func_id); 17273 17274 /* clear pf enable */ 17275 bxe_pf_disable(sc); 17276 17277 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17278 } 17279 } 17280 17281 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17282 17283 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17284 17285 if (CHIP_IS_E1(sc)) { 17286 /* 17287 * enable HW interrupt from PXP on USDM overflow 17288 * bit 16 on INT_MASK_0 17289 */ 17290 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17291 } 17292 17293 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17294 bxe_init_pxp(sc); 17295 17296#ifdef __BIG_ENDIAN 17297 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17298 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17299 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17300 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17301 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17302 /* make sure this value is 0 */ 17303 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17304 17305 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17306 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17307 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17308 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17309 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17310#endif 17311 17312 ecore_ilt_init_page_size(sc, INITOP_SET); 17313 17314 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17315 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17316 } 17317 17318 /* let the HW do it's magic... 
 */
    DELAY(100000);

    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 CFG failed\n");
        return (-1);
    }
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 RD_INIT failed\n");
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "after pxp init\n");

    /*
     * Timer bug workaround for E2 only. We need to set the entire ILT to have
     * entries with value "0" and valid bit on. This needs to be done by the
     * first PF that is loaded in a path (i.e. common phase)
     */
    if (!CHIP_IS_E1x(sc)) {
/*
 * In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing
 * fatal will occur. The workaround:
 * 1. First PF driver which loads on a path will:
 *    a. After taking the chip out of reset, by using pretend,
 *       it will write "0" to the following registers of
 *       the other vnics.
 *       REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *       REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 *       REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 *       And for itself it will write '1' to
 *       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *       dmae-operations (writing to pram for example.)
 *       note: can be done for only function 6,7 but cleaner this
 *             way.
 *    b. Write zero+valid to the entire ILT.
 *    c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *       VNIC3 (of that port). The range allocated will be the
 *       entire ILT. This is needed to prevent ILT range error.
 * 2. Any PF driver load flow:
 *    a. ILT update with the physical addresses of the allocated
 *       logical pages.
 *    b. Wait 20msec. - note that this timeout is needed to make
 *       sure there are no requests in one of the PXP internal
 *       queues with "old" ILT addresses.
 *    c. PF enable in the PGLC.
 *    d. Clear the was_error of the PF in the PGLC. (could have
 *       occurred while driver was down)
 *    e. PF enable in the CFC (WEAK + STRONG)
 *    f. Timers scan enable
 * 3. PF driver unload flow:
 *    a. Clear the Timers scan_en.
 *    b. Polling for scan_on=0 for that PF.
 *    c. Clear the PF enable bit in the PXP.
 *    d. Clear the PF enable in the CFC (WEAK + STRONG)
 *    e. Write zero+valid to all ILT entries (The valid bit must
 *       stay set)
 *    f. If this is VNIC 3 of a port then also init
 *       first_timers_ilt_entry to zero and last_timers_ilt_entry
 *       to the last entry in the ILT.
 *
 * Notes:
 * Currently the PF error in the PGLC is non-recoverable.
 * In the future there will be a recovery routine for this error.
 * Currently attention is masked.
 * Having an MCP lock on the load/unload process does not guarantee that
 * there is no Timer disable during Func6/7 enable. 
This is because the 17396 * Timers scan is currently being cleared by the MCP on FLR. 17397 * Step 2.d can be done only for PF6/7 and the driver can also check if 17398 * there is error before clearing it. But the flow above is simpler and 17399 * more general. 17400 * All ILT entries are written by zero+valid and not just PF6/7 17401 * ILT entries since in the future the ILT entries allocation for 17402 * PF-s might be dynamic. 17403 */ 17404 struct ilt_client_info ilt_cli; 17405 struct ecore_ilt ilt; 17406 17407 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17408 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17409 17410 /* initialize dummy TM client */ 17411 ilt_cli.start = 0; 17412 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17413 ilt_cli.client_num = ILT_CLIENT_TM; 17414 17415 /* 17416 * Step 1: set zeroes to all ilt page entries with valid bit on 17417 * Step 2: set the timers first/last ilt entry to point 17418 * to the entire range to prevent ILT range error for 3rd/4th 17419 * vnic (this code assumes existence of the vnic) 17420 * 17421 * both steps performed by call to ecore_ilt_client_init_op() 17422 * with dummy TM client 17423 * 17424 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17425 * and his brother are split registers 17426 */ 17427 17428 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17429 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17430 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17431 17432 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17433 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17434 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17435 } 17436 17437 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17438 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17439 17440 if (!CHIP_IS_E1x(sc)) { 17441 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17442 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17443 17444 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17445 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17446 17447 /* let the HW do it's magic... 
*/ 17448 do { 17449 DELAY(200000); 17450 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17451 } while (factor-- && (val != 1)); 17452 17453 if (val != 1) { 17454 BLOGE(sc, "ATC_INIT failed\n"); 17455 return (-1); 17456 } 17457 } 17458 17459 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17460 17461 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17462 17463 bxe_iov_init_dmae(sc); 17464 17465 /* clean the DMAE memory */ 17466 sc->dmae_ready = 1; 17467 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17468 17469 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17470 17471 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17472 17473 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17474 17475 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17476 17477 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17478 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17479 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17480 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17481 17482 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17483 17484 /* QM queues pointers table */ 17485 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17486 17487 /* soft reset pulse */ 17488 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17489 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17490 17491 if (CNIC_SUPPORT(sc)) 17492 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17493 17494 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17495 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17496 if (!CHIP_REV_IS_SLOW(sc)) { 17497 /* enable hw interrupt from doorbell Q */ 17498 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17499 } 17500 17501 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17502 17503 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17504 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17505 17506 if (!CHIP_IS_E1(sc)) { 17507 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17508 } 17509 17510 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17511 if (IS_MF_AFEX(sc)) { 17512 /* 17513 * configure that AFEX and VLAN headers must be 17514 * received in AFEX mode 17515 */ 17516 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17517 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17518 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17519 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17520 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17521 } else { 17522 /* 17523 * Bit-map indicating which L2 hdrs may appear 17524 * after the basic Ethernet header 17525 */ 17526 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17527 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17528 } 17529 } 17530 17531 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17532 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17533 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17534 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17535 17536 if (!CHIP_IS_E1x(sc)) { 17537 /* reset VFC memories */ 17538 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17539 VFC_MEMORIES_RST_REG_CAM_RST | 17540 VFC_MEMORIES_RST_REG_RAM_RST); 17541 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17542 VFC_MEMORIES_RST_REG_CAM_RST | 17543 VFC_MEMORIES_RST_REG_RAM_RST); 17544 17545 DELAY(20000); 17546 } 17547 17548 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17549 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17550 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17551 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17552 17553 /* sync semi rtc */ 17554 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17555 0x80000000); 17556 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17557 0x80000000); 17558 17559 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17560 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17561 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17562 17563 if (!CHIP_IS_E1x(sc)) { 17564 if (IS_MF_AFEX(sc)) { 17565 /* 17566 * configure that AFEX and VLAN headers must be 17567 * sent in AFEX mode 17568 */ 17569 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17570 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17571 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17572 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17573 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17574 } else { 17575 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17576 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17577 } 17578 } 17579 17580 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17581 17582 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17583 17584 if (CNIC_SUPPORT(sc)) { 17585 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17586 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17587 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17588 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17589 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17590 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17591 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17592 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17593 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17594 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17595 } 17596 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17597 17598 if (sizeof(union cdu_context) != 1024) { 17599 /* we currently assume that a context is 1024 bytes */ 17600 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17601 (long)sizeof(union cdu_context)); 17602 } 17603 17604 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17605 val = (4 << 24) + (0 << 12) + 1024; 17606 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17607 17608 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17609 17610 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17611 /* enable context validation interrupt from CFC */ 17612 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17613 17614 /* set the thresholds to prevent CFC/CDU race */ 17615 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17616 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17617 17618 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17619 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17620 } 17621 17622 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17623 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17624 17625 /* Reset PCIE errors for debug */ 17626 REG_WR(sc, 0x2814, 0xffffffff); 17627 REG_WR(sc, 0x3820, 0xffffffff); 17628 17629 if (!CHIP_IS_E1x(sc)) { 
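/* E2 and newer: also reset the TL-level 'unsupported request' error indications for the PCIe functions (the ERR_UNSPPORT* bits below) */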
17630 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 17631 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 17632 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17633 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17634 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17635 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17636 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17637 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17638 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17639 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17640 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17641 } 17642 17643 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17644 17645 if (!CHIP_IS_E1(sc)) { 17646 /* in E3 this is done in the per-port section */ 17647 if (!CHIP_IS_E3(sc)) 17648 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17649 } 17650 17651 if (CHIP_IS_E1H(sc)) { 17652 /* not applicable for E2 (and above ...) */ 17653 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17654 } 17655 17656 if (CHIP_REV_IS_SLOW(sc)) { 17657 DELAY(200000); 17658 } 17659 17660 /* finish CFC init */ 17661 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17662 if (val != 1) { 17663 BLOGE(sc, "CFC LL_INIT failed\n"); 17664 return (-1); 17665 } 17666 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17667 if (val != 1) { 17668 BLOGE(sc, "CFC AC_INIT failed\n"); 17669 return (-1); 17670 } 17671 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17672 if (val != 1) { 17673 BLOGE(sc, "CFC CAM_INIT failed\n"); 17674 return (-1); 17675 } 17676 REG_WR(sc, CFC_REG_DEBUG0, 0); 17677 17678 if (CHIP_IS_E1(sc)) { 17679 /* read NIG statistic to see if this is our first up since powerup */ 17680 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17681 val = *BXE_SP(sc, wb_data[0]); 17682 17683 /* do internal memory self test */ 17684 if ((val == 0) && bxe_int_mem_test(sc)) { 17685 BLOGE(sc, "internal mem self test failed\n"); 17686 return (-1); 17687 } 17688 } 17689 17690 bxe_setup_fan_failure_detection(sc); 17691 17692 /* clear PXP2 attentions */ 17693 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17694 17695 bxe_enable_blocks_attention(sc); 17696 17697 if (!CHIP_REV_IS_SLOW(sc)) { 17698 ecore_enable_blocks_parity(sc); 17699 } 17700 17701 if (!BXE_NOMCP(sc)) { 17702 if (CHIP_IS_E1x(sc)) { 17703 bxe_common_init_phy(sc); 17704 } 17705 } 17706 17707 return (0); 17708} 17709 17710/** 17711 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17712 * 17713 * @sc: driver handle 17714 */ 17715static int 17716bxe_init_hw_common_chip(struct bxe_softc *sc) 17717{ 17718 int rc = bxe_init_hw_common(sc); 17719 17720 if (rc) { 17721 return (rc); 17722 } 17723 17724 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 17725 if (!BXE_NOMCP(sc)) { 17726 bxe_common_init_phy(sc); 17727 } 17728 17729 return (0); 17730} 17731 17732static int 17733bxe_init_hw_port(struct bxe_softc *sc) 17734{ 17735 int port = SC_PORT(sc); 17736 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 17737 uint32_t low, high; 17738 uint32_t val; 17739 17740 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17741 17742 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17743 17744 ecore_init_block(sc, BLOCK_MISC, init_phase); 17745 ecore_init_block(sc, BLOCK_PXP, init_phase); 17746 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17747 17748 /* 17749 * Timers bug workaround: disables the pf_master bit in pglue at 17750 * common phase, we need to enable it here before any dmae accesses are 17751 * attempted.
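Without the master bit set those DMAE transactions would simply fail.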
Therefore we manually added the enable-master to the 17752 * port phase (it also happens in the function phase) 17753 */ 17754 if (!CHIP_IS_E1x(sc)) { 17755 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17756 } 17757 17758 ecore_init_block(sc, BLOCK_ATC, init_phase); 17759 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17760 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17761 ecore_init_block(sc, BLOCK_QM, init_phase); 17762 17763 ecore_init_block(sc, BLOCK_TCM, init_phase); 17764 ecore_init_block(sc, BLOCK_UCM, init_phase); 17765 ecore_init_block(sc, BLOCK_CCM, init_phase); 17766 ecore_init_block(sc, BLOCK_XCM, init_phase); 17767 17768 /* QM cid (connection) count */ 17769 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17770 17771 if (CNIC_SUPPORT(sc)) { 17772 ecore_init_block(sc, BLOCK_TM, init_phase); 17773 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17774 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17775 } 17776 17777 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17778 17779 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17780 17781 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17782 if (IS_MF(sc)) { 17783 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17784 } else if (sc->mtu > 4096) { 17785 if (BXE_ONE_PORT(sc)) { 17786 low = 160; 17787 } else { 17788 val = sc->mtu; 17789 /* (24*1024 + val*4)/256 */ 17790 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17791 } 17792 } else { 17793 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17794 } 17795 high = (low + 56); /* 14*1024/256 */ 17796 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17797 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17798 } 17799 17800 if (CHIP_IS_MODE_4_PORT(sc)) { 17801 REG_WR(sc, SC_PORT(sc) ? 17802 BRB1_REG_MAC_GUARANTIED_1 : 17803 BRB1_REG_MAC_GUARANTIED_0, 40); 17804 } 17805 17806 ecore_init_block(sc, BLOCK_PRS, init_phase); 17807 if (CHIP_IS_E3B0(sc)) { 17808 if (IS_MF_AFEX(sc)) { 17809 /* configure headers for AFEX mode */ 17810 REG_WR(sc, SC_PORT(sc) ? 17811 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17812 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17813 REG_WR(sc, SC_PORT(sc) ? 17814 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17815 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17816 REG_WR(sc, SC_PORT(sc) ? 17817 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17818 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17819 } else { 17820 /* Ovlan exists only if we are in multi-function + 17821 * switch-dependent mode, in switch-independent there 17822 * are no ovlan headers 17823 */ 17824 REG_WR(sc, SC_PORT(sc) ? 17825 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17826 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17827 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 17828 } 17829 } 17830 17831 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17832 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17833 ecore_init_block(sc, BLOCK_USDM, init_phase); 17834 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17835 17836 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17837 ecore_init_block(sc, BLOCK_USEM, init_phase); 17838 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17839 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17840 17841 ecore_init_block(sc, BLOCK_UPB, init_phase); 17842 ecore_init_block(sc, BLOCK_XPB, init_phase); 17843 17844 ecore_init_block(sc, BLOCK_PBF, init_phase); 17845 17846 if (CHIP_IS_E1x(sc)) { 17847 /* configure PBF to work without PAUSE mtu 9000 */ 17848 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17849 17850 /* update threshold */ 17851 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17852 /* update init credit */ 17853 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17854 17855 /* probe changes */ 17856 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17857 DELAY(50); 17858 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17859 } 17860 17861 if (CNIC_SUPPORT(sc)) { 17862 ecore_init_block(sc, BLOCK_SRC, init_phase); 17863 } 17864 17865 ecore_init_block(sc, BLOCK_CDU, init_phase); 17866 ecore_init_block(sc, BLOCK_CFC, init_phase); 17867 17868 if (CHIP_IS_E1(sc)) { 17869 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17870 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17871 } 17872 ecore_init_block(sc, BLOCK_HC, init_phase); 17873 17874 ecore_init_block(sc, BLOCK_IGU, init_phase); 17875 17876 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17877 /* init aeu_mask_attn_func_0/1: 17878 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17879 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17880 * bits 4-7 are used for "per vn group attention" */ 17881 val = IS_MF(sc) ? 0xF7 : 0x7; 17882 /* Enable DCBX attention for all but E1 */ 17883 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17884 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17885 17886 ecore_init_block(sc, BLOCK_NIG, init_phase); 17887 17888 if (!CHIP_IS_E1x(sc)) { 17889 /* Bit-map indicating which L2 hdrs may appear after the 17890 * basic Ethernet header 17891 */ 17892 if (IS_MF_AFEX(sc)) { 17893 REG_WR(sc, SC_PORT(sc) ? 17894 NIG_REG_P1_HDRS_AFTER_BASIC : 17895 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17896 } else { 17897 REG_WR(sc, SC_PORT(sc) ? 17898 NIG_REG_P1_HDRS_AFTER_BASIC : 17899 NIG_REG_P0_HDRS_AFTER_BASIC, 17900 IS_MF_SD(sc) ? 7 : 6); 17901 } 17902 17903 if (CHIP_IS_E3(sc)) { 17904 REG_WR(sc, SC_PORT(sc) ? 17905 NIG_REG_LLH1_MF_MODE : 17906 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17907 } 17908 } 17909 if (!CHIP_IS_E3(sc)) { 17910 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17911 } 17912 17913 if (!CHIP_IS_E1(sc)) { 17914 /* 0x2 disable mf_ov, 0x1 enable */ 17915 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17916 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17917 17918 if (!CHIP_IS_E1x(sc)) { 17919 val = 0; 17920 switch (sc->devinfo.mf_info.mf_mode) { 17921 case MULTI_FUNCTION_SD: 17922 val = 1; 17923 break; 17924 case MULTI_FUNCTION_SI: 17925 case MULTI_FUNCTION_AFEX: 17926 val = 2; 17927 break; 17928 } 17929 17930 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE : 17931 NIG_REG_LLH0_CLS_TYPE), val); 17932 } 17933 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 17934 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 17935 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 17936 } 17937 17938 /* If SPIO5 is set to generate interrupts, enable it for this port */ 17939 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17940 if (val & MISC_SPIO_SPIO5) { 17941 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 17942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 17943 val = REG_RD(sc, reg_addr); 17944 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 17945 REG_WR(sc, reg_addr, val); 17946 } 17947 17948 return (0); 17949} 17950 17951static uint32_t 17952bxe_flr_clnup_reg_poll(struct bxe_softc *sc, 17953 uint32_t reg, 17954 uint32_t expected, 17955 uint32_t poll_count) 17956{ 17957 uint32_t cur_cnt = poll_count; 17958 uint32_t val; 17959 17960 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 17961 DELAY(FLR_WAIT_INTERVAL); 17962 } 17963 17964 return (val); 17965} 17966 17967static int 17968bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, 17969 uint32_t reg, 17970 char *msg, 17971 uint32_t poll_cnt) 17972{ 17973 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 17974 17975 if (val != 0) { 17976 BLOGE(sc, "%s usage count=%d\n", msg, val); 17977 return (1); 17978 } 17979 17980 return (0); 17981} 17982 17983/* Common routines with VF FLR cleanup */ 17984static uint32_t 17985bxe_flr_clnup_poll_count(struct bxe_softc *sc) 17986{ 17987 /* adjust polling timeout */ 17988 if (CHIP_REV_IS_EMUL(sc)) { 17989 return (FLR_POLL_CNT * 2000); 17990 } 17991 17992 if (CHIP_REV_IS_FPGA(sc)) { 17993 return (FLR_POLL_CNT * 120); 17994 } 17995 17996 return (FLR_POLL_CNT); 17997} 17998 17999static int 18000bxe_poll_hw_usage_counters(struct bxe_softc *sc, 18001 uint32_t poll_cnt) 18002{ 18003 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 18004 if (bxe_flr_clnup_poll_hw_counter(sc, 18005 CFC_REG_NUM_LCIDS_INSIDE_PF, 18006 "CFC PF usage counter timed out", 18007 poll_cnt)) { 18008 return (1); 18009 } 18010 18011 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 18012 if (bxe_flr_clnup_poll_hw_counter(sc, 18013 DORQ_REG_PF_USAGE_CNT, 18014 "DQ PF usage counter timed out", 18015 poll_cnt)) { 18016 return (1); 18017 } 18018 18019 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 18020 if (bxe_flr_clnup_poll_hw_counter(sc, 18021 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), 18022 "QM PF usage counter timed out", 18023 poll_cnt)) { 18024 return (1); 18025 } 18026 18027 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 18028 if (bxe_flr_clnup_poll_hw_counter(sc, 18029 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), 18030 "Timers VNIC usage counter timed out", 18031 poll_cnt)) { 18032 return (1); 18033 } 18034 18035 if (bxe_flr_clnup_poll_hw_counter(sc, 18036 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), 18037 "Timers NUM_SCANS usage counter timed out", 18038 poll_cnt)) { 18039 return (1); 18040 } 18041 18042 /* Wait for DMAE PF usage counter to zero */ 18043 if (bxe_flr_clnup_poll_hw_counter(sc, 18044 dmae_reg_go_c[INIT_DMAE_C(sc)], 18045 "DMAE command register timed out", 18046 poll_cnt)) { 18047 return (1); 18048 } 18049 18050 return (0); 18051} 18052 18053#define OP_GEN_PARAM(param) \ 18054 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 18055#define OP_GEN_TYPE(type) \ 18056 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 18057#define OP_GEN_AGG_VECT(index) \ 18058 (((index) << 
SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 18059 18060static int 18061bxe_send_final_clnup(struct bxe_softc *sc, 18062 uint8_t clnup_func, 18063 uint32_t poll_cnt) 18064{ 18065 uint32_t op_gen_command = 0; 18066 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 18067 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 18068 int ret = 0; 18069 18070 if (REG_RD(sc, comp_addr)) { 18071 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 18072 return (1); 18073 } 18074 18075 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 18076 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 18077 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 18078 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 18079 18080 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 18081 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 18082 18083 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 18084 BLOGE(sc, "FW final cleanup did not succeed\n"); 18085 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 18086 (REG_RD(sc, comp_addr))); 18087 bxe_panic(sc, ("FLR cleanup failed\n")); 18088 return (1); 18089 } 18090 18091 /* Zero completion for next FLR */ 18092 REG_WR(sc, comp_addr, 0); 18093 18094 return (ret); 18095} 18096 18097static void 18098bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 18099 struct pbf_pN_buf_regs *regs, 18100 uint32_t poll_count) 18101{ 18102 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 18103 uint32_t cur_cnt = poll_count; 18104 18105 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 18106 crd = crd_start = REG_RD(sc, regs->crd); 18107 init_crd = REG_RD(sc, regs->init_crd); 18108 18109 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 18110 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 18111 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 18112 18113 while ((crd != init_crd) && 18114 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 18115 (init_crd - crd_start))) { 18116 if (cur_cnt--) { 18117 DELAY(FLR_WAIT_INTERVAL); 18118 crd = REG_RD(sc, regs->crd); 18119 crd_freed = REG_RD(sc, regs->crd_freed); 18120 } else { 18121 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 18122 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 18123 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 18124 break; 18125 } 18126 } 18127 18128 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 18129 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18130} 18131 18132static void 18133bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 18134 struct pbf_pN_cmd_regs *regs, 18135 uint32_t poll_count) 18136{ 18137 uint32_t occup, to_free, freed, freed_start; 18138 uint32_t cur_cnt = poll_count; 18139 18140 occup = to_free = REG_RD(sc, regs->lines_occup); 18141 freed = freed_start = REG_RD(sc, regs->lines_freed); 18142 18143 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18144 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 18145 18146 while (occup && 18147 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 18148 if (cur_cnt--) { 18149 DELAY(FLR_WAIT_INTERVAL); 18150 occup = REG_RD(sc, regs->lines_occup); 18151 freed = REG_RD(sc, regs->lines_freed); 18152 } else { 18153 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 18154 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18155 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", 
regs->pN, freed); 18156 break; 18157 } 18158 } 18159 18160 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18161 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18162} 18163 18164static void 18165bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18166{ 18167 struct pbf_pN_cmd_regs cmd_regs[] = { 18168 {0, (CHIP_IS_E3B0(sc)) ? 18169 PBF_REG_TQ_OCCUPANCY_Q0 : 18170 PBF_REG_P0_TQ_OCCUPANCY, 18171 (CHIP_IS_E3B0(sc)) ? 18172 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18173 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18174 {1, (CHIP_IS_E3B0(sc)) ? 18175 PBF_REG_TQ_OCCUPANCY_Q1 : 18176 PBF_REG_P1_TQ_OCCUPANCY, 18177 (CHIP_IS_E3B0(sc)) ? 18178 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18179 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18180 {4, (CHIP_IS_E3B0(sc)) ? 18181 PBF_REG_TQ_OCCUPANCY_LB_Q : 18182 PBF_REG_P4_TQ_OCCUPANCY, 18183 (CHIP_IS_E3B0(sc)) ? 18184 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18185 PBF_REG_P4_TQ_LINES_FREED_CNT} 18186 }; 18187 18188 struct pbf_pN_buf_regs buf_regs[] = { 18189 {0, (CHIP_IS_E3B0(sc)) ? 18190 PBF_REG_INIT_CRD_Q0 : 18191 PBF_REG_P0_INIT_CRD , 18192 (CHIP_IS_E3B0(sc)) ? 18193 PBF_REG_CREDIT_Q0 : 18194 PBF_REG_P0_CREDIT, 18195 (CHIP_IS_E3B0(sc)) ? 18196 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18197 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18198 {1, (CHIP_IS_E3B0(sc)) ? 18199 PBF_REG_INIT_CRD_Q1 : 18200 PBF_REG_P1_INIT_CRD, 18201 (CHIP_IS_E3B0(sc)) ? 18202 PBF_REG_CREDIT_Q1 : 18203 PBF_REG_P1_CREDIT, 18204 (CHIP_IS_E3B0(sc)) ? 18205 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18206 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18207 {4, (CHIP_IS_E3B0(sc)) ? 18208 PBF_REG_INIT_CRD_LB_Q : 18209 PBF_REG_P4_INIT_CRD, 18210 (CHIP_IS_E3B0(sc)) ? 18211 PBF_REG_CREDIT_LB_Q : 18212 PBF_REG_P4_CREDIT, 18213 (CHIP_IS_E3B0(sc)) ? 18214 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18215 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18216 }; 18217 18218 int i; 18219 18220 /* Verify the command queues are flushed P0, P1, P4 */ 18221 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18222 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18223 } 18224 18225 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18226 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18227 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18228 } 18229} 18230 18231static void 18232bxe_hw_enable_status(struct bxe_softc *sc) 18233{ 18234 uint32_t val; 18235 18236 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18237 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18238 18239 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18240 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18241 18242 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18243 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18244 18245 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18246 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18247 18248 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18249 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18250 18251 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18252 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18253 18254 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18255 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18256 18257 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18258 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18259} 18260 18261static int 18262bxe_pf_flr_clnup(struct bxe_softc *sc) 18263{ 18264 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18265 18266 BLOGD(sc, DBG_LOAD, 
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18267 18268 /* Re-enable PF target read access */ 18269 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18270 18271 /* Poll HW usage counters */ 18272 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18273 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18274 return (-1); 18275 } 18276 18277 /* Zero the igu 'trailing edge' and 'leading edge' */ 18278 18279 /* Send the FW cleanup command */ 18280 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18281 return (-1); 18282 } 18283 18284 /* ATC cleanup */ 18285 18286 /* Verify TX hw is flushed */ 18287 bxe_tx_hw_flushed(sc, poll_cnt); 18288 18289 /* Wait 100ms (not adjusted according to platform) */ 18290 DELAY(100000); 18291 18292 /* Verify no pending pci transactions */ 18293 if (bxe_is_pcie_pending(sc)) { 18294 BLOGE(sc, "PCIE Transactions still pending\n"); 18295 } 18296 18297 /* Debug */ 18298 bxe_hw_enable_status(sc); 18299 18300 /* 18301 * Master enable - Due to WB DMAE writes performed before this 18302 * register is re-initialized as part of the regular function init 18303 */ 18304 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18305 18306 return (0); 18307} 18308 18309#if 0 18310static void 18311bxe_init_searcher(struct bxe_softc *sc) 18312{ 18313 int port = SC_PORT(sc); 18314 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18315 /* T1 hash bits value determines the T1 number of entries */ 18316 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18317} 18318#endif 18319 18320static int 18321bxe_init_hw_func(struct bxe_softc *sc) 18322{ 18323 int port = SC_PORT(sc); 18324 int func = SC_FUNC(sc); 18325 int init_phase = PHASE_PF0 + func; 18326 struct ecore_ilt *ilt = sc->ilt; 18327 uint16_t cdu_ilt_start; 18328 uint32_t addr, val; 18329 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18330 int i, main_mem_width, rc; 18331 18332 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18333 18334 /* FLR cleanup */ 18335 if (!CHIP_IS_E1x(sc)) { 18336 rc = bxe_pf_flr_clnup(sc); 18337 if (rc) { 18338 BLOGE(sc, "FLR cleanup failed!\n"); 18339 // XXX bxe_fw_dump(sc); 18340 // XXX bxe_idle_chk(sc); 18341 return (rc); 18342 } 18343 } 18344 18345 /* set MSI reconfigure capability */ 18346 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18347 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18348 val = REG_RD(sc, addr); 18349 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18350 REG_WR(sc, addr, val); 18351 } 18352 18353 ecore_init_block(sc, BLOCK_PXP, init_phase); 18354 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18355 18356 ilt = sc->ilt; 18357 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18358 18359#if 0 18360 if (IS_SRIOV(sc)) { 18361 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; 18362 } 18363 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); 18364 18365#if (BXE_FIRST_VF_CID > 0) 18366 /* 18367 * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes 18368 * those of the VFs, so start line should be reset 18369 */ 18370 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18371#endif 18372#endif 18373 18374 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18375 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18376 ilt->lines[cdu_ilt_start + i].page_mapping = 18377 sc->context[i].vcxt_dma.paddr; 18378 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18379 } 18380 ecore_ilt_init_op(sc, INITOP_SET); 18381 18382#if 0 18383 if (!CONFIGURE_NIC_MODE(sc)) { 18384 bxe_init_searcher(sc); 18385 REG_WR(sc, PRS_REG_NIC_MODE, 0); 18386 BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); 18387 } else 18388#endif 18389 { 18390 /* Set NIC mode */ 18391 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18392 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18393 } 18394 18395 if (!CHIP_IS_E1x(sc)) { 18396 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18397 18398 /* Turn on a single ISR mode in IGU if driver is going to use 18399 * INT#x or MSI 18400 */ 18401 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18402 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18403 } 18404 18405 /* 18406 * Timers workaround bug: function init part. 18407 * Need to wait 20msec after initializing ILT, 18408 * needed to make sure there are no requests in 18409 * one of the PXP internal queues with "old" ILT addresses 18410 */ 18411 DELAY(20000); 18412 18413 /* 18414 * Master enable - Due to WB DMAE writes performed before this 18415 * register is re-initialized as part of the regular function 18416 * init 18417 */ 18418 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18419 /* Enable the function in IGU */ 18420 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18421 } 18422 18423 sc->dmae_ready = 1; 18424 18425 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18426 18427 if (!CHIP_IS_E1x(sc)) 18428 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18429 18430 ecore_init_block(sc, BLOCK_ATC, init_phase); 18431 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18432 ecore_init_block(sc, BLOCK_NIG, init_phase); 18433 ecore_init_block(sc, BLOCK_SRC, init_phase); 18434 ecore_init_block(sc, BLOCK_MISC, init_phase); 18435 ecore_init_block(sc, BLOCK_TCM, init_phase); 18436 ecore_init_block(sc, BLOCK_UCM, init_phase); 18437 ecore_init_block(sc, BLOCK_CCM, init_phase); 18438 ecore_init_block(sc, BLOCK_XCM, init_phase); 18439 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18440 ecore_init_block(sc, BLOCK_USEM, init_phase); 18441 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18442 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18443 18444 if (!CHIP_IS_E1x(sc)) 18445 REG_WR(sc, QM_REG_PF_EN, 1); 18446 18447 if (!CHIP_IS_E1x(sc)) { 18448 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18449 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18450 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18451 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18452 } 18453 
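/* per-function init of the remaining blocks follows: QM, TM, DORQ, BRB, PRS, the SDMs, PBF, CDU, CFC, AEU, then HC/IGU */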
ecore_init_block(sc, BLOCK_QM, init_phase); 18454 18455 ecore_init_block(sc, BLOCK_TM, init_phase); 18456 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18457 18458 bxe_iov_init_dq(sc); 18459 18460 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18461 ecore_init_block(sc, BLOCK_PRS, init_phase); 18462 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18463 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18464 ecore_init_block(sc, BLOCK_USDM, init_phase); 18465 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18466 ecore_init_block(sc, BLOCK_UPB, init_phase); 18467 ecore_init_block(sc, BLOCK_XPB, init_phase); 18468 ecore_init_block(sc, BLOCK_PBF, init_phase); 18469 if (!CHIP_IS_E1x(sc)) 18470 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18471 18472 ecore_init_block(sc, BLOCK_CDU, init_phase); 18473 18474 ecore_init_block(sc, BLOCK_CFC, init_phase); 18475 18476 if (!CHIP_IS_E1x(sc)) 18477 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18478 18479 if (IS_MF(sc)) { 18480 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18481 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18482 } 18483 18484 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18485 18486 /* HC init per function */ 18487 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18488 if (CHIP_IS_E1H(sc)) { 18489 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18490 18491 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18492 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18493 } 18494 ecore_init_block(sc, BLOCK_HC, init_phase); 18495 18496 } else { 18497 int num_segs, sb_idx, prod_offset; 18498 18499 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18500 18501 if (!CHIP_IS_E1x(sc)) { 18502 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18503 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18504 } 18505 18506 ecore_init_block(sc, BLOCK_IGU, init_phase); 18507 18508 if (!CHIP_IS_E1x(sc)) { 18509 int dsb_idx = 0; 18510 /** 18511 * Producer memory: 18512 * E2 mode: address 0-135 match to the mapping memory; 18513 * 136 - PF0 default prod; 137 - PF1 default prod; 18514 * 138 - PF2 default prod; 139 - PF3 default prod; 18515 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18516 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18517 * 144-147 reserved. 18518 * 18519 * E1.5 mode - In backward compatible mode; 18520 * for non default SB; each even line in the memory 18521 * holds the U producer and each odd line holds 18522 * the C producer. The first 128 producers are for 18523 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18524 * producers are for the DSB for each PF. 18525 * Each PF has five segments: (the order inside each 18526 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18527 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18528 * 144-147 attn prods; 18529 */ 18530 /* non-default-status-blocks */ 18531 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18532 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18533 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18534 prod_offset = (sc->igu_base_sb + sb_idx) * 18535 num_segs; 18536 18537 for (i = 0; i < num_segs; i++) { 18538 addr = IGU_REG_PROD_CONS_MEMORY + 18539 (prod_offset + i) * 4; 18540 REG_WR(sc, addr, 0); 18541 } 18542 /* send consumer update with value 0 */ 18543 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18544 USTORM_ID, 0, IGU_INT_NOP, 1); 18545 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18546 } 18547 18548 /* default-status-blocks */ 18549 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 
18550 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 18551 18552 if (CHIP_IS_MODE_4_PORT(sc)) 18553 dsb_idx = SC_FUNC(sc); 18554 else 18555 dsb_idx = SC_VN(sc); 18556 18557 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 18558 IGU_BC_BASE_DSB_PROD + dsb_idx : 18559 IGU_NORM_BASE_DSB_PROD + dsb_idx); 18560 18561 /* 18562 * igu prods come in chunks of E1HVN_MAX (4) - 18563 * it does not matter what the current chip mode is 18564 */ 18565 for (i = 0; i < (num_segs * E1HVN_MAX); 18566 i += E1HVN_MAX) { 18567 addr = IGU_REG_PROD_CONS_MEMORY + 18568 (prod_offset + i)*4; 18569 REG_WR(sc, addr, 0); 18570 } 18571 /* send consumer update with 0 */ 18572 if (CHIP_INT_MODE_IS_BC(sc)) { 18573 bxe_ack_sb(sc, sc->igu_dsb_id, 18574 USTORM_ID, 0, IGU_INT_NOP, 1); 18575 bxe_ack_sb(sc, sc->igu_dsb_id, 18576 CSTORM_ID, 0, IGU_INT_NOP, 1); 18577 bxe_ack_sb(sc, sc->igu_dsb_id, 18578 XSTORM_ID, 0, IGU_INT_NOP, 1); 18579 bxe_ack_sb(sc, sc->igu_dsb_id, 18580 TSTORM_ID, 0, IGU_INT_NOP, 1); 18581 bxe_ack_sb(sc, sc->igu_dsb_id, 18582 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18583 } else { 18584 bxe_ack_sb(sc, sc->igu_dsb_id, 18585 USTORM_ID, 0, IGU_INT_NOP, 1); 18586 bxe_ack_sb(sc, sc->igu_dsb_id, 18587 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18588 } 18589 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 18590 18591 /* !!! these should become driver const once 18592 rf-tool supports split-68 const */ 18593 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 18594 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 18595 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 18596 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 18597 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 18598 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 18599 } 18600 } 18601 18602 /* Reset PCIE errors for debug */ 18603 REG_WR(sc, 0x2114, 0xffffffff); 18604 REG_WR(sc, 0x2120, 0xffffffff); 18605 18606 if (CHIP_IS_E1x(sc)) { 18607 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 18608 main_mem_base = HC_REG_MAIN_MEMORY + 18609 SC_PORT(sc) * (main_mem_size * 4); 18610 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 18611 main_mem_width = 8; 18612 18613 val = REG_RD(sc, main_mem_prty_clr); 18614 if (val) { 18615 BLOGD(sc, DBG_LOAD, 18616 "Parity errors in HC block during function init (0x%x)!\n", 18617 val); 18618 } 18619 18620 /* Clear "false" parity errors in MSI-X table */ 18621 for (i = main_mem_base; 18622 i < main_mem_base + main_mem_size * 4; 18623 i += main_mem_width) { 18624 bxe_read_dmae(sc, i, main_mem_width / 4); 18625 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 18626 i, main_mem_width / 4); 18627 } 18628 /* Clear HC parity attention */ 18629 REG_RD(sc, main_mem_prty_clr); 18630 } 18631 18632#if 1 18633 /* Enable STORMs SP logging */ 18634 REG_WR8(sc, BAR_USTRORM_INTMEM + 18635 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18636 REG_WR8(sc, BAR_TSTRORM_INTMEM + 18637 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18638 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18639 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18640 REG_WR8(sc, BAR_XSTRORM_INTMEM + 18641 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18642#endif 18643 18644 elink_phy_probe(&sc->link_params); 18645 18646 return (0); 18647} 18648 18649static void 18650bxe_link_reset(struct bxe_softc *sc) 18651{ 18652 if (!BXE_NOMCP(sc)) { 18653 bxe_acquire_phy_lock(sc); 18654 elink_lfa_reset(&sc->link_params, &sc->link_vars); 18655 bxe_release_phy_lock(sc); 18656 } else { 18657 if (!CHIP_REV_IS_SLOW(sc)) { 18658 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 18659 } 18660 } 18661} 18662 18663static void 18664bxe_reset_port(struct bxe_softc *sc) 18665{ 
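/* quiesce the port: take the link down, mask the port's NIG interrupts, stop traffic into the BRB, and then verify that the BRB has drained */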
18666 int port = SC_PORT(sc); 18667 uint32_t val; 18668 18669 /* reset physical Link */ 18670 bxe_link_reset(sc); 18671 18672 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18673 18674 /* Do not rcv packets to BRB */ 18675 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18676 /* Do not direct rcv packets that are not for MCP to the BRB */ 18677 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18678 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18679 18680 /* Configure AEU */ 18681 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18682 18683 DELAY(100000); 18684 18685 /* Check for BRB port occupancy */ 18686 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18687 if (val) { 18688 BLOGD(sc, DBG_LOAD, 18689 "BRB1 is not empty, %d blocks are occupied\n", val); 18690 } 18691 18692 /* TODO: Close Doorbell port? */ 18693} 18694 18695static void 18696bxe_ilt_wr(struct bxe_softc *sc, 18697 uint32_t index, 18698 bus_addr_t addr) 18699{ 18700 int reg; 18701 uint32_t wb_write[2]; 18702 18703 if (CHIP_IS_E1(sc)) { 18704 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18705 } else { 18706 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18707 } 18708 18709 wb_write[0] = ONCHIP_ADDR1(addr); 18710 wb_write[1] = ONCHIP_ADDR2(addr); 18711 REG_WR_DMAE(sc, reg, wb_write, 2); 18712} 18713 18714static void 18715bxe_clear_func_ilt(struct bxe_softc *sc, 18716 uint32_t func) 18717{ 18718 uint32_t i, base = FUNC_ILT_BASE(func); 18719 for (i = base; i < base + ILT_PER_FUNC; i++) { 18720 bxe_ilt_wr(sc, i, 0); 18721 } 18722} 18723 18724static void 18725bxe_reset_func(struct bxe_softc *sc) 18726{ 18727 struct bxe_fastpath *fp; 18728 int port = SC_PORT(sc); 18729 int func = SC_FUNC(sc); 18730 int i; 18731 18732 /* Disable the function in the FW */ 18733 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18734 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18735 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18736 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18737 18738 /* FP SBs */ 18739 FOR_EACH_ETH_QUEUE(sc, i) { 18740 fp = &sc->fp[i]; 18741 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18742 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18743 SB_DISABLED); 18744 } 18745 18746#if 0 18747 if (CNIC_LOADED(sc)) { 18748 /* CNIC SB */ 18749 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18750 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 18751 (bxe_cnic_fw_sb_id(sc)), SB_DISABLED); 18752 } 18753#endif 18754 18755 /* SP SB */ 18756 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18757 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18758 SB_DISABLED); 18759 18760 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18761 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18762 } 18763 18764 /* Configure IGU */ 18765 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18766 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18767 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18768 } else { 18769 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18770 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18771 } 18772 18773 if (CNIC_LOADED(sc)) { 18774 /* Disable Timer scan */ 18775 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18776 /* 18777 * Wait for at least 10ms and up to 2 seconds for the timers 18778 * scan to complete 18779 */ 18780 for (i = 0; i < 200; i++) { 18781 DELAY(10000); 18782 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18783 break; 18784 } 18785 } 18786 18787 /* Clear ILT */ 18788 bxe_clear_func_ilt(sc, func); 18789 18790 /* 18791 * Timers workaround bug for E2: if this is vnic-3, 18792 * 
we need to set the entire ilt range for the timers. 18793 */ 18794 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18795 struct ilt_client_info ilt_cli; 18796 /* use dummy TM client */ 18797 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18798 ilt_cli.start = 0; 18799 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18800 ilt_cli.client_num = ILT_CLIENT_TM; 18801 18802 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18803 } 18804 18805 /* this assumes that reset_port() was called before reset_func() */ 18806 if (!CHIP_IS_E1x(sc)) { 18807 bxe_pf_disable(sc); 18808 } 18809 18810 sc->dmae_ready = 0; 18811} 18812 18813static int 18814bxe_gunzip_init(struct bxe_softc *sc) 18815{ 18816 return (0); 18817} 18818 18819static void 18820bxe_gunzip_end(struct bxe_softc *sc) 18821{ 18822 return; 18823} 18824 18825static int 18826bxe_init_firmware(struct bxe_softc *sc) 18827{ 18828 if (CHIP_IS_E1(sc)) { 18829 ecore_init_e1_firmware(sc); 18830 sc->iro_array = e1_iro_arr; 18831 } else if (CHIP_IS_E1H(sc)) { 18832 ecore_init_e1h_firmware(sc); 18833 sc->iro_array = e1h_iro_arr; 18834 } else if (!CHIP_IS_E1x(sc)) { 18835 ecore_init_e2_firmware(sc); 18836 sc->iro_array = e2_iro_arr; 18837 } else { 18838 BLOGE(sc, "Unsupported chip revision\n"); 18839 return (-1); 18840 } 18841 18842 return (0); 18843} 18844 18845static void 18846bxe_release_firmware(struct bxe_softc *sc) 18847{ 18848 /* Do nothing */ 18849 return; 18850} 18851 18852static int 18853ecore_gunzip(struct bxe_softc *sc, 18854 const uint8_t *zbuf, 18855 int len) 18856{ 18857 /* XXX : Implement... */ 18858 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18859 return (FALSE); 18860} 18861 18862static void 18863ecore_reg_wr_ind(struct bxe_softc *sc, 18864 uint32_t addr, 18865 uint32_t val) 18866{ 18867 bxe_reg_wr_ind(sc, addr, val); 18868} 18869 18870static void 18871ecore_write_dmae_phys_len(struct bxe_softc *sc, 18872 bus_addr_t phys_addr, 18873 uint32_t addr, 18874 uint32_t len) 18875{ 18876 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18877} 18878 18879void 18880ecore_storm_memset_struct(struct bxe_softc *sc, 18881 uint32_t addr, 18882 size_t size, 18883 uint32_t *data) 18884{ 18885 uint8_t i; 18886 for (i = 0; i < size/4; i++) { 18887 REG_WR(sc, addr + (i * 4), data[i]); 18888 } 18889} 18890 18891 18892/* 18893 * character device - ioctl interface definitions 18894 */ 18895 18896 18897#include "bxe_dump.h" 18898#include "bxe_ioctl.h" 18899#include <sys/conf.h> 18900 18901static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18902 struct thread *td); 18903 18904static struct cdevsw bxe_cdevsw = { 18905 .d_version = D_VERSION, 18906 .d_ioctl = bxe_eioctl, 18907 .d_name = "bxecnic", 18908}; 18909 18910#define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 
0 : (sc->pcie_func & 1)) 18911 18912 18913#define DUMP_ALL_PRESETS 0x1FFF 18914#define DUMP_MAX_PRESETS 13 18915#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) 18916#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) 18917#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) 18918#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) 18919#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) 18920 18921#define IS_REG_IN_PRESET(presets, idx) \ 18922 ((presets & (1 << (idx-1))) == (1 << (idx-1))) 18923 18924 18925static int 18926bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) 18927{ 18928 if (CHIP_IS_E1(sc)) 18929 return dump_num_registers[0][preset-1]; 18930 else if (CHIP_IS_E1H(sc)) 18931 return dump_num_registers[1][preset-1]; 18932 else if (CHIP_IS_E2(sc)) 18933 return dump_num_registers[2][preset-1]; 18934 else if (CHIP_IS_E3A0(sc)) 18935 return dump_num_registers[3][preset-1]; 18936 else if (CHIP_IS_E3B0(sc)) 18937 return dump_num_registers[4][preset-1]; 18938 else 18939 return 0; 18940} 18941 18942static int 18943bxe_get_total_regs_len32(struct bxe_softc *sc) 18944{ 18945 uint32_t preset_idx; 18946 int regdump_len32 = 0; 18947 18948 18949 /* Calculate the total preset regs length */ 18950 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18951 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); 18952 } 18953 18954 return regdump_len32; 18955} 18956 18957static const uint32_t * 18958__bxe_get_page_addr_ar(struct bxe_softc *sc) 18959{ 18960 if (CHIP_IS_E2(sc)) 18961 return page_vals_e2; 18962 else if (CHIP_IS_E3(sc)) 18963 return page_vals_e3; 18964 else 18965 return NULL; 18966} 18967 18968static uint32_t 18969__bxe_get_page_reg_num(struct bxe_softc *sc) 18970{ 18971 if (CHIP_IS_E2(sc)) 18972 return PAGE_MODE_VALUES_E2; 18973 else if (CHIP_IS_E3(sc)) 18974 return PAGE_MODE_VALUES_E3; 18975 else 18976 return 0; 18977} 18978 18979static const uint32_t * 18980__bxe_get_page_write_ar(struct bxe_softc *sc) 18981{ 18982 if (CHIP_IS_E2(sc)) 18983 return page_write_regs_e2; 18984 else if (CHIP_IS_E3(sc)) 18985 return page_write_regs_e3; 18986 else 18987 return NULL; 18988} 18989 18990static uint32_t 18991__bxe_get_page_write_num(struct bxe_softc *sc) 18992{ 18993 if (CHIP_IS_E2(sc)) 18994 return PAGE_WRITE_REGS_E2; 18995 else if (CHIP_IS_E3(sc)) 18996 return PAGE_WRITE_REGS_E3; 18997 else 18998 return 0; 18999} 19000 19001static const struct reg_addr * 19002__bxe_get_page_read_ar(struct bxe_softc *sc) 19003{ 19004 if (CHIP_IS_E2(sc)) 19005 return page_read_regs_e2; 19006 else if (CHIP_IS_E3(sc)) 19007 return page_read_regs_e3; 19008 else 19009 return NULL; 19010} 19011 19012static uint32_t 19013__bxe_get_page_read_num(struct bxe_softc *sc) 19014{ 19015 if (CHIP_IS_E2(sc)) 19016 return PAGE_READ_REGS_E2; 19017 else if (CHIP_IS_E3(sc)) 19018 return PAGE_READ_REGS_E3; 19019 else 19020 return 0; 19021} 19022 19023static bool 19024bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) 19025{ 19026 if (CHIP_IS_E1(sc)) 19027 return IS_E1_REG(reg_info->chips); 19028 else if (CHIP_IS_E1H(sc)) 19029 return IS_E1H_REG(reg_info->chips); 19030 else if (CHIP_IS_E2(sc)) 19031 return IS_E2_REG(reg_info->chips); 19032 else if (CHIP_IS_E3A0(sc)) 19033 return IS_E3A0_REG(reg_info->chips); 19034 else if (CHIP_IS_E3B0(sc)) 19035 return IS_E3B0_REG(reg_info->chips); 19036 else 19037 return 0; 19038} 19039 19040static bool 19041bxe_is_wreg_in_chip(struct bxe_softc *sc, const 
struct wreg_addr *wreg_info) 19042{ 19043 if (CHIP_IS_E1(sc)) 19044 return IS_E1_REG(wreg_info->chips); 19045 else if (CHIP_IS_E1H(sc)) 19046 return IS_E1H_REG(wreg_info->chips); 19047 else if (CHIP_IS_E2(sc)) 19048 return IS_E2_REG(wreg_info->chips); 19049 else if (CHIP_IS_E3A0(sc)) 19050 return IS_E3A0_REG(wreg_info->chips); 19051 else if (CHIP_IS_E3B0(sc)) 19052 return IS_E3B0_REG(wreg_info->chips); 19053 else 19054 return 0; 19055} 19056 19057/** 19058 * bxe_read_pages_regs - read "paged" registers 19059 * 19060 * @sc device handle 19061 * @p output buffer 19062 * 19063 * Reads "paged" memories: memories that may only be read by first writing to a 19064 * specific address ("write address") and then reading from a specific address 19065 * ("read address"). There may be more than one write address per "page" and 19066 * more than one read address per write address. 19067 */ 19068static void 19069bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 19070{ 19071 uint32_t i, j, k, n; 19072 19073 /* addresses of the paged registers */ 19074 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); 19075 /* number of paged registers */ 19076 int num_pages = __bxe_get_page_reg_num(sc); 19077 /* write addresses */ 19078 const uint32_t *write_addr = __bxe_get_page_write_ar(sc); 19079 /* number of write addresses */ 19080 int write_num = __bxe_get_page_write_num(sc); 19081 /* read addresses info */ 19082 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); 19083 /* number of read addresses */ 19084 int read_num = __bxe_get_page_read_num(sc); 19085 uint32_t addr, size; 19086 19087 for (i = 0; i < num_pages; i++) { 19088 for (j = 0; j < write_num; j++) { 19089 REG_WR(sc, write_addr[j], page_addr[i]); 19090 19091 for (k = 0; k < read_num; k++) { 19092 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { 19093 size = read_addr[k].size; 19094 for (n = 0; n < size; n++) { 19095 addr = read_addr[k].addr + n*4; 19096 *p++ = REG_RD(sc, addr); 19097 } 19098 } 19099 } 19100 } 19101 } 19102 return; 19103} 19104 19105 19106static int 19107bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 19108{ 19109 uint32_t i, j, addr; 19110 const struct wreg_addr *wreg_addr_p = NULL; 19111 19112 if (CHIP_IS_E1(sc)) 19113 wreg_addr_p = &wreg_addr_e1; 19114 else if (CHIP_IS_E1H(sc)) 19115 wreg_addr_p = &wreg_addr_e1h; 19116 else if (CHIP_IS_E2(sc)) 19117 wreg_addr_p = &wreg_addr_e2; 19118 else if (CHIP_IS_E3A0(sc)) 19119 wreg_addr_p = &wreg_addr_e3; 19120 else if (CHIP_IS_E3B0(sc)) 19121 wreg_addr_p = &wreg_addr_e3b0; 19122 else 19123 return (-1); 19124 19125 /* Read the idle_chk registers */ 19126 for (i = 0; i < IDLE_REGS_COUNT; i++) { 19127 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 19128 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { 19129 for (j = 0; j < idle_reg_addrs[i].size; j++) 19130 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); 19131 } 19132 } 19133 19134 /* Read the regular registers */ 19135 for (i = 0; i < REGS_COUNT; i++) { 19136 if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) && 19137 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { 19138 for (j = 0; j < reg_addrs[i].size; j++) 19139 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); 19140 } 19141 } 19142 19143 /* Read the CAM registers */ 19144 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && 19145 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { 19146 for (i = 0; i < wreg_addr_p->size; i++) { 19147 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); 19148 19149 /* In case of wreg_addr register, read additional 
registers from read_regs array 19151 */ 19152 for (j = 0; j < wreg_addr_p->read_regs_count; j++) { 19153 addr = *(wreg_addr_p->read_regs); 19154 *p++ = REG_RD(sc, addr + j*4); 19155 } 19156 } 19157 } 19158 19159 /* Paged registers are supported in E2 & E3 only */ 19160 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 19161 /* Read "paged" registers */ 19162 bxe_read_pages_regs(sc, p, preset); 19163 } 19164 19165 return 0; 19166} 19167 19168static int 19169bxe_grc_dump(struct bxe_softc *sc) 19170{ 19171 int rval = 0; 19172 uint32_t preset_idx; 19173 uint8_t *buf; 19174 uint32_t size; 19175 struct dump_header *d_hdr; 19176 19177 if (sc->grcdump_done) 19178 return (rval); 19179 19180 ecore_disable_blocks_parity(sc); 19181 19182 buf = sc->grc_dump; 19183 d_hdr = sc->grc_dump; 19184 19185 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; 19186 d_hdr->version = BNX2X_DUMP_VERSION; 19187 d_hdr->preset = DUMP_ALL_PRESETS; 19188 19189 if (CHIP_IS_E1(sc)) { 19190 d_hdr->dump_meta_data = DUMP_CHIP_E1; 19191 } else if (CHIP_IS_E1H(sc)) { 19192 d_hdr->dump_meta_data = DUMP_CHIP_E1H; 19193 } else if (CHIP_IS_E2(sc)) { 19194 d_hdr->dump_meta_data = DUMP_CHIP_E2 | 19195 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19196 } else if (CHIP_IS_E3A0(sc)) { 19197 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | 19198 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19199 } else if (CHIP_IS_E3B0(sc)) { 19200 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | 19201 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 19202 } 19203 19204 buf += sizeof(struct dump_header); 19205 19206 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 19207 19208 /* Skip presets with IOR */ 19209 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || 19210 (preset_idx == 11)) 19211 continue; 19212 19213 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); 19214 19215 if (rval) 19216 break; 19217 19218 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); 19219 19220 buf += size; 19221 } 19222 19223 ecore_clear_blocks_parity(sc); 19224 ecore_enable_blocks_parity(sc); 19225 19226 sc->grcdump_done = 1; 19227 return(rval); 19228} 19229 19230static int 19231bxe_add_cdev(struct bxe_softc *sc) 19232{ 19233 int grc_dump_size; 19234 19235 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19236 sizeof(struct dump_header); 19237 19238 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); 19239 19240 if (sc->grc_dump == NULL) 19241 return (-1); 19242 19243 sc->ioctl_dev = make_dev(&bxe_cdevsw, 19244 sc->ifp->if_dunit, 19245 UID_ROOT, 19246 GID_WHEEL, 19247 0600, 19248 "%s", 19249 if_name(sc->ifp)); 19250 19251 if (sc->ioctl_dev == NULL) { 19252 19253 free(sc->grc_dump, M_DEVBUF); 19254 19255 return (-1); 19256 } 19257 19258 sc->ioctl_dev->si_drv1 = sc; 19259 19260 return (0); 19261} 19262 19263static void 19264bxe_del_cdev(struct bxe_softc *sc) 19265{ 19266 if (sc->ioctl_dev != NULL) 19267 destroy_dev(sc->ioctl_dev); 19268 19269 if (sc->grc_dump != NULL) 19270 free(sc->grc_dump, M_DEVBUF); 19271 19272 return; 19273} 19274 19275static int 19276bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 19277 struct thread *td) 19278{ 19279 struct bxe_softc *sc; 19280 int rval = 0; 19281 device_t pci_dev; 19282 bxe_grcdump_t *dump = NULL; 19283 int grc_dump_size; 19284 19285 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) 19286 return ENXIO; 19287 19288 pci_dev = sc->dev; 19289 19290 dump = (bxe_grcdump_t *)data; 19291 19292 switch(cmd) { 19293 19294 case BXE_GRC_DUMP_SIZE: 19295 dump->pci_func = 
sc->pcie_func; 19296 dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19297 sizeof(struct dump_header); 19298 break; 19299 19300 case BXE_GRC_DUMP: 19301 19302 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 19303 sizeof(struct dump_header); 19304 19305 if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) || 19306 (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) { 19307 rval = EINVAL; 19308 break; 19309 } 19310 dump->grcdump_dwords = grc_dump_size >> 2; 19311 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); 19312 sc->grcdump_done = 0; 19313 19314 break; 19315 19316 default: 19317 break; 19318 } 19319 19320 return (rval); 19321} 19322
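/*
 * Illustrative sketch (not part of the driver): a userland consumer of the
 * ioctl interface above would query the dump size first and then fetch the
 * register dump. The device node name comes from if_name() in bxe_add_cdev()
 * above; "/dev/bxe0" is just an example unit.
 *
 *   bxe_grcdump_t dump;
 *   int fd = open("/dev/bxe0", O_RDWR);
 *   ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);   -- fills dump.grcdump_size
 *   dump.grcdump = malloc(dump.grcdump_size);
 *   ioctl(fd, BXE_GRC_DUMP, &dump);        -- copies the dump out and sets
 *                                             dump.grcdump_dwords
 *
 * Note that BXE_GRC_DUMP returns EINVAL unless a dump was captured earlier
 * (sc->grcdump_done) and the user buffer is at least grc_dump_size bytes.
 */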