bxe.c revision 292638
/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/bxe/bxe.c 292638 2015-12-23 01:31:32Z davidcs $");

#define BXE_DRIVER_VERSION "1.78.79"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define it as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int      pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int      pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};

/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64

static struct bxe_device_type bxe_devs[] = {
    { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57710 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711E 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57712_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57800_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57810_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57811_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 4x10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 2x20GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 VF 10GbE" },
#endif
    { 0, 0, 0, 0, NULL }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer;  /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");


#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
    { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes" },
#if 0
    { STATS_OFFSET32(recoverable_error), 4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error), 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi" },
    { STATS_OFFSET32(rx_calls), 4, STATS_FLAGS_FUNC, "rx_calls" },
    { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts" },
    { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts" },
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl" },
    { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors" },
    { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors" },
    { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp" },
    { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached" },
    { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts" },
    { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors" },
    { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp" },
    { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp" },
    { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso" },
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits" },
    { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures" },
    { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full" },
    { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth" },
    { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure" },
    { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth" },
    { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std" },
    { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso" },
#if 0
    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6" },
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp" },
#endif
    { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf" },
    { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred" },
    { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, "tx_queue_xoff" },
    { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts" },
    { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures" },
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed" },
    { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx" },
    { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx" },
    { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge" },
    { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa" }
};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames" },
    { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes" },
    { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls" },
    { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts" },
    { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts" },
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts" },
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl" },
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts" },
    { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors" },
    { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp" },
    { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached" },
    { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts" },
    { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, "tx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits" },
    { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures" },
    { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full" },
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth" },
    { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure" },
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth" },
    { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std" },
    { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso" },
#if 0
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 4, "tx_unsupported_tso_request_ipv6" },
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 4, "tx_unsupported_tso_request_not_tcp" },
#endif
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf" },
    { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred" },
    { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff" },
    { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts" },
    { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx" },
"mbuf_alloc_tx"}, 666 { Q_STATS_OFFSET32(mbuf_alloc_rx), 667 4, "mbuf_alloc_rx"}, 668 { Q_STATS_OFFSET32(mbuf_alloc_sge), 669 4, "mbuf_alloc_sge"}, 670 { Q_STATS_OFFSET32(mbuf_alloc_tpa), 671 4, "mbuf_alloc_tpa"} 672}; 673 674#define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) 675#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) 676 677 678static void bxe_cmng_fns_init(struct bxe_softc *sc, 679 uint8_t read_cfg, 680 uint8_t cmng_type); 681static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); 682static void storm_memset_cmng(struct bxe_softc *sc, 683 struct cmng_init *cmng, 684 uint8_t port); 685static void bxe_set_reset_global(struct bxe_softc *sc); 686static void bxe_set_reset_in_progress(struct bxe_softc *sc); 687static uint8_t bxe_reset_is_done(struct bxe_softc *sc, 688 int engine); 689static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); 690static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, 691 uint8_t *global, 692 uint8_t print); 693static void bxe_int_disable(struct bxe_softc *sc); 694static int bxe_release_leader_lock(struct bxe_softc *sc); 695static void bxe_pf_disable(struct bxe_softc *sc); 696static void bxe_free_fp_buffers(struct bxe_softc *sc); 697static inline void bxe_update_rx_prod(struct bxe_softc *sc, 698 struct bxe_fastpath *fp, 699 uint16_t rx_bd_prod, 700 uint16_t rx_cq_prod, 701 uint16_t rx_sge_prod); 702static void bxe_link_report_locked(struct bxe_softc *sc); 703static void bxe_link_report(struct bxe_softc *sc); 704static void bxe_link_status_update(struct bxe_softc *sc); 705static void bxe_periodic_callout_func(void *xsc); 706static void bxe_periodic_start(struct bxe_softc *sc); 707static void bxe_periodic_stop(struct bxe_softc *sc); 708static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 709 uint16_t prev_index, 710 uint16_t index); 711static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 712 int queue); 713static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 714 uint16_t index); 715static uint8_t bxe_txeof(struct bxe_softc *sc, 716 struct bxe_fastpath *fp); 717static void bxe_task_fp(struct bxe_fastpath *fp); 718static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, 719 struct mbuf *m, 720 uint8_t contents); 721static int bxe_alloc_mem(struct bxe_softc *sc); 722static void bxe_free_mem(struct bxe_softc *sc); 723static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); 724static void bxe_free_fw_stats_mem(struct bxe_softc *sc); 725static int bxe_interrupt_attach(struct bxe_softc *sc); 726static void bxe_interrupt_detach(struct bxe_softc *sc); 727static void bxe_set_rx_mode(struct bxe_softc *sc); 728static int bxe_init_locked(struct bxe_softc *sc); 729static int bxe_stop_locked(struct bxe_softc *sc); 730static __noinline int bxe_nic_load(struct bxe_softc *sc, 731 int load_mode); 732static __noinline int bxe_nic_unload(struct bxe_softc *sc, 733 uint32_t unload_mode, 734 uint8_t keep_link); 735 736static void bxe_handle_sp_tq(void *context, int pending); 737static void bxe_handle_fp_tq(void *context, int pending); 738 739 740/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ 741uint32_t 742calc_crc32(uint8_t *crc32_packet, 743 uint32_t crc32_length, 744 uint32_t crc32_seed, 745 uint8_t complement) 746{ 747 uint32_t byte = 0; 748 uint32_t bit = 0; 749 uint8_t msb = 0; 750 uint32_t temp = 0; 751 uint32_t shft = 0; 752 uint8_t current_byte = 0; 753 uint32_t crc32_result = crc32_seed; 754 const uint32_t CRC32_POLY = 0x1edc6f41; 755 756 if ((crc32_packet == NULL) || 757 (crc32_length == 0) || 758 

/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int                    nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int           nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int                    nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int                    nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_test_and_clear_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int          old,
            int          new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
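
/*
 * Illustrative sketch, not part of the driver: one way calc_crc32() above
 * might be invoked. The buffer and helper name here are hypothetical; the
 * only hard requirement is that the length be a multiple of 8 bytes,
 * otherwise the function simply returns the seed unchanged. Kept under
 * #if 0 in the same spirit as the other disabled blocks in this file.
 */
#if 0
static uint32_t
bxe_example_crc32(uint8_t *buf, uint32_t len) /* hypothetical helper */
{
    /* seed with all ones and ask for the final bitwise complement */
    return (calc_crc32(buf, len, 0xffffffff, 1));
}
#endif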

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; if we can't acquire all of the resources, release anything
 * already acquired.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
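
/*
 * Illustrative sketch, not part of the driver: the intended pairing of
 * bxe_dma_alloc() and bxe_dma_free(). Because both the failure paths and
 * bxe_dma_free() zero the bxe_dma struct, dma.size > 0 doubles as the
 * "is allocated" flag, so calling bxe_dma_free() on a zeroed struct is
 * harmless. The helper name is hypothetical; kept under #if 0 like the
 * other disabled blocks in this file.
 */
#if 0
static int
bxe_example_dma_usage(struct bxe_softc *sc) /* hypothetical helper */
{
    struct bxe_dma dma;

    memset(&dma, 0, sizeof(dma));
    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
        return (1); /* partial resources were already released */
    }

    /* ... use dma.vaddr (CPU view) and dma.paddr (device view) ... */

    bxe_dma_free(sc, &dma);
    return (0);
}
#endif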

/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

#if 0
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }
}
#endif

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
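
/*
 * Illustrative sketch, not part of the driver: the acquire/release
 * discipline for the HW resource locks above. bxe_acquire_hw_lock()
 * polls every 5ms for up to 5 seconds, so callers must be prepared for
 * a -1 (timeout or invalid resource) return. The helper name is
 * hypothetical; kept under #if 0 like the other disabled blocks in
 * this file.
 */
#if 0
static int
bxe_example_locked_access(struct bxe_softc *sc) /* hypothetical helper */
{
    if (bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO) != 0) {
        return (-1);
    }

    /* ... access the hardware resource protected by the lock ... */

    return (bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO));
}
#endif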

/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes;
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes;
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
flash_size 0x%x\n", 1484 offset, buf_size, sc->devinfo.flash_size); 1485 return (-1); 1486 } 1487 1488 /* request access to nvram interface */ 1489 rc = bxe_acquire_nvram_lock(sc); 1490 if (rc) { 1491 return (rc); 1492 } 1493 1494 /* enable access to nvram interface */ 1495 bxe_enable_nvram_access(sc); 1496 1497 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); 1498 align_offset = (offset & ~0x03); 1499 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); 1500 1501 if (rc == 0) { 1502 val &= ~(0xff << BYTE_OFFSET(offset)); 1503 val |= (*data_buf << BYTE_OFFSET(offset)); 1504 1505 /* nvram data is returned as an array of bytes 1506 * convert it back to cpu order 1507 */ 1508 val = be32toh(val); 1509 1510 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); 1511 } 1512 1513 /* disable access to nvram interface */ 1514 bxe_disable_nvram_access(sc); 1515 bxe_release_nvram_lock(sc); 1516 1517 return (rc); 1518} 1519 1520static int 1521bxe_nvram_write(struct bxe_softc *sc, 1522 uint32_t offset, 1523 uint8_t *data_buf, 1524 int buf_size) 1525{ 1526 uint32_t cmd_flags; 1527 uint32_t val; 1528 uint32_t written_so_far; 1529 int rc; 1530 1531 if (buf_size == 1) { 1532 return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); 1533 } 1534 1535 if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { 1536 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", 1537 offset, buf_size); 1538 return (-1); 1539 } 1540 1541 if (buf_size == 0) { 1542 return (0); /* nothing to do */ 1543 } 1544 1545 if ((offset + buf_size) > sc->devinfo.flash_size) { 1546 BLOGE(sc, "Invalid parameter, " 1547 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", 1548 offset, buf_size, sc->devinfo.flash_size); 1549 return (-1); 1550 } 1551 1552 /* request access to nvram interface */ 1553 rc = bxe_acquire_nvram_lock(sc); 1554 if (rc) { 1555 return (rc); 1556 } 1557 1558 /* enable access to nvram interface */ 1559 bxe_enable_nvram_access(sc); 1560 1561 written_so_far = 0; 1562 cmd_flags = MCPR_NVM_COMMAND_FIRST; 1563 while ((written_so_far < buf_size) && (rc == 0)) { 1564 if (written_so_far == (buf_size - sizeof(uint32_t))) { 1565 cmd_flags |= MCPR_NVM_COMMAND_LAST; 1566 } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { 1567 cmd_flags |= MCPR_NVM_COMMAND_LAST; 1568 } else if ((offset % NVRAM_PAGE_SIZE) == 0) { 1569 cmd_flags |= MCPR_NVM_COMMAND_FIRST; 1570 } 1571 1572 memcpy(&val, data_buf, 4); 1573 1574 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); 1575 1576 /* advance to the next dword */ 1577 offset += sizeof(uint32_t); 1578 data_buf += sizeof(uint32_t); 1579 written_so_far += sizeof(uint32_t); 1580 cmd_flags = 0; 1581 } 1582 1583 /* disable access to nvram interface */ 1584 bxe_disable_nvram_access(sc); 1585 bxe_release_nvram_lock(sc); 1586 1587 return (rc); 1588} 1589 1590/* copy command into DMAE command memory and set DMAE command Go */ 1591void 1592bxe_post_dmae(struct bxe_softc *sc, 1593 struct dmae_command *dmae, 1594 int idx) 1595{ 1596 uint32_t cmd_offset; 1597 int i; 1598 1599 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); 1600 for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { 1601 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); 1602 } 1603 1604 REG_WR(sc, dmae_reg_go_c[idx], 1); 1605} 1606 1607uint32_t 1608bxe_dmae_opcode_add_comp(uint32_t opcode, 1609 uint8_t comp_type) 1610{ 1611 return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 1612 DMAE_COMMAND_C_TYPE_ENABLE)); 1613} 1614 1615uint32_t 

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc    *sc,
              struct dmae_command *dmae,
              int                 idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
                        struct dmae_command *dmae,
                        uint8_t             src_type,
                        uint8_t             dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}

void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t         src_addr,
              uint32_t         len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t       dma_addr,
               uint32_t         dst_addr,
               uint32_t         len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t       phys_addr,
                        uint32_t         addr,
                        uint32_t         len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}

void
bxe_set_ctx_validation(struct bxe_softc   *sc,
                       struct eth_context *cxt,
                       uint32_t           cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
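
/*
 * Illustrative sketch, not part of the driver: how bxe_write_dmae_phys_len()
 * above chops a large host-to-GRC copy into DMAE_LEN32_WR_MAX-dword chunks.
 * Note the mixed units in its loop: 'len' counts dwords while 'offset'
 * advances in bytes (dmae_wr_max * 4). The dword count, GRC address, and
 * helper name below are hypothetical. Kept under #if 0 like the other
 * disabled blocks in this file.
 */
#if 0
static void
bxe_example_dmae_copy(struct bxe_softc *sc, struct bxe_dma *dma)
{
    uint32_t grc_addr = 0; /* hypothetical GRC destination */

    /* copy 1024 dwords (4KB) from host memory into GRC space */
    bxe_write_dmae_phys_len(sc, dma->paddr, grc_addr, 1024);
}
#endif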

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t          port,
                            uint8_t          fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t          port,
                            uint16_t         fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t          fw_sb_id,
                             uint8_t          sb_index,
                             uint8_t          disable,
                             uint16_t         usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t         usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t         reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t         reg_addr,
                   uint32_t         val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         *wb_write,
                      uint16_t         len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *wb_write,
                     uint16_t         len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
1939{ 1940 /* XXX */ 1941#if 0 1942 //va_list ap; 1943 va_start(ap, elink_log_id); 1944 _XXX_(sc, lm_log_id, ap); 1945 va_end(ap); 1946#endif 1947 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1948} 1949 1950static int 1951bxe_set_spio(struct bxe_softc *sc, 1952 int spio, 1953 uint32_t mode) 1954{ 1955 uint32_t spio_reg; 1956 1957 /* Only 2 SPIOs are configurable */ 1958 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1959 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1960 return (-1); 1961 } 1962 1963 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1964 1965 /* read SPIO and mask except the float bits */ 1966 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1967 1968 switch (mode) { 1969 case MISC_SPIO_OUTPUT_LOW: 1970 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1971 /* clear FLOAT and set CLR */ 1972 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1973 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1974 break; 1975 1976 case MISC_SPIO_OUTPUT_HIGH: 1977 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1978 /* clear FLOAT and set SET */ 1979 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1980 spio_reg |= (spio << MISC_SPIO_SET_POS); 1981 break; 1982 1983 case MISC_SPIO_INPUT_HI_Z: 1984 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1985 /* set FLOAT */ 1986 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1987 break; 1988 1989 default: 1990 break; 1991 } 1992 1993 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1994 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1995 1996 return (0); 1997} 1998 1999static int 2000bxe_gpio_read(struct bxe_softc *sc, 2001 int gpio_num, 2002 uint8_t port) 2003{ 2004 /* The GPIO should be swapped if swap register is set and active */ 2005 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2006 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2007 int gpio_shift = (gpio_num + 2008 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2009 uint32_t gpio_mask = (1 << gpio_shift); 2010 uint32_t gpio_reg; 2011 2012 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2013 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2014 return (-1); 2015 } 2016 2017 /* read GPIO value */ 2018 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2019 2020 /* get the requested pin value */ 2021 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2022} 2023 2024static int 2025bxe_gpio_write(struct bxe_softc *sc, 2026 int gpio_num, 2027 uint32_t mode, 2028 uint8_t port) 2029{ 2030 /* The GPIO should be swapped if swap register is set and active */ 2031 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2032 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2033 int gpio_shift = (gpio_num + 2034 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2035 uint32_t gpio_mask = (1 << gpio_shift); 2036 uint32_t gpio_reg; 2037 2038 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2039 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2040 return (-1); 2041 } 2042 2043 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2044 2045 /* read GPIO and mask except the float bits */ 2046 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2047 2048 switch (mode) { 2049 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2050 BLOGD(sc, DBG_PHY, 2051 "Set GPIO %d (shift %d) -> output low\n", 2052 gpio_num, gpio_shift); 2053 /* clear FLOAT and set CLR */ 2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2056 break; 2057 2058 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2059 BLOGD(sc, DBG_PHY, 2060 "Set GPIO %d (shift %d) -> output high\n", 2061 gpio_num, gpio_shift); 2062 /* clear FLOAT and set SET */ 2063 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2064 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2065 break; 2066 2067 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2068 BLOGD(sc, DBG_PHY, 2069 "Set GPIO %d (shift %d) -> input\n", 2070 gpio_num, gpio_shift); 2071 /* set FLOAT */ 2072 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2073 break; 2074 2075 default: 2076 break; 2077 } 2078 2079 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2080 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2081 2082 return (0); 2083} 2084 2085static int 2086bxe_gpio_mult_write(struct bxe_softc *sc, 2087 uint8_t pins, 2088 uint32_t mode) 2089{ 2090 uint32_t gpio_reg; 2091 2092 /* any port swapping should be handled by caller */ 2093 2094 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2095 2096 /* read GPIO and mask except the float bits */ 2097 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2098 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2099 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2100 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2101 2102 switch (mode) { 2103 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2104 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2105 /* set CLR */ 2106 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2107 break; 2108 2109 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2110 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2111 /* set SET */ 2112 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2113 break; 2114 2115 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2116 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2117 /* set FLOAT */ 2118 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2119 break; 2120 2121 default: 2122 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2123 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2124 return (-1); 2125 } 2126 2127 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2128 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2129 2130 return (0); 2131} 2132 2133static int 2134bxe_gpio_int_write(struct bxe_softc *sc, 2135 int gpio_num, 2136 uint32_t mode, 2137 uint8_t port) 2138{ 2139 /* The GPIO should be swapped if swap register is set and active */ 2140 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2141 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2142 int gpio_shift = (gpio_num + 2143 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2144 uint32_t gpio_mask = (1 << gpio_shift); 2145 uint32_t gpio_reg; 2146 2147 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2148 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2149 return (-1); 2150 } 2151 2152 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2153 2154 /* read GPIO int */ 2155 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2156 2157 switch (mode) { 2158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2159 BLOGD(sc, DBG_PHY, 2160 "Clear GPIO INT %d (shift %d) -> output low\n", 2161 gpio_num, gpio_shift); 2162 /* clear SET and set CLR */ 2163 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2164 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2165 break; 2166 2167 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2168 BLOGD(sc, DBG_PHY, 2169 "Set GPIO INT %d (shift %d) -> output high\n", 2170 gpio_num, gpio_shift); 2171 /* clear CLR and set SET */ 2172 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2173 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2174 break; 2175 2176 default: 2177 break; 2178 } 2179 2180 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2181 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2182 2183 return (0); 2184} 2185 2186uint32_t 2187elink_cb_gpio_read(struct bxe_softc *sc, 2188 uint16_t gpio_num, 2189 uint8_t port) 2190{ 2191 return (bxe_gpio_read(sc, gpio_num, port)); 2192} 2193 2194uint8_t 2195elink_cb_gpio_write(struct bxe_softc *sc, 2196 uint16_t gpio_num, 2197 uint8_t mode, /* 0=low 1=high */ 2198 uint8_t port) 2199{ 2200 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2201} 2202 2203uint8_t 2204elink_cb_gpio_mult_write(struct bxe_softc *sc, 2205 uint8_t pins, 2206 uint8_t mode) /* 0=low 1=high */ 2207{ 2208 return (bxe_gpio_mult_write(sc, pins, mode)); 2209} 2210 2211uint8_t 2212elink_cb_gpio_int_write(struct bxe_softc *sc, 2213 uint16_t gpio_num, 2214 uint8_t mode, /* 0=low 1=high */ 2215 uint8_t port) 2216{ 2217 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2218} 2219 2220void 2221elink_cb_notify_link_changed(struct bxe_softc *sc) 2222{ 2223 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2224 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2225} 2226 2227/* send the MCP a request, block until there is a reply */ 2228uint32_t 2229elink_cb_fw_command(struct bxe_softc *sc, 2230 uint32_t command, 2231 uint32_t param) 2232{ 2233 int mb_idx = SC_FW_MB_IDX(sc); 2234 uint32_t seq; 2235 uint32_t rc = 0; 2236 uint32_t cnt = 1; 2237 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2238 2239 BXE_FWMB_LOCK(sc); 2240 2241 seq = ++sc->fw_seq; 2242 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2243 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2244 2245 BLOGD(sc, DBG_PHY, 2246 "wrote command 0x%08x to FW MB param 0x%08x\n", 2247 (command | seq), param); 2248 2249 /* Let the FW do its magic. Give it up to 5 seconds... */ 2250 do { 2251 DELAY(delay * 1000); 2252 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2253 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2254 2255 BLOGD(sc, DBG_PHY, 2256 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2257 cnt*delay, rc, seq); 2258 2259 /* is this a reply to our command? */ 2260 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2261 rc &= FW_MSG_CODE_MASK; 2262 } else { 2263 /* Ruh-roh!
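        * The sequence number in the response did not match the one we
        * wrote, so the MCP either never answered or answered some other
        * command. Returning 0 here lets the caller treat the command as
        * failed rather than mistaking a stale mailbox header for a
        * valid reply code.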
*/ 2264 BLOGE(sc, "FW failed to respond!\n"); 2265 // XXX bxe_fw_dump(sc); 2266 rc = 0; 2267 } 2268 2269 BXE_FWMB_UNLOCK(sc); 2270 return (rc); 2271} 2272 2273static uint32_t 2274bxe_fw_command(struct bxe_softc *sc, 2275 uint32_t command, 2276 uint32_t param) 2277{ 2278 return (elink_cb_fw_command(sc, command, param)); 2279} 2280 2281static void 2282__storm_memset_dma_mapping(struct bxe_softc *sc, 2283 uint32_t addr, 2284 bus_addr_t mapping) 2285{ 2286 REG_WR(sc, addr, U64_LO(mapping)); 2287 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2288} 2289 2290static void 2291storm_memset_spq_addr(struct bxe_softc *sc, 2292 bus_addr_t mapping, 2293 uint16_t abs_fid) 2294{ 2295 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2296 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2297 __storm_memset_dma_mapping(sc, addr, mapping); 2298} 2299 2300static void 2301storm_memset_vf_to_pf(struct bxe_softc *sc, 2302 uint16_t abs_fid, 2303 uint16_t pf_id) 2304{ 2305 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2306 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2307 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2308 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2309} 2310 2311static void 2312storm_memset_func_en(struct bxe_softc *sc, 2313 uint16_t abs_fid, 2314 uint8_t enable) 2315{ 2316 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2317 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2318 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2319 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2320} 2321 2322static void 2323storm_memset_eq_data(struct bxe_softc *sc, 2324 struct event_ring_data *eq_data, 2325 uint16_t pfid) 2326{ 2327 uint32_t addr; 2328 size_t size; 2329 2330 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2331 size = sizeof(struct event_ring_data); 2332 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2333} 2334 2335static void 2336storm_memset_eq_prod(struct bxe_softc *sc, 2337 uint16_t eq_prod, 2338 uint16_t pfid) 2339{ 2340 uint32_t addr = (BAR_CSTRORM_INTMEM + 2341 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2342 REG_WR16(sc, addr, eq_prod); 2343} 2344 2345/* 2346 * Post a slowpath command. 2347 * 2348 * A slowpath command is used to propagate a configuration change through 2349 * the controller in a controlled manner, allowing each STORM processor and 2350 * other H/W blocks to phase in the change. The commands sent on the 2351 * slowpath are referred to as ramrods. Depending on the ramrod used the 2352 * completion of the ramrod will occur in different ways. Here's a 2353 * breakdown of ramrods and how they complete: 2354 * 2355 * RAMROD_CMD_ID_ETH_PORT_SETUP 2356 * Used to setup the leading connection on a port. Completes on the 2357 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2358 * 2359 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2360 * Used to setup an additional connection on a port. Completes on the 2361 * RCQ of the multi-queue/RSS connection being initialized. 2362 * 2363 * RAMROD_CMD_ID_ETH_STAT_QUERY 2364 * Used to force the storm processors to update the statistics database 2365 * in host memory. This ramrod is sent on the leading connection CID and 2366 * completes as an index increment of the CSTORM on the default status 2367 * block.
2368 * 2369 * RAMROD_CMD_ID_ETH_UPDATE 2370 * Used to update the state of the leading connection, usually to update 2371 * the RSS indirection table. Completes on the RCQ of the leading 2372 * connection. (Not currently used under FreeBSD until OS support becomes 2373 * available.) 2374 * 2375 * RAMROD_CMD_ID_ETH_HALT 2376 * Used when tearing down a connection prior to driver unload. Completes 2377 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2378 * use this on the leading connection. 2379 * 2380 * RAMROD_CMD_ID_ETH_SET_MAC 2381 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2382 * the RCQ of the leading connection. 2383 * 2384 * RAMROD_CMD_ID_ETH_CFC_DEL 2385 * Used when tearing down a connection prior to driver unload. Completes 2386 * on the RCQ of the leading connection (since the current connection 2387 * has been completely removed from controller memory). 2388 * 2389 * RAMROD_CMD_ID_ETH_PORT_DEL 2390 * Used to tear down the leading connection prior to driver unload, 2391 * typically fp[0]. Completes as an index increment of the CSTORM on the 2392 * default status block. 2393 * 2394 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2395 * Used for connection offload. Completes on the RCQ of the multi-queue 2396 * RSS connection that is being offloaded. (Not currently used under 2397 * FreeBSD.) 2398 * 2399 * There can only be one command pending per function. 2400 * 2401 * Returns: 2402 * 0 = Success, !0 = Failure. 2403 */ 2404 2405/* must be called under the spq lock */ 2406static inline 2407struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2408{ 2409 struct eth_spe *next_spe = sc->spq_prod_bd; 2410 2411 if (sc->spq_prod_bd == sc->spq_last_bd) { 2412 /* wrap back to the first eth_spq */ 2413 sc->spq_prod_bd = sc->spq; 2414 sc->spq_prod_idx = 0; 2415 } else { 2416 sc->spq_prod_bd++; 2417 sc->spq_prod_idx++; 2418 } 2419 2420 return (next_spe); 2421} 2422 2423/* must be called under the spq lock */ 2424static inline 2425void bxe_sp_prod_update(struct bxe_softc *sc) 2426{ 2427 int func = SC_FUNC(sc); 2428 2429 /* 2430 * Make sure that BD data is updated before writing the producer. 2431 * BD data is written to the memory, the producer is read from the 2432 * memory, thus we need a full memory barrier to ensure the ordering. 2433 */ 2434 mb(); 2435 2436 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2437 sc->spq_prod_idx); 2438 2439 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2440 BUS_SPACE_BARRIER_WRITE); 2441} 2442 2443/** 2444 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2445 * 2446 * @cmd: command to check 2447 * @cmd_type: command type 2448 */ 2449static inline 2450int bxe_is_contextless_ramrod(int cmd, 2451 int cmd_type) 2452{ 2453 if ((cmd_type == NONE_CONNECTION_TYPE) || 2454 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2455 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2456 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2457 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2458 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2459 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2460 return (TRUE); 2461 } else { 2462 return (FALSE); 2463 } 2464} 2465 2466/** 2467 * bxe_sp_post - place a single command on an SP ring 2468 * 2469 * @sc: driver handle 2470 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
* 2471 * @cid: SW CID the command is related to 2472 * @data_hi: command private data address (high 32 bits) 2473 * @data_lo: command private data address (low 32 bits) 2474 * @cmd_type: command type (e.g. NONE, ETH) 2475 * 2476 * SP data is handled as if it's always an address pair, thus data fields are 2477 * not swapped to little endian in upper functions. Instead this function swaps 2478 * data as if it's two uint32 fields. 2479 */ 2480int 2481bxe_sp_post(struct bxe_softc *sc, 2482 int command, 2483 int cid, 2484 uint32_t data_hi, 2485 uint32_t data_lo, 2486 int cmd_type) 2487{ 2488 struct eth_spe *spe; 2489 uint16_t type; 2490 int common; 2491 2492 common = bxe_is_contextless_ramrod(command, cmd_type); 2493 2494 BXE_SP_LOCK(sc); 2495 2496 if (common) { 2497 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2498 BLOGE(sc, "EQ ring is full!\n"); 2499 BXE_SP_UNLOCK(sc); 2500 return (-1); 2501 } 2502 } else { 2503 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2504 BLOGE(sc, "SPQ ring is full!\n"); 2505 BXE_SP_UNLOCK(sc); 2506 return (-1); 2507 } 2508 } 2509 2510 spe = bxe_sp_get_next(sc); 2511 2512 /* CID needs port number to be encoded in it */ 2513 spe->hdr.conn_and_cmd_data = 2514 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2515 2516 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 2517 2518 /* TBD: Check if it works for VFs */ 2519 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 2520 SPE_HDR_FUNCTION_ID); 2521 2522 spe->hdr.type = htole16(type); 2523 2524 spe->data.update_data_addr.hi = htole32(data_hi); 2525 spe->data.update_data_addr.lo = htole32(data_lo); 2526 2527 /* 2528 * It's ok if the actual decrement is issued towards the memory 2529 * somewhere between the lock and unlock. Thus no more explicit 2530 * memory barrier is needed. 2531 */ 2532 if (common) { 2533 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2534 } else { 2535 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2536 } 2537 2538 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2539 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2540 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2541 BLOGD(sc, DBG_SP, 2542 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2543 sc->spq_prod_idx, 2544 (uint32_t)U64_HI(sc->spq_dma.paddr), 2545 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2546 command, 2547 common, 2548 HW_CID(sc, cid), 2549 data_hi, 2550 data_lo, 2551 type, 2552 atomic_load_acq_long(&sc->cq_spq_left), 2553 atomic_load_acq_long(&sc->eq_spq_left)); 2554 2555 bxe_sp_prod_update(sc); 2556 2557 BXE_SP_UNLOCK(sc); 2558 return (0); 2559} 2560 2561/** 2562 * bxe_debug_print_ind_table - prints the indirection table configuration.
2563 * 2564 * @sc: driver hanlde 2565 * @p: pointer to rss configuration 2566 */ 2567#if 0 2568static void 2569bxe_debug_print_ind_table(struct bxe_softc *sc, 2570 struct ecore_config_rss_params *p) 2571{ 2572 int i; 2573 2574 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); 2575 BLOGD(sc, DBG_LOAD, " 0x0000: "); 2576 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2577 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); 2578 2579 /* Print 4 bytes in a line */ 2580 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 2581 (((i + 1) & 0x3) == 0)) { 2582 BLOGD(sc, DBG_LOAD, "\n"); 2583 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); 2584 } 2585 } 2586 2587 BLOGD(sc, DBG_LOAD, "\n"); 2588} 2589#endif 2590 2591/* 2592 * FreeBSD Device probe function. 2593 * 2594 * Compares the device found to the driver's list of supported devices and 2595 * reports back to the bsd loader whether this is the right driver for the device. 2596 * This is the driver entry function called from the "kldload" command. 2597 * 2598 * Returns: 2599 * BUS_PROBE_DEFAULT on success, positive value on failure. 2600 */ 2601static int 2602bxe_probe(device_t dev) 2603{ 2604 struct bxe_softc *sc; 2605 struct bxe_device_type *t; 2606 char *descbuf; 2607 uint16_t did, sdid, svid, vid; 2608 2609 /* Find our device structure */ 2610 sc = device_get_softc(dev); 2611 sc->dev = dev; 2612 t = bxe_devs; 2613 2614 /* Get the data for the device to be probed. */ 2615 vid = pci_get_vendor(dev); 2616 did = pci_get_device(dev); 2617 svid = pci_get_subvendor(dev); 2618 sdid = pci_get_subdevice(dev); 2619 2620 BLOGD(sc, DBG_LOAD, 2621 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2622 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2623 2624 /* Look through the list of known devices for a match. */ 2625 while (t->bxe_name != NULL) { 2626 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2627 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2628 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2629 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2630 if (descbuf == NULL) 2631 return (ENOMEM); 2632 2633 /* Print out the device identity. 
*/ 2634 snprintf(descbuf, BXE_DEVDESC_MAX, 2635 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2636 (((pci_read_config(dev, PCIR_REVID, 4) & 2637 0xf0) >> 4) + 'A'), 2638 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2639 BXE_DRIVER_VERSION); 2640 2641 device_set_desc_copy(dev, descbuf); 2642 free(descbuf, M_TEMP); 2643 return (BUS_PROBE_DEFAULT); 2644 } 2645 t++; 2646 } 2647 2648 return (ENXIO); 2649} 2650 2651static void 2652bxe_init_mutexes(struct bxe_softc *sc) 2653{ 2654#ifdef BXE_CORE_LOCK_SX 2655 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2656 "bxe%d_core_lock", sc->unit); 2657 sx_init(&sc->core_sx, sc->core_sx_name); 2658#else 2659 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2660 "bxe%d_core_lock", sc->unit); 2661 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2662#endif 2663 2664 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2665 "bxe%d_sp_lock", sc->unit); 2666 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2667 2668 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2669 "bxe%d_dmae_lock", sc->unit); 2670 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2671 2672 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2673 "bxe%d_phy_lock", sc->unit); 2674 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2675 2676 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2677 "bxe%d_fwmb_lock", sc->unit); 2678 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2679 2680 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2681 "bxe%d_print_lock", sc->unit); 2682 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2683 2684 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2685 "bxe%d_stats_lock", sc->unit); 2686 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2687 2688 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2689 "bxe%d_mcast_lock", sc->unit); 2690 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2691} 2692 2693static void 2694bxe_release_mutexes(struct bxe_softc *sc) 2695{ 2696#ifdef BXE_CORE_LOCK_SX 2697 sx_destroy(&sc->core_sx); 2698#else 2699 if (mtx_initialized(&sc->core_mtx)) { 2700 mtx_destroy(&sc->core_mtx); 2701 } 2702#endif 2703 2704 if (mtx_initialized(&sc->sp_mtx)) { 2705 mtx_destroy(&sc->sp_mtx); 2706 } 2707 2708 if (mtx_initialized(&sc->dmae_mtx)) { 2709 mtx_destroy(&sc->dmae_mtx); 2710 } 2711 2712 if (mtx_initialized(&sc->port.phy_mtx)) { 2713 mtx_destroy(&sc->port.phy_mtx); 2714 } 2715 2716 if (mtx_initialized(&sc->fwmb_mtx)) { 2717 mtx_destroy(&sc->fwmb_mtx); 2718 } 2719 2720 if (mtx_initialized(&sc->print_mtx)) { 2721 mtx_destroy(&sc->print_mtx); 2722 } 2723 2724 if (mtx_initialized(&sc->stats_mtx)) { 2725 mtx_destroy(&sc->stats_mtx); 2726 } 2727 2728 if (mtx_initialized(&sc->mcast_mtx)) { 2729 mtx_destroy(&sc->mcast_mtx); 2730 } 2731} 2732 2733static void 2734bxe_tx_disable(struct bxe_softc* sc) 2735{ 2736 if_t ifp = sc->ifp; 2737 2738 /* tell the stack the driver is stopped and TX queue is full */ 2739 if (ifp != NULL) { 2740 if_setdrvflags(ifp, 0); 2741 } 2742} 2743 2744static void 2745bxe_drv_pulse(struct bxe_softc *sc) 2746{ 2747 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2748 sc->fw_drv_pulse_wr_seq); 2749} 2750 2751static inline uint16_t 2752bxe_tx_avail(struct bxe_softc *sc, 2753 struct bxe_fastpath *fp) 2754{ 2755 int16_t used; 2756 uint16_t prod; 2757 uint16_t cons; 2758 2759 prod = fp->tx_bd_prod; 2760 cons = fp->tx_bd_cons; 2761 2762 used = SUB_S16(prod, cons); 2763 2764#if 0 2765 KASSERT((used 
>= 0), ("used tx bds < 0")); 2766 KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size")); 2767 KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL), 2768 ("invalid number of tx bds used")); 2769#endif 2770 2771 return (int16_t)(sc->tx_ring_size) - used; 2772} 2773 2774static inline int 2775bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2776{ 2777 uint16_t hw_cons; 2778 2779 mb(); /* status block fields can change */ 2780 hw_cons = le16toh(*fp->tx_cons_sb); 2781 return (hw_cons != fp->tx_pkt_cons); 2782} 2783 2784static inline uint8_t 2785bxe_has_tx_work(struct bxe_fastpath *fp) 2786{ 2787 /* expand this for multi-cos if ever supported */ 2788 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2789} 2790 2791static inline int 2792bxe_has_rx_work(struct bxe_fastpath *fp) 2793{ 2794 uint16_t rx_cq_cons_sb; 2795 2796 mb(); /* status block fields can change */ 2797 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2798 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2799 rx_cq_cons_sb++; 2800 return (fp->rx_cq_cons != rx_cq_cons_sb); 2801} 2802 2803static void 2804bxe_sp_event(struct bxe_softc *sc, 2805 struct bxe_fastpath *fp, 2806 union eth_rx_cqe *rr_cqe) 2807{ 2808 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2809 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2810 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2811 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2812 2813 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2814 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2815 2816#if 0 2817 /* 2818 * If cid is within VF range, replace the slowpath object with the 2819 * one corresponding to this VF 2820 */ 2821 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { 2822 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); 2823 } 2824#endif 2825 2826 switch (command) { 2827 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2828 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2829 drv_cmd = ECORE_Q_CMD_UPDATE; 2830 break; 2831 2832 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2833 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2834 drv_cmd = ECORE_Q_CMD_SETUP; 2835 break; 2836 2837 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2838 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2839 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2840 break; 2841 2842 case (RAMROD_CMD_ID_ETH_HALT): 2843 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2844 drv_cmd = ECORE_Q_CMD_HALT; 2845 break; 2846 2847 case (RAMROD_CMD_ID_ETH_TERMINATE): 2848 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); 2849 drv_cmd = ECORE_Q_CMD_TERMINATE; 2850 break; 2851 2852 case (RAMROD_CMD_ID_ETH_EMPTY): 2853 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2854 drv_cmd = ECORE_Q_CMD_EMPTY; 2855 break; 2856 2857 default: 2858 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2859 command, fp->index); 2860 return; 2861 } 2862 2863 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2864 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2865 /* 2866 * q_obj->complete_cmd() failure means that this was 2867 * an unexpected completion. 2868 * 2869 * In this case we don't want to increase the sc->spq_left 2870 * because apparently we haven't sent this command in the first
2872 */ 2873 // bxe_panic(sc, ("Unexpected SP completion\n")); 2874 return; 2875 } 2876 2877#if 0 2878 /* SRIOV: reschedule any 'in_progress' operations */ 2879 bxe_iov_sp_event(sc, cid, TRUE); 2880#endif 2881 2882 atomic_add_acq_long(&sc->cq_spq_left, 1); 2883 2884 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2885 atomic_load_acq_long(&sc->cq_spq_left)); 2886 2887#if 0 2888 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2889 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2890 /* 2891 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2892 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2893 * prevent case that both bits are cleared. At the end of load/unload 2894 * driver checks that sp_state is cleared and this order prevents 2895 * races. 2896 */ 2897 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2898 wmb(); 2899 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2900 2901 /* schedule the sp task as MCP ack is required */ 2902 bxe_schedule_sp_task(sc); 2903 } 2904#endif 2905} 2906 2907/* 2908 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2909 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2910 * the current aggregation queue as in-progress. 2911 */ 2912static void 2913bxe_tpa_start(struct bxe_softc *sc, 2914 struct bxe_fastpath *fp, 2915 uint16_t queue, 2916 uint16_t cons, 2917 uint16_t prod, 2918 struct eth_fast_path_rx_cqe *cqe) 2919{ 2920 struct bxe_sw_rx_bd tmp_bd; 2921 struct bxe_sw_rx_bd *rx_buf; 2922 struct eth_rx_bd *rx_bd; 2923 int max_agg_queues; 2924 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2925 uint16_t index; 2926 2927 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2928 "cons=%d prod=%d\n", 2929 fp->index, queue, cons, prod); 2930 2931 max_agg_queues = MAX_AGG_QS(sc); 2932 2933 KASSERT((queue < max_agg_queues), 2934 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2935 fp->index, queue, max_agg_queues)); 2936 2937 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2938 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2939 fp->index, queue)); 2940 2941 /* copy the existing mbuf and mapping from the TPA pool */ 2942 tmp_bd = tpa_info->bd; 2943 2944 if (tmp_bd.m == NULL) { 2945 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2946 fp->index, queue); 2947 /* XXX Error handling? */ 2948 return; 2949 } 2950 2951 /* change the TPA queue to the start state */ 2952 tpa_info->state = BXE_TPA_STATE_START; 2953 tpa_info->placement_offset = cqe->placement_offset; 2954 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2955 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2956 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2957 2958 fp->rx_tpa_queue_used |= (1 << queue); 2959 2960 /* 2961 * If all the buffer descriptors are filled with mbufs then fill in 2962 * the current consumer index with a new BD. Else if a maximum Rx 2963 * buffer limit is imposed then fill in the next producer index. 2964 */ 2965 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2966 prod : cons; 2967 2968 /* move the received mbuf and mapping to TPA pool */ 2969 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2970 2971 /* release any existing RX BD mbuf mappings */ 2972 if (cons != index) { 2973 rx_buf = &fp->rx_mbuf_chain[cons]; 2974 2975 if (rx_buf->m_map != NULL) { 2976 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2977 BUS_DMASYNC_POSTREAD); 2978 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2979 } 2980 2981 /* 2982 * We get here when the maximum number of rx buffers is less than 2983 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2984 * it out here without concern of a memory leak. 2985 */ 2986 fp->rx_mbuf_chain[cons].m = NULL; 2987 } 2988 2989 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2990 fp->rx_mbuf_chain[index] = tmp_bd; 2991 2992 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2993 rx_bd = &fp->rx_chain[index]; 2994 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2995 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2996} 2997 2998/* 2999 * When a TPA aggregation is completed, loop through the individual mbufs 3000 * of the aggregation, combining them into a single mbuf which will be sent 3001 * up the stack. Refill all freed SGEs with mbufs as we go along. 3002 */ 3003static int 3004bxe_fill_frag_mbuf(struct bxe_softc *sc, 3005 struct bxe_fastpath *fp, 3006 struct bxe_sw_tpa_info *tpa_info, 3007 uint16_t queue, 3008 uint16_t pages, 3009 struct mbuf *m, 3010 struct eth_end_agg_rx_cqe *cqe, 3011 uint16_t cqe_idx) 3012{ 3013 struct mbuf *m_frag; 3014 uint32_t frag_len, frag_size, i; 3015 uint16_t sge_idx; 3016 int rc = 0; 3017 int j; 3018 3019 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3020 3021 BLOGD(sc, DBG_LRO, 3022 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3023 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3024 3025 /* make sure the aggregated frame is not too big to handle */ 3026 if (pages > 8 * PAGES_PER_SGE) { 3027 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3028 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3029 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3030 tpa_info->len_on_bd, frag_size); 3031 bxe_panic(sc, ("sge page count error\n")); 3032 return (EINVAL); 3033 } 3034 3035 /* 3036 * Scan through the scatter gather list pulling individual mbufs into a 3037 * single mbuf for the host stack. 3038 */ 3039 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3040 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3041 3042 /* 3043 * Firmware gives the indices of the SGE as if the ring is an array 3044 * (meaning that the "next" element will consume 2 indices). 3045 */ 3046 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3047 3048 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3049 "sge_idx=%d frag_size=%d frag_len=%d\n", 3050 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3051 3052 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3053 3054 /* allocate a new mbuf for the SGE */ 3055 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3056 if (rc) { 3057 /* Leave all remaining SGEs in the ring! 
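               On an allocation failure the remaining SGEs keep their
               current mbufs so the hardware SGE ring stays consistent;
               the nonzero rc makes the caller, bxe_tpa_stop(), drop the
               partially assembled frame and count it in rx_soft_errors.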
*/ 3058 return (rc); 3059 } 3060 3061 /* update the fragment length */ 3062 m_frag->m_len = frag_len; 3063 3064 /* concatenate the fragment to the head mbuf */ 3065 m_cat(m, m_frag); 3066 fp->eth_q_stats.mbuf_alloc_sge--; 3067 3068 /* update the TPA mbuf size and remaining fragment size */ 3069 m->m_pkthdr.len += frag_len; 3070 frag_size -= frag_len; 3071 } 3072 3073 BLOGD(sc, DBG_LRO, 3074 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 3075 fp->index, queue, frag_size); 3076 3077 return (rc); 3078} 3079 3080static inline void 3081bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 3082{ 3083 int i, j; 3084 3085 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 3086 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 3087 3088 for (j = 0; j < 2; j++) { 3089 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 3090 idx--; 3091 } 3092 } 3093} 3094 3095static inline void 3096bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 3097{ 3098 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 3099 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 3100 3101 /* 3102 * Clear the last two indices in the page. These are the indices that 3103 * correspond to the "next" element, hence will never be indicated and 3104 * should be removed from the calculations. 3105 */ 3106 bxe_clear_sge_mask_next_elems(fp); 3107} 3108 3109static inline void 3110bxe_update_last_max_sge(struct bxe_fastpath *fp, 3111 uint16_t idx) 3112{ 3113 uint16_t last_max = fp->last_max_sge; 3114 3115 if (SUB_S16(idx, last_max) > 0) { 3116 fp->last_max_sge = idx; 3117 } 3118} 3119 3120static inline void 3121bxe_update_sge_prod(struct bxe_softc *sc, 3122 struct bxe_fastpath *fp, 3123 uint16_t sge_len, 3124 union eth_sgl_or_raw_data *cqe) 3125{ 3126 uint16_t last_max, last_elem, first_elem; 3127 uint16_t delta = 0; 3128 uint16_t i; 3129 3130 if (!sge_len) { 3131 return; 3132 } 3133 3134 /* first mark all used pages */ 3135 for (i = 0; i < sge_len; i++) { 3136 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 3137 RX_SGE(le16toh(cqe->sgl[i]))); 3138 } 3139 3140 BLOGD(sc, DBG_LRO, 3141 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 3142 fp->index, sge_len - 1, 3143 le16toh(cqe->sgl[sge_len - 1])); 3144 3145 /* assume that the last SGE index is the biggest */ 3146 bxe_update_last_max_sge(fp, 3147 le16toh(cqe->sgl[sge_len - 1])); 3148 3149 last_max = RX_SGE(fp->last_max_sge); 3150 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 3151 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 3152 3153 /* if ring is not full */ 3154 if (last_elem + 1 != first_elem) { 3155 last_elem++; 3156 } 3157 3158 /* now update the prod */ 3159 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 3160 if (__predict_true(fp->sge_mask[i])) { 3161 break; 3162 } 3163 3164 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3165 delta += BIT_VEC64_ELEM_SZ; 3166 } 3167 3168 if (delta > 0) { 3169 fp->rx_sge_prod += delta; 3170 /* clear page-end entries */ 3171 bxe_clear_sge_mask_next_elems(fp); 3172 } 3173 3174 BLOGD(sc, DBG_LRO, 3175 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3176 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3177} 3178 3179/* 3180 * The aggregation on the current TPA queue has completed. Pull the individual 3181 * mbuf fragments together into a single mbuf, perform all necessary checksum 3182 * calculations, and send the resulting mbuf to the stack.
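 *
 * As an illustrative example (assuming 4KB SGE pages): a 9014-byte
 * aggregation with len_on_bd = 1514 leaves frag_size = 7500, which
 * SGE_PAGE_ALIGN() rounds up to two pages, so two page-sized fragments
 * are concatenated onto the head mbuf via m_cat() in
 * bxe_fill_frag_mbuf().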
3183 */ 3184static void 3185bxe_tpa_stop(struct bxe_softc *sc, 3186 struct bxe_fastpath *fp, 3187 struct bxe_sw_tpa_info *tpa_info, 3188 uint16_t queue, 3189 uint16_t pages, 3190 struct eth_end_agg_rx_cqe *cqe, 3191 uint16_t cqe_idx) 3192{ 3193 if_t ifp = sc->ifp; 3194 struct mbuf *m; 3195 int rc = 0; 3196 3197 BLOGD(sc, DBG_LRO, 3198 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3199 fp->index, queue, tpa_info->placement_offset, 3200 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3201 3202 m = tpa_info->bd.m; 3203 3204 /* allocate a replacement before modifying existing mbuf */ 3205 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3206 if (rc) { 3207 /* drop the frame and log an error */ 3208 fp->eth_q_stats.rx_soft_errors++; 3209 goto bxe_tpa_stop_exit; 3210 } 3211 3212 /* we have a replacement, fixup the current mbuf */ 3213 m_adj(m, tpa_info->placement_offset); 3214 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3215 3216 /* mark the checksums valid (taken care of by the firmware) */ 3217 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3218 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3219 m->m_pkthdr.csum_data = 0xffff; 3220 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3221 CSUM_IP_VALID | 3222 CSUM_DATA_VALID | 3223 CSUM_PSEUDO_HDR); 3224 3225 /* aggregate all of the SGEs into a single mbuf */ 3226 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3227 if (rc) { 3228 /* drop the packet and log an error */ 3229 fp->eth_q_stats.rx_soft_errors++; 3230 m_freem(m); 3231 } else { 3232 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) { 3233 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3234 m->m_flags |= M_VLANTAG; 3235 } 3236 3237 /* assign packet to this interface interface */ 3238 if_setrcvif(m, ifp); 3239 3240#if __FreeBSD_version >= 800000 3241 /* specify what RSS queue was used for this flow */ 3242 m->m_pkthdr.flowid = fp->index; 3243 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3244#endif 3245 3246 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3247 fp->eth_q_stats.rx_tpa_pkts++; 3248 3249 /* pass the frame to the stack */ 3250 if_input(ifp, m); 3251 } 3252 3253 /* we passed an mbuf up the stack or dropped the frame */ 3254 fp->eth_q_stats.mbuf_alloc_tpa--; 3255 3256bxe_tpa_stop_exit: 3257 3258 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3259 fp->rx_tpa_queue_used &= ~(1 << queue); 3260} 3261 3262static uint8_t 3263bxe_service_rxsgl( 3264 struct bxe_fastpath *fp, 3265 uint16_t len, 3266 uint16_t lenonbd, 3267 struct mbuf *m, 3268 struct eth_fast_path_rx_cqe *cqe_fp) 3269{ 3270 struct mbuf *m_frag; 3271 uint16_t frags, frag_len; 3272 uint16_t sge_idx = 0; 3273 uint16_t j; 3274 uint8_t i, rc = 0; 3275 uint32_t frag_size; 3276 3277 /* adjust the mbuf */ 3278 m->m_len = lenonbd; 3279 3280 frag_size = len - lenonbd; 3281 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3282 3283 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { 3284 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); 3285 3286 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3287 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); 3288 m_frag->m_len = frag_len; 3289 3290 /* allocate a new mbuf for the SGE */ 3291 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3292 if (rc) { 3293 /* Leave all remaining SGEs in the ring! 
*/ 3294 return (rc); 3295 } 3296 fp->eth_q_stats.mbuf_alloc_sge--; 3297 3298 /* concatenate the fragment to the head mbuf */ 3299 m_cat(m, m_frag); 3300 3301 frag_size -= frag_len; 3302 } 3303 3304 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3305 3306 return rc; 3307} 3308 3309static uint8_t 3310bxe_rxeof(struct bxe_softc *sc, 3311 struct bxe_fastpath *fp) 3312{ 3313 if_t ifp = sc->ifp; 3314 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3315 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3316 int rx_pkts = 0; 3317 int rc = 0; 3318 3319 BXE_FP_RX_LOCK(fp); 3320 3321 /* CQ "next element" is of the size of the regular element */ 3322 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3323 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3324 hw_cq_cons++; 3325 } 3326 3327 bd_cons = fp->rx_bd_cons; 3328 bd_prod = fp->rx_bd_prod; 3329 bd_prod_fw = bd_prod; 3330 sw_cq_cons = fp->rx_cq_cons; 3331 sw_cq_prod = fp->rx_cq_prod; 3332 3333 /* 3334 * Memory barrier necessary as speculative reads of the rx 3335 * buffer can be ahead of the index in the status block 3336 */ 3337 rmb(); 3338 3339 BLOGD(sc, DBG_RX, 3340 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3341 fp->index, hw_cq_cons, sw_cq_cons); 3342 3343 while (sw_cq_cons != hw_cq_cons) { 3344 struct bxe_sw_rx_bd *rx_buf = NULL; 3345 union eth_rx_cqe *cqe; 3346 struct eth_fast_path_rx_cqe *cqe_fp; 3347 uint8_t cqe_fp_flags; 3348 enum eth_rx_cqe_type cqe_fp_type; 3349 uint16_t len, lenonbd, pad; 3350 struct mbuf *m = NULL; 3351 3352 comp_ring_cons = RCQ(sw_cq_cons); 3353 bd_prod = RX_BD(bd_prod); 3354 bd_cons = RX_BD(bd_cons); 3355 3356 cqe = &fp->rcq_chain[comp_ring_cons]; 3357 cqe_fp = &cqe->fast_path_cqe; 3358 cqe_fp_flags = cqe_fp->type_error_flags; 3359 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3360 3361 BLOGD(sc, DBG_RX, 3362 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3363 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3364 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3365 fp->index, 3366 hw_cq_cons, 3367 sw_cq_cons, 3368 bd_prod, 3369 bd_cons, 3370 CQE_TYPE(cqe_fp_flags), 3371 cqe_fp_flags, 3372 cqe_fp->status_flags, 3373 le32toh(cqe_fp->rss_hash_result), 3374 le16toh(cqe_fp->vlan_tag), 3375 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3376 le16toh(cqe_fp->len_on_bd)); 3377 3378 /* is this a slowpath msg? */ 3379 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3380 bxe_sp_event(sc, fp, cqe); 3381 goto next_cqe; 3382 } 3383 3384 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3385 3386 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3387 struct bxe_sw_tpa_info *tpa_info; 3388 uint16_t frag_size, pages; 3389 uint8_t queue; 3390 3391#if 0 3392 /* sanity check */ 3393 if (!fp->tpa_enable && 3394 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3395 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3396 CQE_TYPE(cqe_fp_type)); 3397 } 3398#endif 3399 3400 if (CQE_TYPE_START(cqe_fp_type)) { 3401 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3402 bd_cons, bd_prod, cqe_fp); 3403 m = NULL; /* packet not ready yet */ 3404 goto next_rx; 3405 } 3406 3407 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3408 ("CQE type is not STOP! 
(0x%x)\n", cqe_fp_type)); 3409 3410 queue = cqe->end_agg_cqe.queue_index; 3411 tpa_info = &fp->rx_tpa_info[queue]; 3412 3413 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3414 fp->index, queue); 3415 3416 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3417 tpa_info->len_on_bd); 3418 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3419 3420 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3421 &cqe->end_agg_cqe, comp_ring_cons); 3422 3423 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3424 3425 goto next_cqe; 3426 } 3427 3428 /* non TPA */ 3429 3430 /* is this an error packet? */ 3431 if (__predict_false(cqe_fp_flags & 3432 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3433 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3434 fp->eth_q_stats.rx_soft_errors++; 3435 goto next_rx; 3436 } 3437 3438 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3439 lenonbd = le16toh(cqe_fp->len_on_bd); 3440 pad = cqe_fp->placement_offset; 3441 3442 m = rx_buf->m; 3443 3444 if (__predict_false(m == NULL)) { 3445 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3446 bd_cons, fp->index); 3447 goto next_rx; 3448 } 3449 3450 /* XXX double copy if packet length under a threshold */ 3451 3452 /* 3453 * If all the buffer descriptors are filled with mbufs then fill in 3454 * the current consumer index with a new BD. Else if a maximum Rx 3455 * buffer limit is imposed then fill in the next producer index. 3456 */ 3457 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3458 (sc->max_rx_bufs != RX_BD_USABLE) ? 3459 bd_prod : bd_cons); 3460 if (rc != 0) { 3461 3462 /* we simply reuse the received mbuf and don't post it to the stack */ 3463 m = NULL; 3464 3465 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3466 fp->index, rc); 3467 fp->eth_q_stats.rx_soft_errors++; 3468 3469 if (sc->max_rx_bufs != RX_BD_USABLE) { 3470 /* copy this consumer index to the producer index */ 3471 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3472 sizeof(struct bxe_sw_rx_bd)); 3473 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3474 } 3475 3476 goto next_rx; 3477 } 3478 3479 /* current mbuf was detached from the bd */ 3480 fp->eth_q_stats.mbuf_alloc_rx--; 3481 3482 /* we allocated a replacement mbuf, fixup the current one */ 3483 m_adj(m, pad); 3484 m->m_pkthdr.len = m->m_len = len; 3485 3486 if ((len > 60) && (len > lenonbd)) { 3487 fp->eth_q_stats.rx_bxe_service_rxsgl++; 3488 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); 3489 if (rc) 3490 break; 3491 fp->eth_q_stats.rx_jumbo_sge_pkts++; 3492 } else if (lenonbd < len) { 3493 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; 3494 } 3495 3496 /* assign packet to this interface */ 3497 if_setrcvif(m, ifp); 3498 3499 /* assume no hardware checksum has completed */ 3500 m->m_pkthdr.csum_flags = 0; 3501 3502 /* validate checksum if offload enabled */ 3503 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 3504 /* check for a valid IP frame */ 3505 if (!(cqe->fast_path_cqe.status_flags & 3506 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3507 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3508 if (__predict_false(cqe_fp_flags & 3509 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3510 fp->eth_q_stats.rx_hw_csum_errors++; 3511 } else { 3512 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3513 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3514 } 3515 } 3516 3517 /* check for a valid TCP/UDP frame */ 3518 if (!(cqe->fast_path_cqe.status_flags & 3519 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3520 if (__predict_false(cqe_fp_flags & 3521
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3522 fp->eth_q_stats.rx_hw_csum_errors++; 3523 } else { 3524 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3525 m->m_pkthdr.csum_data = 0xFFFF; 3526 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3527 CSUM_PSEUDO_HDR); 3528 } 3529 } 3530 } 3531 3532 /* if there is a VLAN tag then flag that info */ 3533 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3534 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3535 m->m_flags |= M_VLANTAG; 3536 } 3537 3538#if __FreeBSD_version >= 800000 3539 /* specify what RSS queue was used for this flow */ 3540 m->m_pkthdr.flowid = fp->index; 3541 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3542#endif 3543 3544next_rx: 3545 3546 bd_cons = RX_BD_NEXT(bd_cons); 3547 bd_prod = RX_BD_NEXT(bd_prod); 3548 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3549 3550 /* pass the frame to the stack */ 3551 if (__predict_true(m != NULL)) { 3552 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3553 rx_pkts++; 3554 if_input(ifp, m); 3555 } 3556 3557next_cqe: 3558 3559 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3560 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3561 3562 /* limit spinning on the queue */ 3563 if (rc != 0) 3564 break; 3565 3566 if (rx_pkts == sc->rx_budget) { 3567 fp->eth_q_stats.rx_budget_reached++; 3568 break; 3569 } 3570 } /* while work to do */ 3571 3572 fp->rx_bd_cons = bd_cons; 3573 fp->rx_bd_prod = bd_prod_fw; 3574 fp->rx_cq_cons = sw_cq_cons; 3575 fp->rx_cq_prod = sw_cq_prod; 3576 3577 /* Update producers */ 3578 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3579 3580 fp->eth_q_stats.rx_pkts += rx_pkts; 3581 fp->eth_q_stats.rx_calls++; 3582 3583 BXE_FP_RX_UNLOCK(fp); 3584 3585 return (sw_cq_cons != hw_cq_cons); 3586} 3587 3588static uint16_t 3589bxe_free_tx_pkt(struct bxe_softc *sc, 3590 struct bxe_fastpath *fp, 3591 uint16_t idx) 3592{ 3593 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3594 struct eth_tx_start_bd *tx_start_bd; 3595 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3596 uint16_t new_cons; 3597 int nbd; 3598 3599 /* unmap the mbuf from non-paged memory */ 3600 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3601 3602 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3603 nbd = le16toh(tx_start_bd->nbd) - 1; 3604 3605#if 0 3606 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3607 bxe_panic(sc, ("BAD nbd!\n")); 3608 } 3609#endif 3610 3611 new_cons = (tx_buf->first_bd + nbd); 3612 3613#if 0 3614 struct eth_tx_bd *tx_data_bd; 3615 3616 /* 3617 * The following code doesn't do anything but is left here 3618 * to document which BDs the new value of new_cons skips over.
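 *
 * As a worked example (numbers purely illustrative): if tx_buf->first_bd
 * is 10 and the start BD's nbd field reads 4, then nbd above is 3 and
 * new_cons = 10 + 3 = 13; the BDs being skipped are the start BD, the
 * parse BD, and the data/frag BDs in between.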
3619 */ 3620 3621 /* get the next bd */ 3622 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3623 3624 /* skip the parse bd */ 3625 --nbd; 3626 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3627 3628 /* skip the TSO split header bd since they have no mapping */ 3629 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3630 --nbd; 3631 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3632 } 3633 3634 /* now free frags */ 3635 while (nbd > 0) { 3636 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3637 if (--nbd) { 3638 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3639 } 3640 } 3641#endif 3642 3643 /* free the mbuf */ 3644 if (__predict_true(tx_buf->m != NULL)) { 3645 m_freem(tx_buf->m); 3646 fp->eth_q_stats.mbuf_alloc_tx--; 3647 } else { 3648 fp->eth_q_stats.tx_chain_lost_mbuf++; 3649 } 3650 3651 tx_buf->m = NULL; 3652 tx_buf->first_bd = 0; 3653 3654 return (new_cons); 3655} 3656 3657/* transmit timeout watchdog */ 3658static int 3659bxe_watchdog(struct bxe_softc *sc, 3660 struct bxe_fastpath *fp) 3661{ 3662 BXE_FP_TX_LOCK(fp); 3663 3664 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3665 BXE_FP_TX_UNLOCK(fp); 3666 return (0); 3667 } 3668 3669 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3670 3671 BXE_FP_TX_UNLOCK(fp); 3672 3673 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3674 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3675 3676 return (-1); 3677} 3678 3679/* processes transmit completions */ 3680static uint8_t 3681bxe_txeof(struct bxe_softc *sc, 3682 struct bxe_fastpath *fp) 3683{ 3684 if_t ifp = sc->ifp; 3685 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3686 uint16_t tx_bd_avail; 3687 3688 BXE_FP_TX_LOCK_ASSERT(fp); 3689 3690 bd_cons = fp->tx_bd_cons; 3691 hw_cons = le16toh(*fp->tx_cons_sb); 3692 sw_cons = fp->tx_pkt_cons; 3693 3694 while (sw_cons != hw_cons) { 3695 pkt_cons = TX_BD(sw_cons); 3696 3697 BLOGD(sc, DBG_TX, 3698 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3699 fp->index, hw_cons, sw_cons, pkt_cons); 3700 3701 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3702 3703 sw_cons++; 3704 } 3705 3706 fp->tx_pkt_cons = sw_cons; 3707 fp->tx_bd_cons = bd_cons; 3708 3709 BLOGD(sc, DBG_TX, 3710 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3711 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3712 3713 mb(); 3714 3715 tx_bd_avail = bxe_tx_avail(sc, fp); 3716 3717 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3718 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 3719 } else { 3720 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 3721 } 3722 3723 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3724 /* reset the watchdog timer if there are pending transmits */ 3725 fp->watchdog_timer = BXE_TX_TIMEOUT; 3726 return (TRUE); 3727 } else { 3728 /* clear watchdog when there are no pending transmits */ 3729 fp->watchdog_timer = 0; 3730 return (FALSE); 3731 } 3732} 3733 3734static void 3735bxe_drain_tx_queues(struct bxe_softc *sc) 3736{ 3737 struct bxe_fastpath *fp; 3738 int i, count; 3739 3740 /* wait until all TX fastpath tasks have completed */ 3741 for (i = 0; i < sc->num_queues; i++) { 3742 fp = &sc->fp[i]; 3743 3744 count = 1000; 3745 3746 while (bxe_has_tx_work(fp)) { 3747 3748 BXE_FP_TX_LOCK(fp); 3749 bxe_txeof(sc, fp); 3750 BXE_FP_TX_UNLOCK(fp); 3751 3752 if (count == 0) { 3753 BLOGE(sc, "Timeout waiting for fp[%d] " 3754 "transmits to complete!\n", i); 3755 bxe_panic(sc, ("tx drain failure\n")); 3756 return; 3757 } 3758 3759 count--; 3760 DELAY(1000); 3761 rmb(); 3762 } 3763 } 3764 3765 return; 3766} 3767 3768static int 3769bxe_del_all_macs(struct bxe_softc *sc, 3770 struct 
ecore_vlan_mac_obj *mac_obj, 3771 int mac_type, 3772 uint8_t wait_for_comp) 3773{ 3774 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3775 int rc; 3776 3777 /* wait for completion of the request */ 3778 if (wait_for_comp) { 3779 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3780 } 3781 3782 /* Set the mac type of addresses we want to clear */ 3783 bxe_set_bit(mac_type, &vlan_mac_flags); 3784 3785 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3786 if (rc < 0) { 3787 BLOGE(sc, "Failed to delete MACs (%d)\n", rc); 3788 } 3789 3790 return (rc); 3791} 3792 3793static int 3794bxe_fill_accept_flags(struct bxe_softc *sc, 3795 uint32_t rx_mode, 3796 unsigned long *rx_accept_flags, 3797 unsigned long *tx_accept_flags) 3798{ 3799 /* Clear the flags first */ 3800 *rx_accept_flags = 0; 3801 *tx_accept_flags = 0; 3802 3803 switch (rx_mode) { 3804 case BXE_RX_MODE_NONE: 3805 /* 3806 * 'drop all' supersedes any accept flags that may have been 3807 * passed to the function. 3808 */ 3809 break; 3810 3811 case BXE_RX_MODE_NORMAL: 3812 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3813 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3814 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3815 3816 /* internal switching mode */ 3817 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3818 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3819 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3820 3821 break; 3822 3823 case BXE_RX_MODE_ALLMULTI: 3824 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3825 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3826 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3827 3828 /* internal switching mode */ 3829 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3830 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3831 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3832 3833 break; 3834 3835 case BXE_RX_MODE_PROMISC: 3836 /* 3837 * According to the definition of SI mode, an interface in promisc 3838 * mode should receive matched and unmatched (in resolution of port) 3839 * unicast packets.
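     *
     * As an illustrative example: on a non-SI function in promisc mode
     * the code below yields
     *   rx: UNMATCHED | UNICAST | ALL_MULTICAST | BROADCAST
     *   tx: UNICAST | ALL_MULTICAST | BROADCAST
     * and ECORE_ACCEPT_ANY_VLAN is then added to both sets at the end
     * of this function since VLAN filtering is not enabled.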
3840 */ 3841 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3842 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3843 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3844 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3845 3846 /* internal switching mode */ 3847 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3848 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3849 3850 if (IS_MF_SI(sc)) { 3851 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3852 } else { 3853 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3854 } 3855 3856 break; 3857 3858 default: 3859 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3860 return (-1); 3861 } 3862 3863 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3864 if (rx_mode != BXE_RX_MODE_NONE) { 3865 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3866 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3867 } 3868 3869 return (0); 3870} 3871 3872static int 3873bxe_set_q_rx_mode(struct bxe_softc *sc, 3874 uint8_t cl_id, 3875 unsigned long rx_mode_flags, 3876 unsigned long rx_accept_flags, 3877 unsigned long tx_accept_flags, 3878 unsigned long ramrod_flags) 3879{ 3880 struct ecore_rx_mode_ramrod_params ramrod_param; 3881 int rc; 3882 3883 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3884 3885 /* Prepare ramrod parameters */ 3886 ramrod_param.cid = 0; 3887 ramrod_param.cl_id = cl_id; 3888 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3889 ramrod_param.func_id = SC_FUNC(sc); 3890 3891 ramrod_param.pstate = &sc->sp_state; 3892 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3893 3894 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3895 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3896 3897 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3898 3899 ramrod_param.ramrod_flags = ramrod_flags; 3900 ramrod_param.rx_mode_flags = rx_mode_flags; 3901 3902 ramrod_param.rx_accept_flags = rx_accept_flags; 3903 ramrod_param.tx_accept_flags = tx_accept_flags; 3904 3905 rc = ecore_config_rx_mode(sc, &ramrod_param); 3906 if (rc < 0) { 3907 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3908 return (rc); 3909 } 3910 3911 return (0); 3912} 3913 3914static int 3915bxe_set_storm_rx_mode(struct bxe_softc *sc) 3916{ 3917 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3918 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3919 int rc; 3920 3921 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3922 &tx_accept_flags); 3923 if (rc) { 3924 return (rc); 3925 } 3926 3927 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3928 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3929 3930 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3931 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3932 rx_accept_flags, tx_accept_flags, 3933 ramrod_flags)); 3934} 3935 3936/* returns the "mcp load_code" according to global load_count array */ 3937static int 3938bxe_nic_load_no_mcp(struct bxe_softc *sc) 3939{ 3940 int path = SC_PATH(sc); 3941 int port = SC_PORT(sc); 3942 3943 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3944 path, load_count[path][0], load_count[path][1], 3945 load_count[path][2]); 3946 load_count[path][0]++; 3947 load_count[path][1 + port]++; 3948 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3949 path, load_count[path][0], load_count[path][1], 3950 load_count[path][2]); 3951 if (load_count[path][0] == 1) { 3952 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3953 } else if (load_count[path][1 + port] == 1) { 3954 return 
(FW_MSG_CODE_DRV_LOAD_PORT); 3955 } else { 3956 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3957 } 3958} 3959 3960/* returns the "mcp load_code" according to global load_count array */ 3961static int 3962bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3963{ 3964 int port = SC_PORT(sc); 3965 int path = SC_PATH(sc); 3966 3967 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3968 path, load_count[path][0], load_count[path][1], 3969 load_count[path][2]); 3970 load_count[path][0]--; 3971 load_count[path][1 + port]--; 3972 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3973 path, load_count[path][0], load_count[path][1], 3974 load_count[path][2]); 3975 if (load_count[path][0] == 0) { 3976 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3977 } else if (load_count[path][1 + port] == 0) { 3978 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3979 } else { 3980 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3981 } 3982} 3983 3984/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3985static uint32_t 3986bxe_send_unload_req(struct bxe_softc *sc, 3987 int unload_mode) 3988{ 3989 uint32_t reset_code = 0; 3990#if 0 3991 int port = SC_PORT(sc); 3992 int path = SC_PATH(sc); 3993#endif 3994 3995 /* Select the UNLOAD request mode */ 3996 if (unload_mode == UNLOAD_NORMAL) { 3997 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3998 } 3999#if 0 4000 else if (sc->flags & BXE_NO_WOL_FLAG) { 4001 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 4002 } else if (sc->wol) { 4003 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 4004 uint8_t *mac_addr = sc->dev->dev_addr; 4005 uint32_t val; 4006 uint16_t pmc; 4007 4008 /* 4009 * The mac address is written to entries 1-4 to 4010 * preserve entry 0 which is used by the PMF 4011 */ 4012 uint8_t entry = (SC_VN(sc) + 1)*8; 4013 4014 val = (mac_addr[0] << 8) | mac_addr[1]; 4015 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); 4016 4017 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4018 (mac_addr[4] << 8) | mac_addr[5]; 4019 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 4020 4021 /* Enable the PME and clear the status */ 4022 pmc = pci_read_config(sc->dev, 4023 (sc->devinfo.pcie_pm_cap_reg + 4024 PCIR_POWER_STATUS), 4025 2); 4026 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; 4027 pci_write_config(sc->dev, 4028 (sc->devinfo.pcie_pm_cap_reg + 4029 PCIR_POWER_STATUS), 4030 pmc, 4); 4031 4032 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 4033 } 4034#endif 4035 else { 4036 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4037 } 4038 4039 /* Send the request to the MCP */ 4040 if (!BXE_NOMCP(sc)) { 4041 reset_code = bxe_fw_command(sc, reset_code, 0); 4042 } else { 4043 reset_code = bxe_nic_unload_no_mcp(sc); 4044 } 4045 4046 return (reset_code); 4047} 4048 4049/* send UNLOAD_DONE command to the MCP */ 4050static void 4051bxe_send_unload_done(struct bxe_softc *sc, 4052 uint8_t keep_link) 4053{ 4054 uint32_t reset_param = 4055 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 4056 4057 /* Report UNLOAD_DONE to MCP */ 4058 if (!BXE_NOMCP(sc)) { 4059 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 4060 } 4061} 4062 4063static int 4064bxe_func_wait_started(struct bxe_softc *sc) 4065{ 4066 int tout = 50; 4067 4068 if (!sc->port.pmf) { 4069 return (0); 4070 } 4071 4072 /* 4073 * (assumption: No Attention from MCP at this stage) 4074 * PMF probably in the middle of TX disable/enable transaction 4075 * 1. Sync ISR for default SB 4076 * 2. Sync SP queue - this guarantees us that attention handling started 4077 * 3.
Wait until the TX disable/enable transaction completes 4078 * 4079 * 1+2 guarantee that if DCBX attention was scheduled it already changed 4080 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we already 4081 * received completion for the transaction the state is TX_STOPPED. 4082 * State will return to STARTED after completion of TX_STOPPED-->STARTED 4083 * transaction. 4084 */ 4085 4086 /* XXX make sure default SB ISR is done */ 4087 /* need a way to synchronize an irq (intr_mtx?) */ 4088 4089 /* XXX flush any work queues */ 4090 4091 while (ecore_func_get_state(sc, &sc->func_obj) != 4092 ECORE_F_STATE_STARTED && tout--) { 4093 DELAY(20000); 4094 } 4095 4096 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 4097 /* 4098 * Failed to complete the transaction in a "good way". 4099 * Force both transactions with the CLR bit. 4100 */ 4101 struct ecore_func_state_params func_params = { NULL }; 4102 4103 BLOGE(sc, "Unexpected function state! " 4104 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); 4105 4106 func_params.f_obj = &sc->func_obj; 4107 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4108 4109 /* STARTED-->TX_STOPPED */ 4110 func_params.cmd = ECORE_F_CMD_TX_STOP; 4111 ecore_func_state_change(sc, &func_params); 4112 4113 /* TX_STOPPED-->STARTED */ 4114 func_params.cmd = ECORE_F_CMD_TX_START; 4115 return (ecore_func_state_change(sc, &func_params)); 4116 } 4117 4118 return (0); 4119} 4120 4121static int 4122bxe_stop_queue(struct bxe_softc *sc, 4123 int index) 4124{ 4125 struct bxe_fastpath *fp = &sc->fp[index]; 4126 struct ecore_queue_state_params q_params = { NULL }; 4127 int rc; 4128 4129 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); 4130 4131 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 4132 /* We want to wait for completion in this context */ 4133 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 4134 4135 /* Stop the primary connection: */ 4136 4137 /* ...halt the connection */ 4138 q_params.cmd = ECORE_Q_CMD_HALT; 4139 rc = ecore_queue_state_change(sc, &q_params); 4140 if (rc) { 4141 return (rc); 4142 } 4143 4144 /* ...terminate the connection */ 4145 q_params.cmd = ECORE_Q_CMD_TERMINATE; 4146 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); 4147 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 4148 rc = ecore_queue_state_change(sc, &q_params); 4149 if (rc) { 4150 return (rc); 4151 } 4152 4153 /* ...delete cfc entry */ 4154 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 4155 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 4156 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 4157 return (ecore_queue_state_change(sc, &q_params)); 4158} 4159 4160/* wait for the outstanding SP commands */ 4161static inline uint8_t 4162bxe_wait_sp_comp(struct bxe_softc *sc, 4163 unsigned long mask) 4164{ 4165 unsigned long tmp; 4166 int tout = 5000; /* wait for 5 secs tops */ 4167 4168 while (tout--) { 4169 mb(); 4170 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 4171 return (TRUE); 4172 } 4173 4174 DELAY(1000); 4175 } 4176 4177 mb(); 4178 4179 tmp = atomic_load_acq_long(&sc->sp_state); 4180 if (tmp & mask) { 4181 BLOGE(sc, "Filtering completion timed out: " 4182 "sp_state 0x%lx, mask 0x%lx\n", 4183 tmp, mask); 4184 return (FALSE); 4185 } 4186 4187 return (TRUE); /* the mask cleared just as the timeout expired */ 4188} 4189 4190static int 4191bxe_func_stop(struct bxe_softc *sc) 4192{ 4193 struct ecore_func_state_params func_params = { NULL }; 4194 int rc; 4195 4196 /* prepare parameters for function state transitions */ 4197
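    /* (RAMROD_COMP_WAIT makes the state change below synchronous: the call returns only after the FW completion arrives or the internal wait times out.) */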
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4198 func_params.f_obj = &sc->func_obj; 4199 func_params.cmd = ECORE_F_CMD_STOP; 4200 4201 /* 4202 * Try to stop the function the 'good way'. If it fails (in case 4203 * of a parity error during bxe_chip_cleanup()) and we are 4204 * not in a debug mode, perform a state transition in order to 4205 * enable a further HW_RESET transaction. 4206 */ 4207 rc = ecore_func_state_change(sc, &func_params); 4208 if (rc) { 4209 BLOGE(sc, "FUNC_STOP ramrod failed. " 4210 "Running a dry transaction\n"); 4211 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4212 return (ecore_func_state_change(sc, &func_params)); 4213 } 4214 4215 return (0); 4216} 4217 4218static int 4219bxe_reset_hw(struct bxe_softc *sc, 4220 uint32_t load_code) 4221{ 4222 struct ecore_func_state_params func_params = { NULL }; 4223 4224 /* Prepare parameters for function state transitions */ 4225 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4226 4227 func_params.f_obj = &sc->func_obj; 4228 func_params.cmd = ECORE_F_CMD_HW_RESET; 4229 4230 func_params.params.hw_init.load_phase = load_code; 4231 4232 return (ecore_func_state_change(sc, &func_params)); 4233} 4234 4235static void 4236bxe_int_disable_sync(struct bxe_softc *sc, 4237 int disable_hw) 4238{ 4239 if (disable_hw) { 4240 /* prevent the HW from sending interrupts */ 4241 bxe_int_disable(sc); 4242 } 4243 4244 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4245 /* make sure all ISRs are done */ 4246 4247 /* XXX make sure sp_task is not running */ 4248 /* cancel and flush work queues */ 4249} 4250 4251static void 4252bxe_chip_cleanup(struct bxe_softc *sc, 4253 uint32_t unload_mode, 4254 uint8_t keep_link) 4255{ 4256 int port = SC_PORT(sc); 4257 struct ecore_mcast_ramrod_params rparam = { NULL }; 4258 uint32_t reset_code; 4259 int i, rc = 0; 4260 4261 bxe_drain_tx_queues(sc); 4262 4263 /* give HW time to discard old tx messages */ 4264 DELAY(1000); 4265 4266 /* Clean all ETH MACs */ 4267 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4268 if (rc < 0) { 4269 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4270 } 4271 4272 /* Clean up UC list */ 4273 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4274 if (rc < 0) { 4275 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4276 } 4277 4278 /* Disable LLH */ 4279 if (!CHIP_IS_E1(sc)) { 4280 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4281 } 4282 4283 /* Set "drop all" to stop Rx */ 4284 4285 /* 4286 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4287 * a race between the completion code and this code. 4288 */ 4289 BXE_MCAST_LOCK(sc); 4290 4291 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4292 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4293 } else { 4294 bxe_set_storm_rx_mode(sc); 4295 } 4296 4297 /* Clean up multicast configuration */ 4298 rparam.mcast_obj = &sc->mcast_obj; 4299 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4300 if (rc < 0) { 4301 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4302 } 4303 4304 BXE_MCAST_UNLOCK(sc); 4305 4306 // XXX bxe_iov_chip_cleanup(sc); 4307 4308 /* 4309 * Send the UNLOAD_REQUEST to the MCP. This will return whether 4310 * this function should perform FUNCTION, PORT, or COMMON HW 4311 * reset.
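 * (COMMON is returned to the last function on the chip, PORT to the last function on its port, and FUNCTION otherwise; bxe_nic_unload_no_mcp() above derives the same answer from the load_count[] array when no MCP is present.)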
4312 */ 4313 reset_code = bxe_send_unload_req(sc, unload_mode); 4314 4315 /* 4316 * (assumption: No Attention from MCP at this stage) 4317 * PMF probably in the middle of TX disable/enable transaction 4318 */ 4319 rc = bxe_func_wait_started(sc); 4320 if (rc) { 4321 BLOGE(sc, "bxe_func_wait_started failed\n"); 4322 } 4323 4324 /* 4325 * Close multi and leading connections 4326 * Completions for ramrods are collected in a synchronous way 4327 */ 4328 for (i = 0; i < sc->num_queues; i++) { 4329 if (bxe_stop_queue(sc, i)) { 4330 goto unload_error; 4331 } 4332 } 4333 4334 /* 4335 * If the SP settings did not complete so far - something 4336 * very wrong has happened. 4337 */ 4338 if (!bxe_wait_sp_comp(sc, ~0x0UL)) { 4339 BLOGE(sc, "Common slow path ramrods got stuck!\n"); 4340 } 4341 4342unload_error: 4343 4344 rc = bxe_func_stop(sc); 4345 if (rc) { 4346 BLOGE(sc, "Function stop failed!\n"); 4347 } 4348 4349 /* disable HW interrupts */ 4350 bxe_int_disable_sync(sc, TRUE); 4351 4352 /* detach interrupts */ 4353 bxe_interrupt_detach(sc); 4354 4355 /* Reset the chip */ 4356 rc = bxe_reset_hw(sc, reset_code); 4357 if (rc) { 4358 BLOGE(sc, "Hardware reset failed\n"); 4359 } 4360 4361 /* Report UNLOAD_DONE to MCP */ 4362 bxe_send_unload_done(sc, keep_link); 4363} 4364 4365static void 4366bxe_disable_close_the_gate(struct bxe_softc *sc) 4367{ 4368 uint32_t val; 4369 int port = SC_PORT(sc); 4370 4371 BLOGD(sc, DBG_LOAD, 4372 "Disabling 'close the gates'\n"); 4373 4374 if (CHIP_IS_E1(sc)) { 4375 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4376 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4377 val = REG_RD(sc, addr); 4378 val &= ~(0x300); 4379 REG_WR(sc, addr, val); 4380 } else { 4381 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 4382 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 4383 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 4384 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 4385 } 4386} 4387 4388/* 4389 * Cleans the objects that have internal lists without sending 4390 * ramrods. Should be run when interrupts are disabled. 4391 */ 4392static void 4393bxe_squeeze_objects(struct bxe_softc *sc) 4394{ 4395 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 4396 struct ecore_mcast_ramrod_params rparam = { NULL }; 4397 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 4398 int rc; 4399 4400 /* Cleanup MACs' object first... */ 4401 4402 /* Wait for completion of the requested commands */ 4403 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 4404 /* Perform a dry cleanup */ 4405 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 4406 4407 /* Clean ETH primary MAC */ 4408 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); 4409 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 4410 &ramrod_flags); 4411 if (rc != 0) { 4412 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); 4413 } 4414 4415 /* Cleanup UC list */ 4416 vlan_mac_flags = 0; 4417 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); 4418 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, 4419 &ramrod_flags); 4420 if (rc != 0) { 4421 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); 4422 } 4423 4424 /* Now clean mcast object... */ 4425 4426 rparam.mcast_obj = &sc->mcast_obj; 4427 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 4428 4429 /* Add a DEL command...
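 * (DEL drops the multicast configuration currently bound to the object; the ECORE_MCAST_CMD_CONT loop below then drains any commands still pending on it.)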
*/ 4430 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4431 if (rc < 0) { 4432 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4433 } 4434 4435 /* now wait until all pending commands are cleared */ 4436 4437 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4438 while (rc != 0) { 4439 if (rc < 0) { 4440 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); 4441 return; 4442 } 4443 4444 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4445 } 4446} 4447 4448/* stop the controller */ 4449static __noinline int 4450bxe_nic_unload(struct bxe_softc *sc, 4451 uint32_t unload_mode, 4452 uint8_t keep_link) 4453{ 4454 uint8_t global = FALSE; 4455 uint32_t val; 4456 4457 BXE_CORE_LOCK_ASSERT(sc); 4458 4459 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); 4460 4461 /* mark driver as unloaded in shmem2 */ 4462 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 4463 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 4464 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 4465 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 4466 } 4467 4468 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && 4469 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { 4470 /* 4471 * We can get here if the driver has been unloaded 4472 * during parity error recovery and is either waiting for a 4473 * leader to complete or for other functions to unload and 4474 * then ifconfig down has been issued. In this case we want to 4475 * unload and let other functions complete the recovery 4476 * process. 4477 */ 4478 sc->recovery_state = BXE_RECOVERY_DONE; 4479 sc->is_leader = 0; 4480 bxe_release_leader_lock(sc); 4481 mb(); 4482 4483 BLOGD(sc, DBG_LOAD, "Releasing leadership...\n"); 4484 BLOGE(sc, "Can't unload in closed or error state\n"); 4485 return (-1); 4486 } 4487 4488 /* 4489 * Nothing to do during unload if the previous bxe_nic_load() 4490 * did not complete successfully - all resources are released. 4491 */ 4492 if ((sc->state == BXE_STATE_CLOSED) || 4493 (sc->state == BXE_STATE_ERROR)) { 4494 return (0); 4495 } 4496 4497 sc->state = BXE_STATE_CLOSING_WAITING_HALT; 4498 mb(); 4499 4500 /* stop tx */ 4501 bxe_tx_disable(sc); 4502 4503 sc->rx_mode = BXE_RX_MODE_NONE; 4504 /* XXX set rx mode ??? */ 4505 4506 if (IS_PF(sc)) { 4507 /* set ALWAYS_ALIVE bit in shmem */ 4508 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 4509 4510 bxe_drv_pulse(sc); 4511 4512 bxe_stats_handle(sc, STATS_EVENT_STOP); 4513 bxe_save_statistics(sc); 4514 } 4515 4516 /* wait till consumers catch up with producers in all queues */ 4517 bxe_drain_tx_queues(sc); 4518 4519 /* if a VF, indicate to the PF that this function is going down (the PF will delete sp 4520 * elements and clear initializations) 4521 */ 4522 if (IS_VF(sc)) { 4523 ; /* bxe_vfpf_close_vf(sc); */ 4524 } else if (unload_mode != UNLOAD_RECOVERY) { 4525 /* if this is a normal/close unload need to clean up chip */ 4526 bxe_chip_cleanup(sc, unload_mode, keep_link); 4527 } else { 4528 /* Send the UNLOAD_REQUEST to the MCP */ 4529 bxe_send_unload_req(sc, unload_mode); 4530 4531 /* 4532 * Prevent transactions to host from the functions on the 4533 * engine that doesn't reset global blocks in case of global 4534 * attention once global blocks are reset and gates are opened 4535 * (the engine whose leader will perform the recovery 4536 * last).
4537 */ 4538 if (!CHIP_IS_E1x(sc)) { 4539 bxe_pf_disable(sc); 4540 } 4541 4542 /* disable HW interrupts */ 4543 bxe_int_disable_sync(sc, TRUE); 4544 4545 /* detach interrupts */ 4546 bxe_interrupt_detach(sc); 4547 4548 /* Report UNLOAD_DONE to MCP */ 4549 bxe_send_unload_done(sc, FALSE); 4550 } 4551 4552 /* 4553 * At this stage no more interrupts will arrive so we may safely clean 4554 * the queueable objects here in case they failed to get cleaned so far. 4555 */ 4556 if (IS_PF(sc)) { 4557 bxe_squeeze_objects(sc); 4558 } 4559 4560 /* There should be no more pending SP commands at this stage */ 4561 sc->sp_state = 0; 4562 4563 sc->port.pmf = 0; 4564 4565 bxe_free_fp_buffers(sc); 4566 4567 if (IS_PF(sc)) { 4568 bxe_free_mem(sc); 4569 } 4570 4571 bxe_free_fw_stats_mem(sc); 4572 4573 sc->state = BXE_STATE_CLOSED; 4574 4575 /* 4576 * Check if there are pending parity attentions. If there are - set 4577 * RECOVERY_IN_PROGRESS. 4578 */ 4579 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4580 bxe_set_reset_in_progress(sc); 4581 4582 /* Set RESET_IS_GLOBAL if needed */ 4583 if (global) { 4584 bxe_set_reset_global(sc); 4585 } 4586 } 4587 4588 /* 4589 * The last driver must disable a "close the gate" if there is no 4590 * parity attention or "process kill" pending. 4591 */ 4592 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4593 bxe_reset_is_done(sc, SC_PATH(sc))) { 4594 bxe_disable_close_the_gate(sc); 4595 } 4596 4597 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4598 4599 return (0); 4600} 4601 4602/* 4603 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4604 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4605 */ 4606static int 4607bxe_ifmedia_update(struct ifnet *ifp) 4608{ 4609 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); 4610 struct ifmedia *ifm; 4611 4612 ifm = &sc->ifmedia; 4613 4614 /* We only support Ethernet media type. */ 4615 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4616 return (EINVAL); 4617 } 4618 4619 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4620 case IFM_AUTO: 4621 break; 4622 case IFM_10G_CX4: 4623 case IFM_10G_SR: 4624 case IFM_10G_T: 4625 case IFM_10G_TWINAX: 4626 default: 4627 /* We don't support changing the media type. */ 4628 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4629 IFM_SUBTYPE(ifm->ifm_media)); 4630 return (EINVAL); 4631 } 4632 4633 return (0); 4634} 4635 4636/* 4637 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4638 */ 4639static void 4640bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4641{ 4642 struct bxe_softc *sc = if_getsoftc(ifp); 4643 4644 /* Report link down if the driver isn't running. */ 4645 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 4646 ifmr->ifm_active |= IFM_NONE; 4647 return; 4648 } 4649 4650 /* Set up the default interface info.
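 * (IFM_AVALID tells the stack the status report below is valid; IFM_ACTIVE is OR'd in only when the link is up.)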
*/ 4651 ifmr->ifm_status = IFM_AVALID; 4652 ifmr->ifm_active = IFM_ETHER; 4653 4654 if (sc->link_vars.link_up) { 4655 ifmr->ifm_status |= IFM_ACTIVE; 4656 } else { 4657 ifmr->ifm_active |= IFM_NONE; 4658 return; 4659 } 4660 4661 ifmr->ifm_active |= sc->media; 4662 4663 if (sc->link_vars.duplex == DUPLEX_FULL) { 4664 ifmr->ifm_active |= IFM_FDX; 4665 } else { 4666 ifmr->ifm_active |= IFM_HDX; 4667 } 4668} 4669 4670static int 4671bxe_ioctl_nvram(struct bxe_softc *sc, 4672 uint32_t priv_op, 4673 struct ifreq *ifr) 4674{ 4675 struct bxe_nvram_data nvdata_base; 4676 struct bxe_nvram_data *nvdata; 4677 int len; 4678 int error = 0; 4679 4680 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4681 4682 len = (sizeof(struct bxe_nvram_data) + 4683 nvdata_base.len - 4684 sizeof(uint32_t)); 4685 4686 if (len > sizeof(struct bxe_nvram_data)) { 4687 if ((nvdata = (struct bxe_nvram_data *) 4688 malloc(len, M_DEVBUF, 4689 (M_NOWAIT | M_ZERO))) == NULL) { 4690 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4691 return (1); 4692 } 4693 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4694 } else { 4695 nvdata = &nvdata_base; 4696 } 4697 4698 if (priv_op == BXE_IOC_RD_NVRAM) { 4699 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4700 nvdata->offset, nvdata->len); 4701 error = bxe_nvram_read(sc, 4702 nvdata->offset, 4703 (uint8_t *)nvdata->value, 4704 nvdata->len); 4705 copyout(nvdata, ifr->ifr_data, len); 4706 } else { /* BXE_IOC_WR_NVRAM */ 4707 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4708 nvdata->offset, nvdata->len); 4709 copyin(ifr->ifr_data, nvdata, len); 4710 error = bxe_nvram_write(sc, 4711 nvdata->offset, 4712 (uint8_t *)nvdata->value, 4713 nvdata->len); 4714 } 4715 4716 if (len > sizeof(struct bxe_nvram_data)) { 4717 free(nvdata, M_DEVBUF); 4718 } 4719 4720 return (error); 4721} 4722 4723static int 4724bxe_ioctl_stats_show(struct bxe_softc *sc, 4725 uint32_t priv_op, 4726 struct ifreq *ifr) 4727{ 4728 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4729 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4730 caddr_t p_tmp; 4731 uint32_t *offset; 4732 int i; 4733 4734 switch (priv_op) 4735 { 4736 case BXE_IOC_STATS_SHOW_NUM: 4737 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4738 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4739 BXE_NUM_ETH_STATS; 4740 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4741 STAT_NAME_LEN; 4742 return (0); 4743 4744 case BXE_IOC_STATS_SHOW_STR: 4745 memset(ifr->ifr_data, 0, str_size); 4746 p_tmp = ifr->ifr_data; 4747 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4748 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4749 p_tmp += STAT_NAME_LEN; 4750 } 4751 return (0); 4752 4753 case BXE_IOC_STATS_SHOW_CNT: 4754 memset(ifr->ifr_data, 0, stats_size); 4755 p_tmp = ifr->ifr_data; 4756 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4757 offset = ((uint32_t *)&sc->eth_stats + 4758 bxe_eth_stats_arr[i].offset); 4759 switch (bxe_eth_stats_arr[i].size) { 4760 case 4: 4761 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4762 break; 4763 case 8: 4764 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4765 break; 4766 default: 4767 *((uint64_t *)p_tmp) = 0; 4768 } 4769 p_tmp += sizeof(uint64_t); 4770 } 4771 return (0); 4772 4773 default: 4774 return (-1); 4775 } 4776} 4777 4778static void 4779bxe_handle_chip_tq(void *context, 4780 int pending) 4781{ 4782 struct bxe_softc *sc = (struct bxe_softc *)context; 4783 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4784 4785 switch (work) 4786 { 4787 4788 case 
CHIP_TQ_REINIT: 4789 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 4790 /* restart the interface */ 4791 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4792 bxe_periodic_stop(sc); 4793 BXE_CORE_LOCK(sc); 4794 bxe_stop_locked(sc); 4795 bxe_init_locked(sc); 4796 BXE_CORE_UNLOCK(sc); 4797 } 4798 break; 4799 4800 default: 4801 break; 4802 } 4803} 4804 4805/* 4806 * Handles any IOCTL calls from the operating system. 4807 * 4808 * Returns: 4809 * 0 = Success, >0 = Failure 4810 */ 4811static int 4812bxe_ioctl(if_t ifp, 4813 u_long command, 4814 caddr_t data) 4815{ 4816 struct bxe_softc *sc = if_getsoftc(ifp); 4817 struct ifreq *ifr = (struct ifreq *)data; 4818 struct bxe_nvram_data *nvdata; 4819 uint32_t priv_op; 4820 int mask = 0; 4821 int reinit = 0; 4822 int error = 0; 4823 4824 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4825 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4826 4827 switch (command) 4828 { 4829 case SIOCSIFMTU: 4830 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4831 ifr->ifr_mtu); 4832 4833 if (sc->mtu == ifr->ifr_mtu) { 4834 /* nothing to change */ 4835 break; 4836 } 4837 4838 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4839 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4840 ifr->ifr_mtu, mtu_min, mtu_max); 4841 error = EINVAL; 4842 break; 4843 } 4844 4845 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4846 (unsigned long)ifr->ifr_mtu); 4847 /* 4848 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4849 (unsigned long)ifr->ifr_mtu); 4850 XXX - Not sure why it needs to be atomic 4851 */ 4852 if_setmtu(ifp, ifr->ifr_mtu); 4853 reinit = 1; 4854 break; 4855 4856 case SIOCSIFFLAGS: 4857 /* toggle the interface state up or down */ 4858 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4859 4860 BXE_CORE_LOCK(sc); 4861 /* check if the interface is up */ 4862 if (if_getflags(ifp) & IFF_UP) { 4863 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4864 /* set the receive mode flags */ 4865 bxe_set_rx_mode(sc); 4866 } else { 4867 bxe_init_locked(sc); 4868 } 4869 } else { 4870 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4871 bxe_periodic_stop(sc); 4872 bxe_stop_locked(sc); 4873 } 4874 } 4875 BXE_CORE_UNLOCK(sc); 4876 4877 break; 4878 4879 case SIOCADDMULTI: 4880 case SIOCDELMULTI: 4881 /* add/delete multicast addresses */ 4882 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4883 4884 /* check if the interface is up */ 4885 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4886 /* set the receive mode flags */ 4887 BXE_CORE_LOCK(sc); 4888 bxe_set_rx_mode(sc); 4889 BXE_CORE_UNLOCK(sc); 4890 } 4891 4892 break; 4893 4894 case SIOCSIFCAP: 4895 /* find out which capabilities have changed */ 4896 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); 4897 4898 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4899 mask); 4900 4901 /* toggle the LRO capabilities enable flag */ 4902 if (mask & IFCAP_LRO) { 4903 if_togglecapenable(ifp, IFCAP_LRO); 4904 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4905 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); 4906 reinit = 1; 4907 } 4908 4909 /* toggle the TXCSUM checksum capabilities enable flag */ 4910 if (mask & IFCAP_TXCSUM) { 4911 if_togglecapenable(ifp, IFCAP_TXCSUM); 4912 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4913 (if_getcapenable(ifp) & IFCAP_TXCSUM) ?
"ON" : "OFF"); 4914 if (if_getcapenable(ifp) & IFCAP_TXCSUM) { 4915 if_sethwassistbits(ifp, (CSUM_IP | 4916 CSUM_TCP | 4917 CSUM_UDP | 4918 CSUM_TSO | 4919 CSUM_TCP_IPV6 | 4920 CSUM_UDP_IPV6), 0); 4921 } else { 4922 if_clearhwassist(ifp); /* XXX */ 4923 } 4924 } 4925 4926 /* toggle the RXCSUM checksum capabilities enable flag */ 4927 if (mask & IFCAP_RXCSUM) { 4928 if_togglecapenable(ifp, IFCAP_RXCSUM); 4929 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4930 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); 4931 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 4932 if_sethwassistbits(ifp, (CSUM_IP | 4933 CSUM_TCP | 4934 CSUM_UDP | 4935 CSUM_TSO | 4936 CSUM_TCP_IPV6 | 4937 CSUM_UDP_IPV6), 0); 4938 } else { 4939 if_clearhwassist(ifp); /* XXX */ 4940 } 4941 } 4942 4943 /* toggle TSO4 capabilities enabled flag */ 4944 if (mask & IFCAP_TSO4) { 4945 if_togglecapenable(ifp, IFCAP_TSO4); 4946 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4947 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); 4948 } 4949 4950 /* toggle TSO6 capabilities enabled flag */ 4951 if (mask & IFCAP_TSO6) { 4952 if_togglecapenable(ifp, IFCAP_TSO6); 4953 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4954 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); 4955 } 4956 4957 /* toggle VLAN_HWTSO capabilities enabled flag */ 4958 if (mask & IFCAP_VLAN_HWTSO) { 4959 4960 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 4961 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4962 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4963 } 4964 4965 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4966 if (mask & IFCAP_VLAN_HWCSUM) { 4967 /* XXX investigate this... */ 4968 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4969 error = EINVAL; 4970 } 4971 4972 /* toggle VLAN_MTU capabilities enable flag */ 4973 if (mask & IFCAP_VLAN_MTU) { 4974 /* XXX investigate this... */ 4975 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4976 error = EINVAL; 4977 } 4978 4979 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4980 if (mask & IFCAP_VLAN_HWTAGGING) { 4981 /* XXX investigate this... */ 4982 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4983 error = EINVAL; 4984 } 4985 4986 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4987 if (mask & IFCAP_VLAN_HWFILTER) { 4988 /* XXX investigate this... */ 4989 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4990 error = EINVAL; 4991 } 4992 4993 /* XXX not yet... 
4994 * IFCAP_WOL_MAGIC 4995 */ 4996 4997 break; 4998 4999 case SIOCSIFMEDIA: 5000 case SIOCGIFMEDIA: 5001 /* set/get interface media */ 5002 BLOGD(sc, DBG_IOCTL, 5003 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 5004 (command & 0xff)); 5005 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 5006 break; 5007 5008 case SIOCGPRIVATE_0: 5009 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); 5010 5011 switch (priv_op) 5012 { 5013 case BXE_IOC_RD_NVRAM: 5014 case BXE_IOC_WR_NVRAM: 5015 nvdata = (struct bxe_nvram_data *)ifr->ifr_data; 5016 BLOGD(sc, DBG_IOCTL, 5017 "Received Private NVRAM ioctl addr=0x%x size=%u\n", 5018 nvdata->offset, nvdata->len); 5019 error = bxe_ioctl_nvram(sc, priv_op, ifr); 5020 break; 5021 5022 case BXE_IOC_STATS_SHOW_NUM: 5023 case BXE_IOC_STATS_SHOW_STR: 5024 case BXE_IOC_STATS_SHOW_CNT: 5025 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", 5026 priv_op); 5027 error = bxe_ioctl_stats_show(sc, priv_op, ifr); 5028 break; 5029 5030 default: 5031 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); 5032 error = EINVAL; 5033 break; 5034 } 5035 5036 break; 5037 5038 default: 5039 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 5040 (command & 0xff)); 5041 error = ether_ioctl(ifp, command, data); 5042 break; 5043 } 5044 5045 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 5046 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 5047 "Re-initializing hardware from IOCTL change\n"); 5048 bxe_periodic_stop(sc); 5049 BXE_CORE_LOCK(sc); 5050 bxe_stop_locked(sc); 5051 bxe_init_locked(sc); 5052 BXE_CORE_UNLOCK(sc); 5053 } 5054 5055 return (error); 5056} 5057 5058static __noinline void 5059bxe_dump_mbuf(struct bxe_softc *sc, 5060 struct mbuf *m, 5061 uint8_t contents) 5062{ 5063 char * type; 5064 int i = 0; 5065 5066 if (!(sc->debug & DBG_MBUF)) { 5067 return; 5068 } 5069 5070 if (m == NULL) { 5071 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 5072 return; 5073 } 5074 5075 while (m) { 5076 BLOGD(sc, DBG_MBUF, 5077 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 5078 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 5079 5080 if (m->m_flags & M_PKTHDR) { 5081 BLOGD(sc, DBG_MBUF, 5082 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 5083 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 5084 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 5085 } 5086 5087 if (m->m_flags & M_EXT) { 5088 switch (m->m_ext.ext_type) { 5089 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 5090 case EXT_SFBUF: type = "EXT_SFBUF"; break; 5091 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 5092 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 5093 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 5094 case EXT_PACKET: type = "EXT_PACKET"; break; 5095 case EXT_MBUF: type = "EXT_MBUF"; break; 5096 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 5097 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 5098 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 5099 case EXT_EXTREF: type = "EXT_EXTREF"; break; 5100 default: type = "UNKNOWN"; break; 5101 } 5102 5103 BLOGD(sc, DBG_MBUF, 5104 "%02d: - m_ext: %p ext_size=%d type=%s\n", 5105 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 5106 } 5107 5108 if (contents) { 5109 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 5110 } 5111 5112 m = m->m_next; 5113 i++; 5114 } 5115} 5116 5117/* 5118 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 5119 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 
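 * Worked example (assumed numbers): with an MSS of 1460 and a chain of 13 data segments of 120 bytes each, every 10-segment window sums to only 1200 bytes, so the check fails and the chain must be defragmented before it is safe to TSO.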
5120 * The window: 3 bds are reserved = 1 for the headers BD + 2 for the parse BD and last BD 5121 * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10. 5122 * Returns: 0 if OK to send, 1 if packet needs further defragmentation 5123 */ 5124static int 5125bxe_chktso_window(struct bxe_softc *sc, 5126 int nsegs, 5127 bus_dma_segment_t *segs, 5128 struct mbuf *m) 5129{ 5130 uint32_t num_wnds, wnd_size, wnd_sum; 5131 int32_t frag_idx, wnd_idx; 5132 unsigned short lso_mss; 5133 int defrag; 5134 5135 defrag = 0; 5136 wnd_sum = 0; 5137 wnd_size = 10; 5138 num_wnds = nsegs - wnd_size; 5139 lso_mss = htole16(m->m_pkthdr.tso_segsz); 5140 5141 /* 5142 * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf, so calculate the 5143 * first window sum of data while skipping the first segment, assuming it is the 5144 * header. 5145 */ 5146 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 5147 wnd_sum += htole16(segs[frag_idx].ds_len); 5148 } 5149 5150 /* check the first 10 bd window size */ 5151 if (wnd_sum < lso_mss) { 5152 return (1); 5153 } 5154 5155 /* run through the windows */ 5156 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 5157 /* subtract the first mbuf->m_len of the last wndw(-header) */ 5158 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 5159 /* add the next mbuf len to the len of our new window */ 5160 wnd_sum += htole16(segs[frag_idx].ds_len); 5161 if (wnd_sum < lso_mss) { 5162 return (1); 5163 } 5164 } 5165 5166 return (0); 5167} 5168 5169static uint8_t 5170bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 5171 struct mbuf *m, 5172 uint32_t *parsing_data) 5173{ 5174 struct ether_vlan_header *eh = NULL; 5175 struct ip *ip4 = NULL; 5176 struct ip6_hdr *ip6 = NULL; 5177 caddr_t ip = NULL; 5178 struct tcphdr *th = NULL; 5179 int e_hlen, ip_hlen, l4_off; 5180 uint16_t proto; 5181 5182 if (m->m_pkthdr.csum_flags == CSUM_IP) { 5183 /* no L4 checksum offload needed */ 5184 return (0); 5185 } 5186 5187 /* get the Ethernet header */ 5188 eh = mtod(m, struct ether_vlan_header *); 5189 5190 /* handle VLAN encapsulation if present */ 5191 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5192 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5193 proto = ntohs(eh->evl_proto); 5194 } else { 5195 e_hlen = ETHER_HDR_LEN; 5196 proto = ntohs(eh->evl_encap_proto); 5197 } 5198 5199 switch (proto) { 5200 case ETHERTYPE_IP: 5201 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5202 ip4 = (m->m_len < sizeof(struct ip)) ? 5203 (struct ip *)m->m_next->m_data : 5204 (struct ip *)(m->m_data + e_hlen); 5205 /* ip_hl is number of 32-bit words */ 5206 ip_hlen = (ip4->ip_hl << 2); 5207 ip = (caddr_t)ip4; 5208 break; 5209 case ETHERTYPE_IPV6: 5210 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5211 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5212 (struct ip6_hdr *)m->m_next->m_data : 5213 (struct ip6_hdr *)(m->m_data + e_hlen); 5214 /* XXX cannot support offload with IPv6 extensions */ 5215 ip_hlen = sizeof(struct ip6_hdr); 5216 ip = (caddr_t)ip6; 5217 break; 5218 default: 5219 /* We can't offload in this case... */ 5220 /* XXX error stat ???
*/ 5221 return (0); 5222 } 5223 5224 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5225 l4_off = (e_hlen + ip_hlen); 5226 5227 *parsing_data |= 5228 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5229 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5230 5231 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5232 CSUM_TSO | 5233 CSUM_TCP_IPV6)) { 5234 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5235 th = (struct tcphdr *)(ip + ip_hlen); 5236 /* th_off is number of 32-bit words */ 5237 *parsing_data |= ((th->th_off << 5238 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5239 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5240 return (l4_off + (th->th_off << 2)); /* entire header length */ 5241 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5242 CSUM_UDP_IPV6)) { 5243 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5244 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5245 } else { 5246 /* XXX error stat ??? */ 5247 return (0); 5248 } 5249} 5250 5251static uint8_t 5252bxe_set_pbd_csum(struct bxe_fastpath *fp, 5253 struct mbuf *m, 5254 struct eth_tx_parse_bd_e1x *pbd) 5255{ 5256 struct ether_vlan_header *eh = NULL; 5257 struct ip *ip4 = NULL; 5258 struct ip6_hdr *ip6 = NULL; 5259 caddr_t ip = NULL; 5260 struct tcphdr *th = NULL; 5261 struct udphdr *uh = NULL; 5262 int e_hlen, ip_hlen; 5263 uint16_t proto; 5264 uint8_t hlen; 5265 uint16_t tmp_csum; 5266 uint32_t *tmp_uh; 5267 5268 /* get the Ethernet header */ 5269 eh = mtod(m, struct ether_vlan_header *); 5270 5271 /* handle VLAN encapsulation if present */ 5272 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5273 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5274 proto = ntohs(eh->evl_proto); 5275 } else { 5276 e_hlen = ETHER_HDR_LEN; 5277 proto = ntohs(eh->evl_encap_proto); 5278 } 5279 5280 switch (proto) { 5281 case ETHERTYPE_IP: 5282 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5283 ip4 = (m->m_len < sizeof(struct ip)) ? 5284 (struct ip *)m->m_next->m_data : 5285 (struct ip *)(m->m_data + e_hlen); 5286 /* ip_hl is number of 32-bit words */ 5287 ip_hlen = (ip4->ip_hl << 1); 5288 ip = (caddr_t)ip4; 5289 break; 5290 case ETHERTYPE_IPV6: 5291 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5292 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5293 (struct ip6_hdr *)m->m_next->m_data : 5294 (struct ip6_hdr *)(m->m_data + e_hlen); 5295 /* XXX cannot support offload with IPv6 extensions */ 5296 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5297 ip = (caddr_t)ip6; 5298 break; 5299 default: 5300 /* We can't offload in this case... */ 5301 /* XXX error stat ??? 
*/ 5302 return (0); 5303 } 5304 5305 hlen = (e_hlen >> 1); 5306 5307 /* note that rest of global_data is indirectly zeroed here */ 5308 if (m->m_flags & M_VLANTAG) { 5309 pbd->global_data = 5310 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5311 } else { 5312 pbd->global_data = htole16(hlen); 5313 } 5314 5315 pbd->ip_hlen_w = ip_hlen; 5316 5317 hlen += pbd->ip_hlen_w; 5318 5319 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5320 5321 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5322 CSUM_TSO | 5323 CSUM_TCP_IPV6)) { 5324 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5325 /* th_off is number of 32-bit words */ 5326 hlen += (uint16_t)(th->th_off << 1); 5327 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5328 CSUM_UDP_IPV6)) { 5329 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5330 hlen += (sizeof(struct udphdr) / 2); 5331 } else { 5332 /* valid case as only CSUM_IP was set */ 5333 return (0); 5334 } 5335 5336 pbd->total_hlen_w = htole16(hlen); 5337 5338 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5339 CSUM_TSO | 5340 CSUM_TCP_IPV6)) { 5341 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5342 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5343 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5344 CSUM_UDP_IPV6)) { 5345 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5346 5347 /* 5348 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5349 * checksums and does not know anything about the UDP header and where 5350 * the checksum field is located. It only knows about TCP. Therefore 5351 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5352 * offload. Since the checksum field offset for TCP is 16 bytes and 5353 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5354 * bytes less than the start of the UDP header. This allows the 5355 * hardware to write the checksum in the correct spot. But the 5356 * hardware will compute a checksum which includes the last 10 bytes 5357 * of the IP header. To correct this we tweak the stack computed 5358 * pseudo checksum by folding in the calculation of the inverse 5359 * checksum for those final 10 bytes of the IP header. This allows 5360 * the correct checksum to be computed by the hardware. 5361 */ 5362 5363 /* set pointer 10 bytes before UDP header */ 5364 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5365 5366 /* calculate a pseudo header checksum over the first 10 bytes */ 5367 tmp_csum = in_pseudo(*tmp_uh, 5368 *(tmp_uh + 1), 5369 *(uint16_t *)(tmp_uh + 2)); 5370 5371 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5372 } 5373 5374 return (hlen * 2); /* entire header length, number of bytes */ 5375} 5376 5377static void 5378bxe_set_pbd_lso_e2(struct mbuf *m, 5379 uint32_t *parsing_data) 5380{ 5381 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5382 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5383 ETH_TX_PARSE_BD_E2_LSO_MSS); 5384 5385 /* XXX test for IPv6 with extension header... */ 5386#if 0 5387 struct ip6_hdr *ip6; 5388 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5389 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5390#endif 5391} 5392 5393static void 5394bxe_set_pbd_lso(struct mbuf *m, 5395 struct eth_tx_parse_bd_e1x *pbd) 5396{ 5397 struct ether_vlan_header *eh = NULL; 5398 struct ip *ip = NULL; 5399 struct tcphdr *th = NULL; 5400 int e_hlen; 5401 5402 /* get the Ethernet header */ 5403 eh = mtod(m, struct ether_vlan_header *); 5404 5405 /* handle VLAN encapsulation if present */ 5406 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5407 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; 5408 5409 /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */ 5410 /* XXX assuming IPv4 */ 5411 ip = (struct ip *)(m->m_data + e_hlen); 5412 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 5413 5414 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); 5415 pbd->tcp_send_seq = ntohl(th->th_seq); 5416 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); 5417 5418#if 1 5419 /* XXX IPv4 */ 5420 pbd->ip_id = ntohs(ip->ip_id); 5421 pbd->tcp_pseudo_csum = 5422 ntohs(in_pseudo(ip->ip_src.s_addr, 5423 ip->ip_dst.s_addr, 5424 htons(IPPROTO_TCP))); 5425#else 5426 /* XXX IPv6 */ 5427 pbd->tcp_pseudo_csum = 5428 ntohs(in_pseudo(&ip6->ip6_src, 5429 &ip6->ip6_dst, 5430 htons(IPPROTO_TCP))); 5431#endif 5432 5433 pbd->global_data |= 5434 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 5435} 5436 5437/* 5438 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory 5439 * visible to the controller. 5440 * 5441 * If an mbuf is submitted to this routine and cannot be given to the 5442 * controller (e.g. it has too many fragments) then the function may free 5443 * the mbuf and return to the caller. 5444 * 5445 * Returns: 5446 * 0 = Success, !0 = Failure 5447 * Note the side effect that an mbuf may be freed if it causes a problem. 5448 */ 5449static int 5450bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) 5451{ 5452 bus_dma_segment_t segs[32]; 5453 struct mbuf *m0; 5454 struct bxe_sw_tx_bd *tx_buf; 5455 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 5456 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 5457 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ 5458 struct eth_tx_bd *tx_data_bd; 5459 struct eth_tx_bd *tx_total_pkt_size_bd; 5460 struct eth_tx_start_bd *tx_start_bd; 5461 uint16_t bd_prod, pkt_prod, total_pkt_size; 5462 uint8_t mac_type; 5463 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; 5464 struct bxe_softc *sc; 5465 uint16_t tx_bd_avail; 5466 struct ether_vlan_header *eh; 5467 uint32_t pbd_e2_parsing_data = 0; 5468 uint8_t hlen = 0; 5469 int tmp_bd; 5470 int i; 5471 5472 sc = fp->sc; 5473 5474 M_ASSERTPKTHDR(*m_head); 5475 5476 m0 = *m_head; 5477 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; 5478 tx_start_bd = NULL; 5479 tx_data_bd = NULL; 5480 tx_total_pkt_size_bd = NULL; 5481 5482 /* get the H/W pointer for packets and BDs */ 5483 pkt_prod = fp->tx_pkt_prod; 5484 bd_prod = fp->tx_bd_prod; 5485 5486 mac_type = UNICAST_ADDRESS; 5487 5488 /* map the mbuf into the next open DMAable memory */ 5489 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; 5490 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5491 tx_buf->m_map, m0, 5492 segs, &nsegs, BUS_DMA_NOWAIT); 5493 5494 /* mapping errors */ 5495 if (__predict_false(error != 0)) { 5496 fp->eth_q_stats.tx_dma_mapping_failure++; 5497 if (error == ENOMEM) { 5498 /* resource issue, try again later */ 5499 rc = ENOMEM; 5500 } else if (error == EFBIG) { 5501 /* possibly recoverable with defragmentation */ 5502 fp->eth_q_stats.mbuf_defrag_attempts++; 5503 m0 = m_defrag(*m_head, M_NOWAIT); 5504 if (m0 == NULL) { 5505 fp->eth_q_stats.mbuf_defrag_failures++; 5506 rc = ENOBUFS; 5507 } else { 5508 /* defrag successful, try mapping again */ 5509 *m_head = m0; 5510 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5511 tx_buf->m_map, m0, 5512 segs, &nsegs, BUS_DMA_NOWAIT); 5513 if (error) { 5514 fp->eth_q_stats.tx_dma_mapping_failure++; 5515 rc = error; 5516 } 5517 } 5518 } else { 5519 /* unknown, unrecoverable mapping error */ 5520 BLOGE(sc,
"Unknown TX mapping error rc=%d\n", error); 5521 bxe_dump_mbuf(sc, m0, FALSE); 5522 rc = error; 5523 } 5524 5525 goto bxe_tx_encap_continue; 5526 } 5527 5528 tx_bd_avail = bxe_tx_avail(sc, fp); 5529 5530 /* make sure there is enough room in the send queue */ 5531 if (__predict_false(tx_bd_avail < (nsegs + 2))) { 5532 /* Recoverable, try again later. */ 5533 fp->eth_q_stats.tx_hw_queue_full++; 5534 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5535 rc = ENOMEM; 5536 goto bxe_tx_encap_continue; 5537 } 5538 5539 /* capture the current H/W TX chain high watermark */ 5540 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < 5541 (TX_BD_USABLE - tx_bd_avail))) { 5542 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); 5543 } 5544 5545 /* make sure it fits in the packet window */ 5546 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5547 /* 5548 * The mbuf may be to big for the controller to handle. If the frame 5549 * is a TSO frame we'll need to do an additional check. 5550 */ 5551 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5552 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { 5553 goto bxe_tx_encap_continue; /* OK to send */ 5554 } else { 5555 fp->eth_q_stats.tx_window_violation_tso++; 5556 } 5557 } else { 5558 fp->eth_q_stats.tx_window_violation_std++; 5559 } 5560 5561 /* lets try to defragment this mbuf and remap it */ 5562 fp->eth_q_stats.mbuf_defrag_attempts++; 5563 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5564 5565 m0 = m_defrag(*m_head, M_NOWAIT); 5566 if (m0 == NULL) { 5567 fp->eth_q_stats.mbuf_defrag_failures++; 5568 /* Ugh, just drop the frame... :( */ 5569 rc = ENOBUFS; 5570 } else { 5571 /* defrag successful, try mapping again */ 5572 *m_head = m0; 5573 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5574 tx_buf->m_map, m0, 5575 segs, &nsegs, BUS_DMA_NOWAIT); 5576 if (error) { 5577 fp->eth_q_stats.tx_dma_mapping_failure++; 5578 /* No sense in trying to defrag/copy chain, drop it. 
:( */ 5579 rc = error; 5580 } 5581 else { 5582 /* if the chain is still too long then drop it */ 5583 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5584 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5585 rc = ENODEV; 5586 } 5587 } 5588 } 5589 } 5590 5591bxe_tx_encap_continue: 5592 5593 /* Check for errors */ 5594 if (rc) { 5595 if (rc == ENOMEM) { 5596 /* recoverable try again later */ 5597 } else { 5598 fp->eth_q_stats.tx_soft_errors++; 5599 fp->eth_q_stats.mbuf_alloc_tx--; 5600 m_freem(*m_head); 5601 *m_head = NULL; 5602 } 5603 5604 return (rc); 5605 } 5606 5607 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5608 if (m0->m_flags & M_BCAST) { 5609 mac_type = BROADCAST_ADDRESS; 5610 } else if (m0->m_flags & M_MCAST) { 5611 mac_type = MULTICAST_ADDRESS; 5612 } 5613 5614 /* store the mbuf into the mbuf ring */ 5615 tx_buf->m = m0; 5616 tx_buf->first_bd = fp->tx_bd_prod; 5617 tx_buf->flags = 0; 5618 5619 /* prepare the first transmit (start) BD for the mbuf */ 5620 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5621 5622 BLOGD(sc, DBG_TX, 5623 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5624 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5625 5626 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5627 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5628 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5629 total_pkt_size += tx_start_bd->nbytes; 5630 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5631 5632 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5633 5634 /* all frames have at least Start BD + Parsing BD */ 5635 nbds = nsegs + 1; 5636 tx_start_bd->nbd = htole16(nbds); 5637 5638 if (m0->m_flags & M_VLANTAG) { 5639 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5640 tx_start_bd->bd_flags.as_bitfield |= 5641 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5642 } else { 5643 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5644 if (IS_VF(sc)) { 5645 /* map ethernet header to find type and header length */ 5646 eh = mtod(m0, struct ether_vlan_header *); 5647 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5648 } else { 5649 /* used by FW for packet accounting */ 5650 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5651#if 0 5652 /* 5653 * If NPAR-SD is active then FW should do the tagging regardless 5654 * of value of priority. Otherwise, if priority indicates this is 5655 * a control packet we need to indicate to FW to avoid tagging. 5656 */ 5657 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5658 SET_FLAG(tx_start_bd->general_data, 5659 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5660 } 5661#endif 5662 } 5663 } 5664 5665 /* 5666 * add a parsing BD from the chain. 
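 * (it is filled in below by bxe_set_pbd_csum*()/bxe_set_pbd_lso*() with header offsets and, for TSO, the MSS).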
The parsing BD is always added 5667 * though it is only used for TSO and chksum 5668 */ 5669 bd_prod = TX_BD_NEXT(bd_prod); 5670 5671 if (m0->m_pkthdr.csum_flags) { 5672 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5673 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5674 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5675 } 5676 5677 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5678 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5679 ETH_TX_BD_FLAGS_L4_CSUM); 5680 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5681 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5682 ETH_TX_BD_FLAGS_IS_UDP | 5683 ETH_TX_BD_FLAGS_L4_CSUM); 5684 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5685 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5686 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5687 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5688 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5689 ETH_TX_BD_FLAGS_IS_UDP); 5690 } 5691 } 5692 5693 if (!CHIP_IS_E1x(sc)) { 5694 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5695 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5696 5697 if (m0->m_pkthdr.csum_flags) { 5698 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5699 } 5700 5701#if 0 5702 /* 5703 * Add the MACs to the parsing BD if the module param was 5704 * explicitly set, if this is a vf, or in switch independent 5705 * mode. 5706 */ 5707 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { 5708 eh = mtod(m0, struct ether_vlan_header *); 5709 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 5710 &pbd_e2->data.mac_addr.src_mid, 5711 &pbd_e2->data.mac_addr.src_lo, 5712 eh->evl_shost); 5713 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 5714 &pbd_e2->data.mac_addr.dst_mid, 5715 &pbd_e2->data.mac_addr.dst_lo, 5716 eh->evl_dhost); 5717 } 5718#endif 5719 5720 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5721 mac_type); 5722 } else { 5723 uint16_t global_data = 0; 5724 5725 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5726 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5727 5728 if (m0->m_pkthdr.csum_flags) { 5729 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5730 } 5731 5732 SET_FLAG(global_data, 5733 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5734 pbd_e1x->global_data |= htole16(global_data); 5735 } 5736 5737 /* setup the parsing BD with TSO specific info */ 5738 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5739 fp->eth_q_stats.tx_ofld_frames_lso++; 5740 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5741 5742 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5743 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5744 5745 /* split the first BD into header/data making the fw job easy */ 5746 nbds++; 5747 tx_start_bd->nbd = htole16(nbds); 5748 tx_start_bd->nbytes = htole16(hlen); 5749 5750 bd_prod = TX_BD_NEXT(bd_prod); 5751 5752 /* new transmit BD after the tx_parse_bd */ 5753 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5754 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5755 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5756 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5757 if (tx_total_pkt_size_bd == NULL) { 5758 tx_total_pkt_size_bd = tx_data_bd; 5759 } 5760 5761 BLOGD(sc, DBG_TX, 5762 "TSO split header size is %d (%x:%x) nbds %d\n", 5763 le16toh(tx_start_bd->nbytes), 5764 le32toh(tx_start_bd->addr_hi), 5765 le32toh(tx_start_bd->addr_lo), 5766 nbds); 5767 } 5768 5769 if (!CHIP_IS_E1x(sc)) { 5770 
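        /* E2 and newer chips pack the LSO MSS into the 32-bit parsing_data word; E1x chips use discrete fields in the e1x parse BD instead. */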
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5771 } else { 5772 bxe_set_pbd_lso(m0, pbd_e1x); 5773 } 5774 } 5775 5776 if (pbd_e2_parsing_data) { 5777 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5778 } 5779 5780 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5781 for (i = 1; i < nsegs ; i++) { 5782 bd_prod = TX_BD_NEXT(bd_prod); 5783 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5784 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5785 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5786 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5787 if (tx_total_pkt_size_bd == NULL) { 5788 tx_total_pkt_size_bd = tx_data_bd; 5789 } 5790 total_pkt_size += tx_data_bd->nbytes; 5791 } 5792 5793 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5794 5795 if (tx_total_pkt_size_bd != NULL) { 5796 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5797 } 5798 5799 if (__predict_false(sc->debug & DBG_TX)) { 5800 tmp_bd = tx_buf->first_bd; 5801 for (i = 0; i < nbds; i++) 5802 { 5803 if (i == 0) { 5804 BLOGD(sc, DBG_TX, 5805 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5806 "bd_flags=0x%x hdr_nbds=%d\n", 5807 tx_start_bd, 5808 tmp_bd, 5809 le16toh(tx_start_bd->nbd), 5810 le16toh(tx_start_bd->vlan_or_ethertype), 5811 tx_start_bd->bd_flags.as_bitfield, 5812 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5813 } else if (i == 1) { 5814 if (pbd_e1x) { 5815 BLOGD(sc, DBG_TX, 5816 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5817 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5818 "tcp_seq=%u total_hlen_w=%u\n", 5819 pbd_e1x, 5820 tmp_bd, 5821 pbd_e1x->global_data, 5822 pbd_e1x->ip_hlen_w, 5823 pbd_e1x->ip_id, 5824 pbd_e1x->lso_mss, 5825 pbd_e1x->tcp_flags, 5826 pbd_e1x->tcp_pseudo_csum, 5827 pbd_e1x->tcp_send_seq, 5828 le16toh(pbd_e1x->total_hlen_w)); 5829 } else { /* if (pbd_e2) */ 5830 BLOGD(sc, DBG_TX, 5831 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5832 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5833 pbd_e2, 5834 tmp_bd, 5835 pbd_e2->data.mac_addr.dst_hi, 5836 pbd_e2->data.mac_addr.dst_mid, 5837 pbd_e2->data.mac_addr.dst_lo, 5838 pbd_e2->data.mac_addr.src_hi, 5839 pbd_e2->data.mac_addr.src_mid, 5840 pbd_e2->data.mac_addr.src_lo, 5841 pbd_e2->parsing_data); 5842 } 5843 } 5844 5845 if (i != 1) { /* skip parse bd as it doesn't hold data */ 5846 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5847 BLOGD(sc, DBG_TX, 5848 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo=0x%x\n", 5849 tx_data_bd, 5850 tmp_bd, 5851 le16toh(tx_data_bd->nbytes), 5852 le32toh(tx_data_bd->addr_hi), 5853 le32toh(tx_data_bd->addr_lo)); 5854 } 5855 5856 tmp_bd = TX_BD_NEXT(tmp_bd); 5857 } 5858 } 5859 5860 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5861 5862 /* update TX BD producer index value for next TX */ 5863 bd_prod = TX_BD_NEXT(bd_prod); 5864 5865 /* 5866 * If the chain of tx_bd's describing this frame is adjacent to or spans 5867 * an eth_tx_next_bd element then we need to increment the nbds value. 5868 */ 5869 if (TX_BD_IDX(bd_prod) < nbds) { 5870 nbds++; 5871 } 5872 5873 /* don't allow reordering of writes for nbd and packets */ 5874 mb(); 5875 5876 fp->tx_db.data.prod += nbds; 5877 5878 /* producer points to the next free tx_bd at this point */ 5879 fp->tx_pkt_prod++; 5880 fp->tx_bd_prod = bd_prod; 5881 5882 DOORBELL(sc, fp->index, fp->tx_db.raw); 5883 5884 fp->eth_q_stats.tx_pkts++; 5885 5886 /* Prevent speculative reads from getting ahead of the status block.
*/ 5887 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5888 0, 0, BUS_SPACE_BARRIER_READ); 5889 5890 /* Prevent speculative reads from getting ahead of the doorbell. */ 5891 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5892 0, 0, BUS_SPACE_BARRIER_READ); 5893 5894 return (0); 5895} 5896 5897static void 5898bxe_tx_start_locked(struct bxe_softc *sc, 5899 if_t ifp, 5900 struct bxe_fastpath *fp) 5901{ 5902 struct mbuf *m = NULL; 5903 int tx_count = 0; 5904 uint16_t tx_bd_avail; 5905 5906 BXE_FP_TX_LOCK_ASSERT(fp); 5907 5908 /* keep adding entries while there are frames to send */ 5909 while (!if_sendq_empty(ifp)) { 5910 5911 /* 5912 * check for any frames to send 5913 * dequeue can still be NULL even if queue is not empty 5914 */ 5915 m = if_dequeue(ifp); 5916 if (__predict_false(m == NULL)) { 5917 break; 5918 } 5919 5920 /* the mbuf now belongs to us */ 5921 fp->eth_q_stats.mbuf_alloc_tx++; 5922 5923 /* 5924 * Put the frame into the transmit ring. If we don't have room, 5925 * place the mbuf back at the head of the TX queue, set the 5926 * OACTIVE flag, and wait for the NIC to drain the chain. 5927 */ 5928 if (__predict_false(bxe_tx_encap(fp, &m))) { 5929 fp->eth_q_stats.tx_encap_failures++; 5930 if (m != NULL) { 5931 /* mark the TX queue as full and return the frame */ 5932 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5933 if_sendq_prepend(ifp, m); 5934 fp->eth_q_stats.mbuf_alloc_tx--; 5935 fp->eth_q_stats.tx_queue_xoff++; 5936 } 5937 5938 /* stop looking for more work */ 5939 break; 5940 } 5941 5942 /* the frame was enqueued successfully */ 5943 tx_count++; 5944 5945 /* send a copy of the frame to any BPF listeners. */ 5946 if_etherbpfmtap(ifp, m); 5947 5948 tx_bd_avail = bxe_tx_avail(sc, fp); 5949 5950 /* handle any completions if we're running low */ 5951 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5952 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5953 bxe_txeof(sc, fp); 5954 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5955 break; 5956 } 5957 } 5958 } 5959 5960 /* all TX packets were dequeued and/or the tx ring is full */ 5961 if (tx_count > 0) { 5962 /* reset the TX watchdog timeout timer */ 5963 fp->watchdog_timer = BXE_TX_TIMEOUT; 5964 } 5965} 5966 5967/* Legacy (non-RSS) dispatch routine */ 5968static void 5969bxe_tx_start(if_t ifp) 5970{ 5971 struct bxe_softc *sc; 5972 struct bxe_fastpath *fp; 5973 5974 sc = if_getsoftc(ifp); 5975 5976 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5977 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5978 return; 5979 } 5980 5981 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5982 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5983 return; 5984 } 5985 5986 if (!sc->link_vars.link_up) { 5987 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5988 return; 5989 } 5990 5991 fp = &sc->fp[0]; 5992 5993 BXE_FP_TX_LOCK(fp); 5994 bxe_tx_start_locked(sc, ifp, fp); 5995 BXE_FP_TX_UNLOCK(fp); 5996} 5997 5998#if __FreeBSD_version >= 800000 5999 6000static int 6001bxe_tx_mq_start_locked(struct bxe_softc *sc, 6002 if_t ifp, 6003 struct bxe_fastpath *fp, 6004 struct mbuf *m) 6005{ 6006 struct buf_ring *tx_br = fp->tx_br; 6007 struct mbuf *next; 6008 int depth, rc, tx_count; 6009 uint16_t tx_bd_avail; 6010 6011 rc = tx_count = 0; 6012 6013 BXE_FP_TX_LOCK_ASSERT(fp); 6014 6015 if (!tx_br) { 6016 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 6017 return (EINVAL); 6018 } 6019 6020 if (!sc->link_vars.link_up || 6021 (ifp->if_drv_flags & 6022 (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) 
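/* true (defer to the ring) unless RUNNING is set and OACTIVE is clear */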
!= IFF_DRV_RUNNING) { 6023 rc = drbr_enqueue_drv(ifp, tx_br, m); 6024 goto bxe_tx_mq_start_locked_exit; 6025 } 6026 6027 /* fetch the depth of the driver queue */ 6028 depth = drbr_inuse_drv(ifp, tx_br); 6029 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 6030 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 6031 } 6032 6033 if (m == NULL) { 6034 /* no new work, check for pending frames */ 6035 next = drbr_dequeue_drv(ifp, tx_br); 6036 } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { 6037 /* have both new and pending work, maintain packet order */ 6038 rc = drbr_enqueue_drv(ifp, tx_br, m); 6039 if (rc != 0) { 6040 fp->eth_q_stats.tx_soft_errors++; 6041 goto bxe_tx_mq_start_locked_exit; 6042 } 6043 next = drbr_dequeue_drv(ifp, tx_br); 6044 } else { 6045 /* new work only and nothing pending */ 6046 next = m; 6047 } 6048 6049 /* keep adding entries while there are frames to send */ 6050 while (next != NULL) { 6051 6052 /* the mbuf now belongs to us */ 6053 fp->eth_q_stats.mbuf_alloc_tx++; 6054 6055 /* 6056 * Put the frame into the transmit ring. If we don't have room, 6057 * place the mbuf back at the head of the TX queue, set the 6058 * OACTIVE flag, and wait for the NIC to drain the chain. 6059 */ 6060 rc = bxe_tx_encap(fp, &next); 6061 if (__predict_false(rc != 0)) { 6062 fp->eth_q_stats.tx_encap_failures++; 6063 if (next != NULL) { 6064 /* mark the TX queue as full and save the frame */ 6065 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 6066 /* XXX this may reorder the frame */ 6067 rc = drbr_enqueue_drv(ifp, tx_br, next); 6068 fp->eth_q_stats.mbuf_alloc_tx--; 6069 fp->eth_q_stats.tx_frames_deferred++; 6070 } 6071 6072 /* stop looking for more work */ 6073 break; 6074 } 6075 6076 /* the transmit frame was enqueued successfully */ 6077 tx_count++; 6078 6079 /* send a copy of the frame to any BPF listeners */ 6080 if_etherbpfmtap(ifp, next); 6081 6082 tx_bd_avail = bxe_tx_avail(sc, fp); 6083 6084 /* handle any completions if we're running low */ 6085 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6086 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6087 bxe_txeof(sc, fp); 6088 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 6089 break; 6090 } 6091 } 6092 6093 next = drbr_dequeue_drv(ifp, tx_br); 6094 } 6095 6096 /* all TX packets were dequeued and/or the tx ring is full */ 6097 if (tx_count > 0) { 6098 /* reset the TX watchdog timeout timer */ 6099 fp->watchdog_timer = BXE_TX_TIMEOUT; 6100 } 6101 6102bxe_tx_mq_start_locked_exit: 6103 6104 return (rc); 6105} 6106 6107/* Multiqueue (TSS) dispatch routine. 
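 * The mbuf's RSS flowid, when present, selects the fastpath as
 * fp_index = flowid % num_queues (e.g. flowid 0x1d with 4 queues maps to
 * fp[1]); if that queue's TX lock is contended the frame is deferred to
 * the queue's buf_ring and transmitted on a later pass.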
*/ 6108static int 6109bxe_tx_mq_start(struct ifnet *ifp, 6110 struct mbuf *m) 6111{ 6112 struct bxe_softc *sc = if_getsoftc(ifp); 6113 struct bxe_fastpath *fp; 6114 int fp_index, rc; 6115 6116 fp_index = 0; /* default is the first queue */ 6117 6118 /* check if flowid is set */ 6119 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 6120 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6121 6122 fp = &sc->fp[fp_index]; 6123 6124 if (BXE_FP_TX_TRYLOCK(fp)) { 6125 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6126 BXE_FP_TX_UNLOCK(fp); 6127 } else 6128 rc = drbr_enqueue_drv(ifp, fp->tx_br, m); 6129 6130 return (rc); 6131} 6132 6133static void 6134bxe_mq_flush(struct ifnet *ifp) 6135{ 6136 struct bxe_softc *sc = if_getsoftc(ifp); 6137 struct bxe_fastpath *fp; 6138 struct mbuf *m; 6139 int i; 6140 6141 for (i = 0; i < sc->num_queues; i++) { 6142 fp = &sc->fp[i]; 6143 6144 if (fp->state != BXE_FP_STATE_OPEN) { 6145 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6146 fp->index, fp->state); 6147 continue; 6148 } 6149 6150 if (fp->tx_br != NULL) { 6151 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6152 BXE_FP_TX_LOCK(fp); 6153 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6154 m_freem(m); 6155 } 6156 BXE_FP_TX_UNLOCK(fp); 6157 } 6158 } 6159 6160 if_qflush(ifp); 6161} 6162 6163#endif /* FreeBSD_version >= 800000 */ 6164 6165static uint16_t 6166bxe_cid_ilt_lines(struct bxe_softc *sc) 6167{ 6168 if (IS_SRIOV(sc)) { 6169 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6170 } 6171 return (L2_ILT_LINES(sc)); 6172} 6173 6174static void 6175bxe_ilt_set_info(struct bxe_softc *sc) 6176{ 6177 struct ilt_client_info *ilt_client; 6178 struct ecore_ilt *ilt = sc->ilt; 6179 uint16_t line = 0; 6180 6181 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6182 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6183 6184 /* CDU */ 6185 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6186 ilt_client->client_num = ILT_CLIENT_CDU; 6187 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6188 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6189 ilt_client->start = line; 6190 line += bxe_cid_ilt_lines(sc); 6191 6192 if (CNIC_SUPPORT(sc)) { 6193 line += CNIC_ILT_LINES; 6194 } 6195 6196 ilt_client->end = (line - 1); 6197 6198 BLOGD(sc, DBG_LOAD, 6199 "ilt client[CDU]: start %d, end %d, " 6200 "psz 0x%x, flags 0x%x, hw psz %d\n", 6201 ilt_client->start, ilt_client->end, 6202 ilt_client->page_size, 6203 ilt_client->flags, 6204 ilog2(ilt_client->page_size >> 12)); 6205 6206 /* QM */ 6207 if (QM_INIT(sc->qm_cid_count)) { 6208 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6209 ilt_client->client_num = ILT_CLIENT_QM; 6210 ilt_client->page_size = QM_ILT_PAGE_SZ; 6211 ilt_client->flags = 0; 6212 ilt_client->start = line; 6213 6214 /* 4 bytes for each cid */ 6215 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6216 QM_ILT_PAGE_SZ); 6217 6218 ilt_client->end = (line - 1); 6219 6220 BLOGD(sc, DBG_LOAD, 6221 "ilt client[QM]: start %d, end %d, " 6222 "psz 0x%x, flags 0x%x, hw psz %d\n", 6223 ilt_client->start, ilt_client->end, 6224 ilt_client->page_size, ilt_client->flags, 6225 ilog2(ilt_client->page_size >> 12)); 6226 } 6227 6228 if (CNIC_SUPPORT(sc)) { 6229 /* SRC */ 6230 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6231 ilt_client->client_num = ILT_CLIENT_SRC; 6232 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6233 ilt_client->flags = 0; 6234 ilt_client->start = line; 6235 line += SRC_ILT_LINES; 6236 ilt_client->end = (line - 1); 6237 6238 BLOGD(sc, DBG_LOAD, 6239 "ilt client[SRC]: start 
%d, end %d, " 6240 "psz 0x%x, flags 0x%x, hw psz %d\n", 6241 ilt_client->start, ilt_client->end, 6242 ilt_client->page_size, ilt_client->flags, 6243 ilog2(ilt_client->page_size >> 12)); 6244 6245 /* TM */ 6246 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6247 ilt_client->client_num = ILT_CLIENT_TM; 6248 ilt_client->page_size = TM_ILT_PAGE_SZ; 6249 ilt_client->flags = 0; 6250 ilt_client->start = line; 6251 line += TM_ILT_LINES; 6252 ilt_client->end = (line - 1); 6253 6254 BLOGD(sc, DBG_LOAD, 6255 "ilt client[TM]: start %d, end %d, " 6256 "psz 0x%x, flags 0x%x, hw psz %d\n", 6257 ilt_client->start, ilt_client->end, 6258 ilt_client->page_size, ilt_client->flags, 6259 ilog2(ilt_client->page_size >> 12)); 6260 } 6261 6262 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6263} 6264 6265static void 6266bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6267{ 6268 int i; 6269 uint32_t rx_buf_size; 6270 6271 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 6272 6273 for (i = 0; i < sc->num_queues; i++) { 6274 if (rx_buf_size <= MCLBYTES) { 6275 sc->fp[i].rx_buf_size = rx_buf_size; 6276 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6277 } else if (rx_buf_size <= MJUMPAGESIZE) { 6278 sc->fp[i].rx_buf_size = rx_buf_size; 6279 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6280 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) { 6281 sc->fp[i].rx_buf_size = MCLBYTES; 6282 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6283 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) { 6284 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 6285 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6286 } else { 6287 sc->fp[i].rx_buf_size = MCLBYTES; 6288 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6289 } 6290 } 6291} 6292 6293static int 6294bxe_alloc_ilt_mem(struct bxe_softc *sc) 6295{ 6296 int rc = 0; 6297 6298 if ((sc->ilt = 6299 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6300 M_BXE_ILT, 6301 (M_NOWAIT | M_ZERO))) == NULL) { 6302 rc = 1; 6303 } 6304 6305 return (rc); 6306} 6307 6308static int 6309bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6310{ 6311 int rc = 0; 6312 6313 if ((sc->ilt->lines = 6314 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6315 M_BXE_ILT, 6316 (M_NOWAIT | M_ZERO))) == NULL) { 6317 rc = 1; 6318 } 6319 6320 return (rc); 6321} 6322 6323static void 6324bxe_free_ilt_mem(struct bxe_softc *sc) 6325{ 6326 if (sc->ilt != NULL) { 6327 free(sc->ilt, M_BXE_ILT); 6328 sc->ilt = NULL; 6329 } 6330} 6331 6332static void 6333bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6334{ 6335 if (sc->ilt->lines != NULL) { 6336 free(sc->ilt->lines, M_BXE_ILT); 6337 sc->ilt->lines = NULL; 6338 } 6339} 6340 6341static void 6342bxe_free_mem(struct bxe_softc *sc) 6343{ 6344 int i; 6345 6346#if 0 6347 if (!CONFIGURE_NIC_MODE(sc)) { 6348 /* free searcher T2 table */ 6349 bxe_dma_free(sc, &sc->t2); 6350 } 6351#endif 6352 6353 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6354 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6355 sc->context[i].vcxt = NULL; 6356 sc->context[i].size = 0; 6357 } 6358 6359 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6360 6361 bxe_free_ilt_lines_mem(sc); 6362 6363#if 0 6364 bxe_iov_free_mem(sc); 6365#endif 6366} 6367 6368static int 6369bxe_alloc_mem(struct bxe_softc *sc) 6370{ 6371 int context_size; 6372 int allocated; 6373 int i; 6374 6375#if 0 6376 if (!CONFIGURE_NIC_MODE(sc)) { 6377 /* allocate searcher T2 table */ 6378 if (bxe_dma_alloc(sc, SRC_T2_SZ, 6379 &sc->t2, "searcher t2 table") != 0) { 6380 return (-1); 6381 } 6382 } 6383#endif 6384 6385 /* 6386 * Allocate memory for CDU context: 6387 * This memory
is allocated separately and not in the generic ILT 6388 * functions because CDU differs in few aspects: 6389 * 1. There can be multiple entities allocating memory for context - 6390 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6391 * its own ILT lines. 6392 * 2. Since CDU page-size is not a single 4KB page (which is the case 6393 * for the other ILT clients), to be efficient we want to support 6394 * allocation of sub-page-size in the last entry. 6395 * 3. Context pointers are used by the driver to pass to FW / update 6396 * the context (for the other ILT clients the pointers are used just to 6397 * free the memory during unload). 6398 */ 6399 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6400 for (i = 0, allocated = 0; allocated < context_size; i++) { 6401 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6402 (context_size - allocated)); 6403 6404 if (bxe_dma_alloc(sc, sc->context[i].size, 6405 &sc->context[i].vcxt_dma, 6406 "cdu context") != 0) { 6407 bxe_free_mem(sc); 6408 return (-1); 6409 } 6410 6411 sc->context[i].vcxt = 6412 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6413 6414 allocated += sc->context[i].size; 6415 } 6416 6417 bxe_alloc_ilt_lines_mem(sc); 6418 6419 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6420 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6421 { 6422 for (i = 0; i < 4; i++) { 6423 BLOGD(sc, DBG_LOAD, 6424 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6425 i, 6426 sc->ilt->clients[i].page_size, 6427 sc->ilt->clients[i].start, 6428 sc->ilt->clients[i].end, 6429 sc->ilt->clients[i].client_num, 6430 sc->ilt->clients[i].flags); 6431 } 6432 } 6433 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6434 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6435 bxe_free_mem(sc); 6436 return (-1); 6437 } 6438 6439#if 0 6440 if (bxe_iov_alloc_mem(sc)) { 6441 BLOGE(sc, "Failed to allocate memory for SRIOV\n"); 6442 bxe_free_mem(sc); 6443 return (-1); 6444 } 6445#endif 6446 6447 return (0); 6448} 6449 6450static void 6451bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6452{ 6453 struct bxe_softc *sc; 6454 int i; 6455 6456 sc = fp->sc; 6457 6458 if (fp->rx_mbuf_tag == NULL) { 6459 return; 6460 } 6461 6462 /* free all mbufs and unload all maps */ 6463 for (i = 0; i < RX_BD_TOTAL; i++) { 6464 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6465 bus_dmamap_sync(fp->rx_mbuf_tag, 6466 fp->rx_mbuf_chain[i].m_map, 6467 BUS_DMASYNC_POSTREAD); 6468 bus_dmamap_unload(fp->rx_mbuf_tag, 6469 fp->rx_mbuf_chain[i].m_map); 6470 } 6471 6472 if (fp->rx_mbuf_chain[i].m != NULL) { 6473 m_freem(fp->rx_mbuf_chain[i].m); 6474 fp->rx_mbuf_chain[i].m = NULL; 6475 fp->eth_q_stats.mbuf_alloc_rx--; 6476 } 6477 } 6478} 6479 6480static void 6481bxe_free_tpa_pool(struct bxe_fastpath *fp) 6482{ 6483 struct bxe_softc *sc; 6484 int i, max_agg_queues; 6485 6486 sc = fp->sc; 6487 6488 if (fp->rx_mbuf_tag == NULL) { 6489 return; 6490 } 6491 6492 max_agg_queues = MAX_AGG_QS(sc); 6493 6494 /* release all mbufs and unload all DMA maps in the TPA pool */ 6495 for (i = 0; i < max_agg_queues; i++) { 6496 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6497 bus_dmamap_sync(fp->rx_mbuf_tag, 6498 fp->rx_tpa_info[i].bd.m_map, 6499 BUS_DMASYNC_POSTREAD); 6500 bus_dmamap_unload(fp->rx_mbuf_tag, 6501 fp->rx_tpa_info[i].bd.m_map); 6502 } 6503 6504 if (fp->rx_tpa_info[i].bd.m != NULL) { 6505 m_freem(fp->rx_tpa_info[i].bd.m); 6506 fp->rx_tpa_info[i].bd.m = NULL; 6507 fp->eth_q_stats.mbuf_alloc_tpa--; 6508 } 6509 } 6510} 6511 6512static void 6513bxe_free_sge_chain(struct 
bxe_fastpath *fp) 6514{ 6515 struct bxe_softc *sc; 6516 int i; 6517 6518 sc = fp->sc; 6519 6520 if (fp->rx_sge_mbuf_tag == NULL) { 6521 return; 6522 } 6523 6524 /* free all mbufs and unload all maps */ 6525 for (i = 0; i < RX_SGE_TOTAL; i++) { 6526 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6527 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6528 fp->rx_sge_mbuf_chain[i].m_map, 6529 BUS_DMASYNC_POSTREAD); 6530 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6531 fp->rx_sge_mbuf_chain[i].m_map); 6532 } 6533 6534 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6535 m_freem(fp->rx_sge_mbuf_chain[i].m); 6536 fp->rx_sge_mbuf_chain[i].m = NULL; 6537 fp->eth_q_stats.mbuf_alloc_sge--; 6538 } 6539 } 6540} 6541 6542static void 6543bxe_free_fp_buffers(struct bxe_softc *sc) 6544{ 6545 struct bxe_fastpath *fp; 6546 int i; 6547 6548 for (i = 0; i < sc->num_queues; i++) { 6549 fp = &sc->fp[i]; 6550 6551#if __FreeBSD_version >= 800000 6552 if (fp->tx_br != NULL) { 6553 /* just in case bxe_mq_flush() wasn't called */ 6554 if (mtx_initialized(&fp->tx_mtx)) { 6555 struct mbuf *m; 6556 6557 BXE_FP_TX_LOCK(fp); 6558 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6559 m_freem(m); 6560 BXE_FP_TX_UNLOCK(fp); 6561 } 6562 buf_ring_free(fp->tx_br, M_DEVBUF); 6563 fp->tx_br = NULL; 6564 } 6565#endif 6566 6567 /* free all RX buffers */ 6568 bxe_free_rx_bd_chain(fp); 6569 bxe_free_tpa_pool(fp); 6570 bxe_free_sge_chain(fp); 6571 6572 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6573 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6574 fp->eth_q_stats.mbuf_alloc_rx); 6575 } 6576 6577 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6578 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6579 fp->eth_q_stats.mbuf_alloc_sge); 6580 } 6581 6582 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6583 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6584 fp->eth_q_stats.mbuf_alloc_tpa); 6585 } 6586 6587 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6588 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6589 fp->eth_q_stats.mbuf_alloc_tx); 6590 } 6591 6592 /* XXX verify all mbufs were reclaimed */ 6593 6594 if (mtx_initialized(&fp->tx_mtx)) { 6595 mtx_destroy(&fp->tx_mtx); 6596 } 6597 6598 if (mtx_initialized(&fp->rx_mtx)) { 6599 mtx_destroy(&fp->rx_mtx); 6600 } 6601 } 6602} 6603 6604static int 6605bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6606 uint16_t prev_index, 6607 uint16_t index) 6608{ 6609 struct bxe_sw_rx_bd *rx_buf; 6610 struct eth_rx_bd *rx_bd; 6611 bus_dma_segment_t segs[1]; 6612 bus_dmamap_t map; 6613 struct mbuf *m; 6614 int nsegs, rc; 6615 6616 rc = 0; 6617 6618 /* allocate the new RX BD mbuf */ 6619 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6620 if (__predict_false(m == NULL)) { 6621 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6622 return (ENOBUFS); 6623 } 6624 6625 fp->eth_q_stats.mbuf_alloc_rx++; 6626 6627 /* initialize the mbuf buffer length */ 6628 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6629 6630 /* map the mbuf into non-paged pool */ 6631 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6632 fp->rx_mbuf_spare_map, 6633 m, segs, &nsegs, BUS_DMA_NOWAIT); 6634 if (__predict_false(rc != 0)) { 6635 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6636 m_freem(m); 6637 fp->eth_q_stats.mbuf_alloc_rx--; 6638 return (rc); 6639 } 6640 6641 /* all mbufs must map to a single segment */ 6642 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6643 6644 /* release any existing RX BD mbuf mappings */ 6645 6646 if (prev_index != index) { 6647 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6648 6649 if
(rx_buf->m_map != NULL) { 6650 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6651 BUS_DMASYNC_POSTREAD); 6652 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6653 } 6654 6655 /* 6656 * We only get here from bxe_rxeof() when the maximum number 6657 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6658 * holds the mbuf in the prev_index so it's OK to NULL it out 6659 * here without concern of a memory leak. 6660 */ 6661 fp->rx_mbuf_chain[prev_index].m = NULL; 6662 } 6663 6664 rx_buf = &fp->rx_mbuf_chain[index]; 6665 6666 if (rx_buf->m_map != NULL) { 6667 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6668 BUS_DMASYNC_POSTREAD); 6669 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6670 } 6671 6672 /* save the mbuf and mapping info for a future packet */ 6673 map = (prev_index != index) ? 6674 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6675 rx_buf->m_map = fp->rx_mbuf_spare_map; 6676 fp->rx_mbuf_spare_map = map; 6677 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6678 BUS_DMASYNC_PREREAD); 6679 rx_buf->m = m; 6680 6681 rx_bd = &fp->rx_chain[index]; 6682 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6683 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6684 6685 return (rc); 6686} 6687 6688static int 6689bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6690 int queue) 6691{ 6692 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6693 bus_dma_segment_t segs[1]; 6694 bus_dmamap_t map; 6695 struct mbuf *m; 6696 int nsegs; 6697 int rc = 0; 6698 6699 /* allocate the new TPA mbuf */ 6700 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6701 if (__predict_false(m == NULL)) { 6702 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6703 return (ENOBUFS); 6704 } 6705 6706 fp->eth_q_stats.mbuf_alloc_tpa++; 6707 6708 /* initialize the mbuf buffer length */ 6709 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6710 6711 /* map the mbuf into non-paged pool */ 6712 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6713 fp->rx_tpa_info_mbuf_spare_map, 6714 m, segs, &nsegs, BUS_DMA_NOWAIT); 6715 if (__predict_false(rc != 0)) { 6716 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6717 m_free(m); 6718 fp->eth_q_stats.mbuf_alloc_tpa--; 6719 return (rc); 6720 } 6721 6722 /* all mbufs must map to a single segment */ 6723 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6724 6725 /* release any existing TPA mbuf mapping */ 6726 if (tpa_info->bd.m_map != NULL) { 6727 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6728 BUS_DMASYNC_POSTREAD); 6729 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6730 } 6731 6732 /* save the mbuf and mapping info for the TPA mbuf */ 6733 map = tpa_info->bd.m_map; 6734 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6735 fp->rx_tpa_info_mbuf_spare_map = map; 6736 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6737 BUS_DMASYNC_PREREAD); 6738 tpa_info->bd.m = m; 6739 tpa_info->seg = segs[0]; 6740 6741 return (rc); 6742} 6743 6744/* 6745 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6746 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6747 * chain. 
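 * The allocation below follows the driver's spare-map pattern: the new
 * mbuf is first DMA-loaded into rx_sge_mbuf_spare_map, the ring slot's
 * old map is unloaded, the slot takes over the spare map, and the old
 * map becomes the new spare, so on an allocation or mapping failure the
 * slot is left untouched with its previous mbuf still mapped.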
6748 */ 6749static int 6750bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6751 uint16_t index) 6752{ 6753 struct bxe_sw_rx_bd *sge_buf; 6754 struct eth_rx_sge *sge; 6755 bus_dma_segment_t segs[1]; 6756 bus_dmamap_t map; 6757 struct mbuf *m; 6758 int nsegs; 6759 int rc = 0; 6760 6761 /* allocate a new SGE mbuf */ 6762 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6763 if (__predict_false(m == NULL)) { 6764 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6765 return (ENOMEM); 6766 } 6767 6768 fp->eth_q_stats.mbuf_alloc_sge++; 6769 6770 /* initialize the mbuf buffer length */ 6771 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6772 6773 /* map the SGE mbuf into non-paged pool */ 6774 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6775 fp->rx_sge_mbuf_spare_map, 6776 m, segs, &nsegs, BUS_DMA_NOWAIT); 6777 if (__predict_false(rc != 0)) { 6778 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6779 m_freem(m); 6780 fp->eth_q_stats.mbuf_alloc_sge--; 6781 return (rc); 6782 } 6783 6784 /* all mbufs must map to a single segment */ 6785 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6786 6787 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6788 6789 /* release any existing SGE mbuf mapping */ 6790 if (sge_buf->m_map != NULL) { 6791 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6792 BUS_DMASYNC_POSTREAD); 6793 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6794 } 6795 6796 /* save the mbuf and mapping info for a future packet */ 6797 map = sge_buf->m_map; 6798 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6799 fp->rx_sge_mbuf_spare_map = map; 6800 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6801 BUS_DMASYNC_PREREAD); 6802 sge_buf->m = m; 6803 6804 sge = &fp->rx_sge_chain[index]; 6805 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6806 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6807 6808 return (rc); 6809} 6810 6811static __noinline int 6812bxe_alloc_fp_buffers(struct bxe_softc *sc) 6813{ 6814 struct bxe_fastpath *fp; 6815 int i, j, rc = 0; 6816 int ring_prod, cqe_ring_prod; 6817 int max_agg_queues; 6818 6819 for (i = 0; i < sc->num_queues; i++) { 6820 fp = &sc->fp[i]; 6821 6822#if __FreeBSD_version >= 800000 6823 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6824 M_NOWAIT, &fp->tx_mtx); 6825 if (fp->tx_br == NULL) { 6826 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6827 goto bxe_alloc_fp_buffers_error; 6828 } 6829#endif 6830 6831 ring_prod = cqe_ring_prod = 0; 6832 fp->rx_bd_cons = 0; 6833 fp->rx_cq_cons = 0; 6834 6835 /* allocate buffers for the RX BDs in RX BD chain */ 6836 for (j = 0; j < sc->max_rx_bufs; j++) { 6837 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6838 if (rc != 0) { 6839 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6840 i, rc); 6841 goto bxe_alloc_fp_buffers_error; 6842 } 6843 6844 ring_prod = RX_BD_NEXT(ring_prod); 6845 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6846 } 6847 6848 fp->rx_bd_prod = ring_prod; 6849 fp->rx_cq_prod = cqe_ring_prod; 6850 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6851 6852 max_agg_queues = MAX_AGG_QS(sc); 6853 6854 fp->tpa_enable = TRUE; 6855 6856 /* fill the TPA pool */ 6857 for (j = 0; j < max_agg_queues; j++) { 6858 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6859 if (rc != 0) { 6860 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6861 i, j); 6862 fp->tpa_enable = FALSE; 6863 goto bxe_alloc_fp_buffers_error; 6864 } 6865 6866 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6867 } 6868 6869 if (fp->tpa_enable) { 6870 /* fill the RX SGE chain */ 6871 ring_prod = 0; 
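/*
 * Only RX_SGE_USABLE of the RX_SGE_TOTAL elements are filled here; the
 * remainder are the per-page next-pointer elements, which RX_SGE_NEXT
 * steps over so the producer never lands on one of them.
 */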
6872 for (j = 0; j < RX_SGE_USABLE; j++) { 6873 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6874 if (rc != 0) { 6875 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6876 i, ring_prod); 6877 fp->tpa_enable = FALSE; 6878 ring_prod = 0; 6879 goto bxe_alloc_fp_buffers_error; 6880 } 6881 6882 ring_prod = RX_SGE_NEXT(ring_prod); 6883 } 6884 6885 fp->rx_sge_prod = ring_prod; 6886 } 6887 } 6888 6889 return (0); 6890 6891bxe_alloc_fp_buffers_error: 6892 6893 /* unwind what was already allocated */ 6894 bxe_free_rx_bd_chain(fp); 6895 bxe_free_tpa_pool(fp); 6896 bxe_free_sge_chain(fp); 6897 6898 return (ENOBUFS); 6899} 6900 6901static void 6902bxe_free_fw_stats_mem(struct bxe_softc *sc) 6903{ 6904 bxe_dma_free(sc, &sc->fw_stats_dma); 6905 6906 sc->fw_stats_num = 0; 6907 6908 sc->fw_stats_req_size = 0; 6909 sc->fw_stats_req = NULL; 6910 sc->fw_stats_req_mapping = 0; 6911 6912 sc->fw_stats_data_size = 0; 6913 sc->fw_stats_data = NULL; 6914 sc->fw_stats_data_mapping = 0; 6915} 6916 6917static int 6918bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6919{ 6920 uint8_t num_queue_stats; 6921 int num_groups; 6922 6923 /* number of queues for statistics is number of eth queues */ 6924 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6925 6926 /* 6927 * Total number of FW statistics requests = 6928 * 1 for port stats + 1 for PF stats + num of queues 6929 */ 6930 sc->fw_stats_num = (2 + num_queue_stats); 6931 6932 /* 6933 * Request is built from stats_query_header and an array of 6934 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6935 * rules. The real number of requests is configured in the 6936 * stats_query_header. 6937 */ 6938 num_groups = 6939 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6940 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6941 6942 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6943 sc->fw_stats_num, num_groups); 6944 6945 sc->fw_stats_req_size = 6946 (sizeof(struct stats_query_header) + 6947 (num_groups * sizeof(struct stats_query_cmd_group))); 6948 6949 /* 6950 * Data for statistics requests + stats_counter. 6951 * stats_counter holds per-STORM counters that are incremented when 6952 * STORM has finished with the current request. Memory for FCoE 6953 * offloaded statistics is counted anyway, even if it will not be sent. 6954 * VF stats are not accounted for here as the data of VF stats is stored 6955 * in memory allocated by the VF, not here.
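 * The single DMA buffer allocated below is laid out back to back as
 * [ request: stats_query_header + cmd groups ][ stats_counter ]
 * [ per_port_stats ][ per_pf_stats ][ per_queue_stats x N ], with the
 * fw_stats_req and fw_stats_data shortcuts simply pointing
 * fw_stats_req_size bytes apart into it.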
6956 */ 6957 sc->fw_stats_data_size = 6958 (sizeof(struct stats_counter) + 6959 sizeof(struct per_port_stats) + 6960 sizeof(struct per_pf_stats) + 6961 /* sizeof(struct fcoe_statistics_params) + */ 6962 (sizeof(struct per_queue_stats) * num_queue_stats)); 6963 6964 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6965 &sc->fw_stats_dma, "fw stats") != 0) { 6966 bxe_free_fw_stats_mem(sc); 6967 return (-1); 6968 } 6969 6970 /* set up the shortcuts */ 6971 6972 sc->fw_stats_req = 6973 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6974 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6975 6976 sc->fw_stats_data = 6977 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6978 sc->fw_stats_req_size); 6979 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6980 sc->fw_stats_req_size); 6981 6982 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6983 (uintmax_t)sc->fw_stats_req_mapping); 6984 6985 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6986 (uintmax_t)sc->fw_stats_data_mapping); 6987 6988 return (0); 6989} 6990 6991/* 6992 * Bits map: 6993 * 0-7 - Engine0 load counter. 6994 * 8-15 - Engine1 load counter. * (each load counter is a bitmap of the absolute PF numbers loaded on * that engine, e.g. 0x05 means functions 0 and 2) 6995 * 16 - Engine0 RESET_IN_PROGRESS bit. 6996 * 17 - Engine1 RESET_IN_PROGRESS bit. 6997 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 6998 * function on the engine 6999 * 19 - Engine1 ONE_IS_LOADED. 7000 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines' 7001 * leaders to complete (check for both RESET_IN_PROGRESS bits, not 7002 * just the one belonging to its engine). 7003 */ 7004#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 7005#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 7006#define BXE_PATH0_LOAD_CNT_SHIFT 0 7007#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 7008#define BXE_PATH1_LOAD_CNT_SHIFT 8 7009#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 7010#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 7011#define BXE_GLOBAL_RESET_BIT 0x00040000 7012 7013/* set the GLOBAL_RESET bit, should be run under rtnl lock */ 7014static void 7015bxe_set_reset_global(struct bxe_softc *sc) 7016{ 7017 uint32_t val; 7018 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7019 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7020 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 7021 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7022} 7023 7024/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 7025static void 7026bxe_clear_reset_global(struct bxe_softc *sc) 7027{ 7028 uint32_t val; 7029 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7030 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7031 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 7032 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7033} 7034 7035/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 7036static uint8_t 7037bxe_reset_is_global(struct bxe_softc *sc) 7038{ 7039 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7040 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 7041 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 7042} 7043 7044/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 7045static void 7046bxe_set_reset_done(struct bxe_softc *sc) 7047{ 7048 uint32_t val; 7049 uint32_t bit = SC_PATH(sc) ?
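/* path 1 selects the engine 1 bit */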
BXE_PATH1_RST_IN_PROG_BIT : 7050 BXE_PATH0_RST_IN_PROG_BIT; 7051 7052 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7053 7054 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7055 /* Clear the bit */ 7056 val &= ~bit; 7057 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7058 7059 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7060} 7061 7062/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7063static void 7064bxe_set_reset_in_progress(struct bxe_softc *sc) 7065{ 7066 uint32_t val; 7067 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7068 BXE_PATH0_RST_IN_PROG_BIT; 7069 7070 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7071 7072 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7073 /* Set the bit */ 7074 val |= bit; 7075 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7076 7077 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7078} 7079 7080/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7081static uint8_t 7082bxe_reset_is_done(struct bxe_softc *sc, 7083 int engine) 7084{ 7085 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7086 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7087 BXE_PATH0_RST_IN_PROG_BIT; 7088 7089 /* return false if bit is set */ 7090 return (val & bit) ? FALSE : TRUE; 7091} 7092 7093/* get the load status for an engine, should be run under rtnl lock */ 7094static uint8_t 7095bxe_get_load_status(struct bxe_softc *sc, 7096 int engine) 7097{ 7098 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7099 BXE_PATH0_LOAD_CNT_MASK; 7100 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7101 BXE_PATH0_LOAD_CNT_SHIFT; 7102 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7103 7104 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7105 7106 val = ((val & mask) >> shift); 7107 7108 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7109 7110 return (val != 0); 7111} 7112 7113/* set pf load mark */ 7114/* XXX needs to be under rtnl lock */ 7115static void 7116bxe_set_pf_load(struct bxe_softc *sc) 7117{ 7118 uint32_t val; 7119 uint32_t val1; 7120 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7121 BXE_PATH0_LOAD_CNT_MASK; 7122 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7123 BXE_PATH0_LOAD_CNT_SHIFT; 7124 7125 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7126 7127 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7128 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7129 7130 /* get the current counter value */ 7131 val1 = ((val & mask) >> shift); 7132 7133 /* set bit of this PF */ 7134 val1 |= (1 << SC_ABS_FUNC(sc)); 7135 7136 /* clear the old value */ 7137 val &= ~mask; 7138 7139 /* set the new one */ 7140 val |= ((val1 << shift) & mask); 7141 7142 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7143 7144 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7145} 7146 7147/* clear pf load mark */ 7148/* XXX needs to be under rtnl lock */ 7149static uint8_t 7150bxe_clear_pf_load(struct bxe_softc *sc) 7151{ 7152 uint32_t val1, val; 7153 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7154 BXE_PATH0_LOAD_CNT_MASK; 7155 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : 7156 BXE_PATH0_LOAD_CNT_SHIFT; 7157 7158 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7159 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7160 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 7161 7162 /* get the current counter value */ 7163 val1 = (val & mask) >> shift; 7164 7165 /* clear bit of that PF */ 7166 val1 &= ~(1 << SC_ABS_FUNC(sc)); 7167 7168 /* clear the old value */ 7169 val &= ~mask; 7170 7171 /* set the new one */ 7172 val |= ((val1 << shift) & mask); 7173 7174 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7175 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7176 return (val1 != 0); 7177} 7178 7179/* send load request to mcp and analyze response */ 7180static int 7181bxe_nic_load_request(struct bxe_softc *sc, 7182 uint32_t *load_code) 7183{ 7184 /* init fw_seq */ 7185 sc->fw_seq = 7186 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 7187 DRV_MSG_SEQ_NUMBER_MASK); 7188 7189 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 7190 7191 /* get the current FW pulse sequence */ 7192 sc->fw_drv_pulse_wr_seq = 7193 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 7194 DRV_PULSE_SEQ_MASK); 7195 7196 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 7197 sc->fw_drv_pulse_wr_seq); 7198 7199 /* load request */ 7200 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 7201 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 7202 7203 /* if the MCP fails to respond we must abort */ 7204 if (!(*load_code)) { 7205 BLOGE(sc, "MCP response failure!\n"); 7206 return (-1); 7207 } 7208 7209 /* if MCP refused then must abort */ 7210 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7211 BLOGE(sc, "MCP refused load request\n"); 7212 return (-1); 7213 } 7214 7215 return (0); 7216} 7217 7218/* 7219 * Check whether another PF has already loaded FW to the chip. In virtualized 7220 * environments a pf from another VM may have already initialized the device 7221 * including loading FW. 7222 */ 7223static int 7224bxe_nic_load_analyze_req(struct bxe_softc *sc, 7225 uint32_t load_code) 7226{ 7227 uint32_t my_fw, loaded_fw; 7228 7229 /* is another pf loaded on this engine? */ 7230 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 7231 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 7232 /* build my FW version dword */ 7233 my_fw = (BCM_5710_FW_MAJOR_VERSION + 7234 (BCM_5710_FW_MINOR_VERSION << 8 ) + 7235 (BCM_5710_FW_REVISION_VERSION << 16) + 7236 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 7237 7238 /* read loaded FW from chip */ 7239 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 7240 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 7241 loaded_fw, my_fw); 7242 7243 /* abort nic load if version mismatch */ 7244 if (my_fw != loaded_fw) { 7245 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)", 7246 loaded_fw, my_fw); 7247 return (-1); 7248 } 7249 } 7250 7251 return (0); 7252} 7253 7254/* mark PMF if applicable */ 7255static void 7256bxe_nic_load_pmf(struct bxe_softc *sc, 7257 uint32_t load_code) 7258{ 7259 uint32_t ncsi_oem_data_addr; 7260 7261 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 7262 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 7263 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 7264 /* 7265 * Barrier here for ordering between the writing to sc->port.pmf here 7266 * and reading it from the periodic task. 7267 */ 7268 sc->port.pmf = 1; 7269 mb(); 7270 } else { 7271 sc->port.pmf = 0; 7272 } 7273 7274 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 7275 7276 /* XXX needed?
*/ 7277 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 7278 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 7279 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 7280 if (ncsi_oem_data_addr) { 7281 REG_WR(sc, 7282 (ncsi_oem_data_addr + 7283 offsetof(struct glob_ncsi_oem_data, driver_version)), 7284 0); 7285 } 7286 } 7287 } 7288} 7289 7290static void 7291bxe_read_mf_cfg(struct bxe_softc *sc) 7292{ 7293 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 7294 int abs_func; 7295 int vn; 7296 7297 if (BXE_NOMCP(sc)) { 7298 return; /* what should be the default value in this case */ 7299 } 7300 7301 /* 7302 * The formula for computing the absolute function number is... 7303 * For 2 port configuration (4 functions per port): 7304 * abs_func = 2 * vn + SC_PORT + SC_PATH * (e.g. vn=1 on port 1 of path 0 gives abs_func = 2*1 + 1 + 0 = 3) 7305 * For 4 port configuration (2 functions per port): 7306 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 7307 */ 7308 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 7309 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 7310 if (abs_func >= E1H_FUNC_MAX) { 7311 break; 7312 } 7313 sc->devinfo.mf_info.mf_config[vn] = 7314 MFCFG_RD(sc, func_mf_config[abs_func].config); 7315 } 7316 7317 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 7318 FUNC_MF_CFG_FUNC_DISABLED) { 7319 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 7320 sc->flags |= BXE_MF_FUNC_DIS; 7321 } else { 7322 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 7323 sc->flags &= ~BXE_MF_FUNC_DIS; 7324 } 7325} 7326 7327/* acquire split MCP access lock register */ 7328static int bxe_acquire_alr(struct bxe_softc *sc) 7329{ 7330 uint32_t j, val; 7331 7332 for (j = 0; j < 1000; j++) { 7333 val = (1UL << 31); 7334 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 7335 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 7336 if (val & (1L << 31)) 7337 break; 7338 7339 DELAY(5000); 7340 } 7341 7342 if (!(val & (1L << 31))) { 7343 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 7344 return (-1); 7345 } 7346 7347 return (0); 7348} 7349 7350/* release split MCP access lock register */ 7351static void bxe_release_alr(struct bxe_softc *sc) 7352{ 7353 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7354} 7355 7356static void 7357bxe_fan_failure(struct bxe_softc *sc) 7358{ 7359 int port = SC_PORT(sc); 7360 uint32_t ext_phy_config; 7361 7362 /* mark the failure */ 7363 ext_phy_config = 7364 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7365 7366 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7367 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7368 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7369 ext_phy_config); 7370 7371 /* log the failure */ 7372 BLOGW(sc, "Fan Failure has caused the driver to shut down " 7373 "the card to prevent permanent damage. " 7374 "Please contact OEM Support for assistance\n"); 7375 7376 /* XXX */ 7377#if 1 7378 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7379#else 7380 /* 7381 * Schedule device reset (unload) 7382 * This is because some boards consume enough power when the driver is 7383 * up to overheat if the fan fails.
7384 */ 7385 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7386 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7387#endif 7388} 7389 7390/* this function is called upon a link interrupt */ 7391static void 7392bxe_link_attn(struct bxe_softc *sc) 7393{ 7394 uint32_t pause_enabled = 0; 7395 struct host_port_stats *pstats; 7396 int cmng_fns; 7397 7398 /* Make sure that we are synced with the current statistics */ 7399 bxe_stats_handle(sc, STATS_EVENT_STOP); 7400 7401 elink_link_update(&sc->link_params, &sc->link_vars); 7402 7403 if (sc->link_vars.link_up) { 7404 7405 /* dropless flow control */ 7406 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7407 pause_enabled = 0; 7408 7409 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7410 pause_enabled = 1; 7411 } 7412 7413 REG_WR(sc, 7414 (BAR_USTRORM_INTMEM + 7415 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7416 pause_enabled); 7417 } 7418 7419 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7420 pstats = BXE_SP(sc, port_stats); 7421 /* reset old mac stats */ 7422 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7423 } 7424 7425 if (sc->state == BXE_STATE_OPEN) { 7426 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7427 } 7428 } 7429 7430 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7431 cmng_fns = bxe_get_cmng_fns_mode(sc); 7432 7433 if (cmng_fns != CMNG_FNS_NONE) { 7434 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7435 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7436 } else { 7437 /* rate shaping and fairness are disabled */ 7438 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7439 } 7440 } 7441 7442 bxe_link_report_locked(sc); 7443 7444 if (IS_MF(sc)) { 7445 ; // XXX bxe_link_sync_notify(sc); 7446 } 7447} 7448 7449static void 7450bxe_attn_int_asserted(struct bxe_softc *sc, 7451 uint32_t asserted) 7452{ 7453 int port = SC_PORT(sc); 7454 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7455 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7456 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7457 NIG_REG_MASK_INTERRUPT_PORT0; 7458 uint32_t aeu_mask; 7459 uint32_t nig_mask = 0; 7460 uint32_t reg_addr; 7461 uint32_t igu_acked; 7462 uint32_t cnt; 7463 7464 if (sc->attn_state & asserted) { 7465 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7466 } 7467 7468 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7469 7470 aeu_mask = REG_RD(sc, aeu_addr); 7471 7472 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7473 aeu_mask, asserted); 7474 7475 aeu_mask &= ~(asserted & 0x3ff); 7476 7477 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7478 7479 REG_WR(sc, aeu_addr, aeu_mask); 7480 7481 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7482 7483 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7484 sc->attn_state |= asserted; 7485 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7486 7487 if (asserted & ATTN_HARD_WIRED_MASK) { 7488 if (asserted & ATTN_NIG_FOR_FUNC) { 7489 7490 bxe_acquire_phy_lock(sc); 7491 /* save nig interrupt mask */ 7492 nig_mask = REG_RD(sc, nig_int_mask_addr); 7493 7494 /* If nig_mask is not set, no need to call the update function */ 7495 if (nig_mask) { 7496 REG_WR(sc, nig_int_mask_addr, 0); 7497 7498 bxe_link_attn(sc); 7499 } 7500 7501 /* handle unicore attn? 
*/ 7502 } 7503 7504 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7505 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7506 } 7507 7508 if (asserted & GPIO_2_FUNC) { 7509 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7510 } 7511 7512 if (asserted & GPIO_3_FUNC) { 7513 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7514 } 7515 7516 if (asserted & GPIO_4_FUNC) { 7517 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7518 } 7519 7520 if (port == 0) { 7521 if (asserted & ATTN_GENERAL_ATTN_1) { 7522 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7523 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7524 } 7525 if (asserted & ATTN_GENERAL_ATTN_2) { 7526 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7527 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7528 } 7529 if (asserted & ATTN_GENERAL_ATTN_3) { 7530 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7531 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7532 } 7533 } else { 7534 if (asserted & ATTN_GENERAL_ATTN_4) { 7535 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7536 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7537 } 7538 if (asserted & ATTN_GENERAL_ATTN_5) { 7539 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7540 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7541 } 7542 if (asserted & ATTN_GENERAL_ATTN_6) { 7543 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7544 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7545 } 7546 } 7547 } /* hardwired */ 7548 7549 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7550 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7551 } else { 7552 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7553 } 7554 7555 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7556 asserted, 7557 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7558 REG_WR(sc, reg_addr, asserted); 7559 7560 /* now set back the mask */ 7561 if (asserted & ATTN_NIG_FOR_FUNC) { 7562 /* 7563 * Verify that IGU ack through BAR was written before restoring 7564 * NIG mask. This loop should exit after 2-3 iterations max. 7565 */ 7566 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7567 cnt = 0; 7568 7569 do { 7570 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7571 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7572 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7573 7574 if (!igu_acked) { 7575 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7576 } 7577 7578 mb(); 7579 } 7580 7581 REG_WR(sc, nig_int_mask_addr, nig_mask); 7582 7583 bxe_release_phy_lock(sc); 7584 } 7585} 7586 7587static void 7588bxe_print_next_block(struct bxe_softc *sc, 7589 int idx, 7590 const char *blk) 7591{ 7592 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7593} 7594 7595static int 7596bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7597 uint32_t sig, 7598 int par_num, 7599 uint8_t print) 7600{ 7601 uint32_t cur_bit = 0; 7602 int i = 0; 7603 7604 for (i = 0; sig; i++) { 7605 cur_bit = ((uint32_t)0x1 << i); 7606 if (sig & cur_bit) { 7607 switch (cur_bit) { 7608 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7609 if (print) 7610 bxe_print_next_block(sc, par_num++, "BRB"); 7611 break; 7612 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7613 if (print) 7614 bxe_print_next_block(sc, par_num++, "PARSER"); 7615 break; 7616 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7617 if (print) 7618 bxe_print_next_block(sc, par_num++, "TSDM"); 7619 break; 7620 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7621 if (print) 7622 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7623 break; 7624 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7625 if (print) 7626 bxe_print_next_block(sc, par_num++, "TCM"); 7627 break; 7628 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7629 if (print) 7630 bxe_print_next_block(sc, par_num++, "TSEMI"); 7631 break; 7632 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7633 if (print) 7634 bxe_print_next_block(sc, par_num++, "XPB"); 7635 break; 7636 } 7637 7638 /* Clear the bit */ 7639 sig &= ~cur_bit; 7640 } 7641 } 7642 7643 return (par_num); 7644} 7645 7646static int 7647bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7648 uint32_t sig, 7649 int par_num, 7650 uint8_t *global, 7651 uint8_t print) 7652{ 7653 int i = 0; 7654 uint32_t cur_bit = 0; 7655 for (i = 0; sig; i++) { 7656 cur_bit = ((uint32_t)0x1 << i); 7657 if (sig & cur_bit) { 7658 switch (cur_bit) { 7659 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7660 if (print) 7661 bxe_print_next_block(sc, par_num++, "PBF"); 7662 break; 7663 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7664 if (print) 7665 bxe_print_next_block(sc, par_num++, "QM"); 7666 break; 7667 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7668 if (print) 7669 bxe_print_next_block(sc, par_num++, "TM"); 7670 break; 7671 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7672 if (print) 7673 bxe_print_next_block(sc, par_num++, "XSDM"); 7674 break; 7675 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7676 if (print) 7677 bxe_print_next_block(sc, par_num++, "XCM"); 7678 break; 7679 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7680 if (print) 7681 bxe_print_next_block(sc, par_num++, "XSEMI"); 7682 break; 7683 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7684 if (print) 7685 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7686 break; 7687 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7688 if (print) 7689 bxe_print_next_block(sc, par_num++, "NIG"); 7690 break; 7691 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7692 if (print) 7693 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7694 *global = TRUE; 7695 break; 7696 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7697 if (print) 7698 bxe_print_next_block(sc, par_num++, "DEBUG"); 7699 break; 7700 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7701 if (print) 7702 bxe_print_next_block(sc, par_num++, "USDM"); 7703 break; 7704 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7705 if (print) 7706 bxe_print_next_block(sc, par_num++, "UCM"); 7707 break; 7708 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7709 if (print) 7710 bxe_print_next_block(sc, par_num++, "USEMI"); 7711 break; 7712 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7713 if (print) 7714 bxe_print_next_block(sc, par_num++, "UPB"); 7715 break; 7716 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7717 if 
(print) 7718 bxe_print_next_block(sc, par_num++, "CSDM"); 7719 break; 7720 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7721 if (print) 7722 bxe_print_next_block(sc, par_num++, "CCM"); 7723 break; 7724 } 7725 7726 /* Clear the bit */ 7727 sig &= ~cur_bit; 7728 } 7729 } 7730 7731 return (par_num); 7732} 7733 7734static int 7735bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7736 uint32_t sig, 7737 int par_num, 7738 uint8_t print) 7739{ 7740 uint32_t cur_bit = 0; 7741 int i = 0; 7742 7743 for (i = 0; sig; i++) { 7744 cur_bit = ((uint32_t)0x1 << i); 7745 if (sig & cur_bit) { 7746 switch (cur_bit) { 7747 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7748 if (print) 7749 bxe_print_next_block(sc, par_num++, "CSEMI"); 7750 break; 7751 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7752 if (print) 7753 bxe_print_next_block(sc, par_num++, "PXP"); 7754 break; 7755 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7756 if (print) 7757 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7758 break; 7759 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7760 if (print) 7761 bxe_print_next_block(sc, par_num++, "CFC"); 7762 break; 7763 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7764 if (print) 7765 bxe_print_next_block(sc, par_num++, "CDU"); 7766 break; 7767 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7768 if (print) 7769 bxe_print_next_block(sc, par_num++, "DMAE"); 7770 break; 7771 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7772 if (print) 7773 bxe_print_next_block(sc, par_num++, "IGU"); 7774 break; 7775 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7776 if (print) 7777 bxe_print_next_block(sc, par_num++, "MISC"); 7778 break; 7779 } 7780 7781 /* Clear the bit */ 7782 sig &= ~cur_bit; 7783 } 7784 } 7785 7786 return (par_num); 7787} 7788 7789static int 7790bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7791 uint32_t sig, 7792 int par_num, 7793 uint8_t *global, 7794 uint8_t print) 7795{ 7796 uint32_t cur_bit = 0; 7797 int i = 0; 7798 7799 for (i = 0; sig; i++) { 7800 cur_bit = ((uint32_t)0x1 << i); 7801 if (sig & cur_bit) { 7802 switch (cur_bit) { 7803 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7804 if (print) 7805 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7806 *global = TRUE; 7807 break; 7808 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7809 if (print) 7810 bxe_print_next_block(sc, par_num++, 7811 "MCP UMP RX"); 7812 *global = TRUE; 7813 break; 7814 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7815 if (print) 7816 bxe_print_next_block(sc, par_num++, 7817 "MCP UMP TX"); 7818 *global = TRUE; 7819 break; 7820 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7821 if (print) 7822 bxe_print_next_block(sc, par_num++, 7823 "MCP SCPAD"); 7824 *global = TRUE; 7825 break; 7826 } 7827 7828 /* Clear the bit */ 7829 sig &= ~cur_bit; 7830 } 7831 } 7832 7833 return (par_num); 7834} 7835 7836static int 7837bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7838 uint32_t sig, 7839 int par_num, 7840 uint8_t print) 7841{ 7842 uint32_t cur_bit = 0; 7843 int i = 0; 7844 7845 for (i = 0; sig; i++) { 7846 cur_bit = ((uint32_t)0x1 << i); 7847 if (sig & cur_bit) { 7848 switch (cur_bit) { 7849 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7850 if (print) 7851 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7852 break; 7853 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7854 if (print) 7855 bxe_print_next_block(sc, par_num++, "ATC"); 7856 break; 7857 } 7858 7859 /* Clear the bit */ 7860 sig &= ~cur_bit; 7861 } 7862 } 7863 7864 return (par_num); 7865} 7866 7867static uint8_t 
7868bxe_parity_attn(struct bxe_softc *sc, 7869 uint8_t *global, 7870 uint8_t print, 7871 uint32_t *sig) 7872{ 7873 int par_num = 0; 7874 7875 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7876 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7877 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7878 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7879 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7880 BLOGE(sc, "Parity error: HW block parity attention:\n" 7881 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7882 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7883 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7884 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7885 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7886 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7887 7888 if (print) 7889 BLOGI(sc, "Parity errors detected in blocks: "); 7890 7891 par_num = 7892 bxe_check_blocks_with_parity0(sc, sig[0] & 7893 HW_PRTY_ASSERT_SET_0, 7894 par_num, print); 7895 par_num = 7896 bxe_check_blocks_with_parity1(sc, sig[1] & 7897 HW_PRTY_ASSERT_SET_1, 7898 par_num, global, print); 7899 par_num = 7900 bxe_check_blocks_with_parity2(sc, sig[2] & 7901 HW_PRTY_ASSERT_SET_2, 7902 par_num, print); 7903 par_num = 7904 bxe_check_blocks_with_parity3(sc, sig[3] & 7905 HW_PRTY_ASSERT_SET_3, 7906 par_num, global, print); 7907 par_num = 7908 bxe_check_blocks_with_parity4(sc, sig[4] & 7909 HW_PRTY_ASSERT_SET_4, 7910 par_num, print); 7911 7912 if (print) 7913 BLOGI(sc, "\n"); 7914 7915 return (TRUE); 7916 } 7917 7918 return (FALSE); 7919} 7920 7921static uint8_t 7922bxe_chk_parity_attn(struct bxe_softc *sc, 7923 uint8_t *global, 7924 uint8_t print) 7925{ 7926 struct attn_route attn = { {0} }; 7927 int port = SC_PORT(sc); 7928 7929 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7930 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7931 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7932 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7933 7934 if (!CHIP_IS_E1x(sc)) 7935 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7936 7937 return (bxe_parity_attn(sc, global, print, attn.sig)); 7938} 7939 7940static void 7941bxe_attn_int_deasserted4(struct bxe_softc *sc, 7942 uint32_t attn) 7943{ 7944 uint32_t val; 7945 7946 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7947 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7948 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7949 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7950 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7951 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7952 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7953 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7954 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7955 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7956 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7957 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7958 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7959 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7960 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7961 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7962 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7963 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7964 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7965 if (val & 
        PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7966            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7967    }
7968
7969    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7970        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7971        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7972        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7973            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7974        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7975            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7976        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7977            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7978        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7979            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7980        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7981            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7982        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7983            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7984    }
7985
7986    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7987                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7988        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7989              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7990                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7991    }
7992}
7993
7994static void
7995bxe_e1h_disable(struct bxe_softc *sc)
7996{
7997    int port = SC_PORT(sc);
7998
7999    bxe_tx_disable(sc);
8000
8001    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8002}
8003
8004static void
8005bxe_e1h_enable(struct bxe_softc *sc)
8006{
8007    int port = SC_PORT(sc);
8008
8009    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
8010
8011    // XXX bxe_tx_enable(sc);
8012}
8013
8014/*
8015 * Called due to an MCP event (on pmf):
8016 *   reread the new bandwidth configuration
8017 *   configure the FW
8018 *   notify other functions about the change
8019 */
8020static void
8021bxe_config_mf_bw(struct bxe_softc *sc)
8022{
8023    if (sc->link_vars.link_up) {
8024        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
8025        // XXX bxe_link_sync_notify(sc);
8026    }
8027
8028    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
8029}
8030
8031static void
8032bxe_set_mf_bw(struct bxe_softc *sc)
8033{
8034    bxe_config_mf_bw(sc);
8035    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
8036}
8037
8038static void
8039bxe_handle_eee_event(struct bxe_softc *sc)
8040{
8041    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
8042    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
8043}
8044
8045#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
8046
8047static void
8048bxe_drv_info_ether_stat(struct bxe_softc *sc)
8049{
8050    struct eth_stats_info *ether_stat =
8051        &sc->sp->drv_info_to_mcp.ether_stat;
8052
8053    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
8054            ETH_STAT_INFO_VERSION_LEN);
8055
8056    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
8057    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
8058                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
8059                                          ether_stat->mac_local + MAC_PAD,
8060                                          MAC_PAD, ETH_ALEN);
8061
8062    ether_stat->mtu_size = sc->mtu;
8063
8064    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
8065    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
8066        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
8067    }
8068
8069    // XXX ether_stat->feature_flags |= ???;
8070
8071    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ?
1 : 0; 8072 8073 ether_stat->txq_size = sc->tx_ring_size; 8074 ether_stat->rxq_size = sc->rx_ring_size; 8075} 8076 8077static void 8078bxe_handle_drv_info_req(struct bxe_softc *sc) 8079{ 8080 enum drv_info_opcode op_code; 8081 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8082 8083 /* if drv_info version supported by MFW doesn't match - send NACK */ 8084 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8085 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8086 return; 8087 } 8088 8089 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8090 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8091 8092 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8093 8094 switch (op_code) { 8095 case ETH_STATS_OPCODE: 8096 bxe_drv_info_ether_stat(sc); 8097 break; 8098 case FCOE_STATS_OPCODE: 8099 case ISCSI_STATS_OPCODE: 8100 default: 8101 /* if op code isn't supported - send NACK */ 8102 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8103 return; 8104 } 8105 8106 /* 8107 * If we got drv_info attn from MFW then these fields are defined in 8108 * shmem2 for sure 8109 */ 8110 SHMEM2_WR(sc, drv_info_host_addr_lo, 8111 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8112 SHMEM2_WR(sc, drv_info_host_addr_hi, 8113 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8114 8115 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8116} 8117 8118static void 8119bxe_dcc_event(struct bxe_softc *sc, 8120 uint32_t dcc_event) 8121{ 8122 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8123 8124 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8125 /* 8126 * This is the only place besides the function initialization 8127 * where the sc->flags can change so it is done without any 8128 * locks 8129 */ 8130 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8131 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8132 sc->flags |= BXE_MF_FUNC_DIS; 8133 bxe_e1h_disable(sc); 8134 } else { 8135 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8136 sc->flags &= ~BXE_MF_FUNC_DIS; 8137 bxe_e1h_enable(sc); 8138 } 8139 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8140 } 8141 8142 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8143 bxe_config_mf_bw(sc); 8144 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8145 } 8146 8147 /* Report results to MCP */ 8148 if (dcc_event) 8149 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8150 else 8151 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8152} 8153 8154static void 8155bxe_pmf_update(struct bxe_softc *sc) 8156{ 8157 int port = SC_PORT(sc); 8158 uint32_t val; 8159 8160 sc->port.pmf = 1; 8161 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8162 8163 /* 8164 * We need the mb() to ensure the ordering between the writing to 8165 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8166 */ 8167 mb(); 8168 8169 /* queue a periodic task */ 8170 // XXX schedule task... 
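    /*
     * A minimal sketch (not verified) of what scheduling the periodic
     * task could look like here, assuming a callout and flags word along
     * the lines of the periodic_callout/periodic_flags machinery used
     * elsewhere in this driver; the names below are illustrative:
     */
#if 0
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz, /* re-check about once a second */
                  bxe_periodic_callout_func, sc);
#endif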
8171 8172 // XXX bxe_dcbx_pmf_update(sc); 8173 8174 /* enable nig attention */ 8175 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8176 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8177 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8178 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8179 } else if (!CHIP_IS_E1x(sc)) { 8180 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8181 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8182 } 8183 8184 bxe_stats_handle(sc, STATS_EVENT_PMF); 8185} 8186 8187static int 8188bxe_mc_assert(struct bxe_softc *sc) 8189{ 8190 char last_idx; 8191 int i, rc = 0; 8192 uint32_t row0, row1, row2, row3; 8193 8194 /* XSTORM */ 8195 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8196 if (last_idx) 8197 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8198 8199 /* print the asserts */ 8200 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8201 8202 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8203 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8204 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8205 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8206 8207 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8208 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8209 i, row3, row2, row1, row0); 8210 rc++; 8211 } else { 8212 break; 8213 } 8214 } 8215 8216 /* TSTORM */ 8217 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8218 if (last_idx) { 8219 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8220 } 8221 8222 /* print the asserts */ 8223 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8224 8225 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8226 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8227 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8228 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8229 8230 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8231 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8232 i, row3, row2, row1, row0); 8233 rc++; 8234 } else { 8235 break; 8236 } 8237 } 8238 8239 /* CSTORM */ 8240 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8241 if (last_idx) { 8242 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8243 } 8244 8245 /* print the asserts */ 8246 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8247 8248 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8249 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8250 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8251 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8252 8253 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8254 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8255 i, row3, row2, row1, row0); 8256 rc++; 8257 } else { 8258 break; 8259 } 8260 } 8261 8262 /* USTORM */ 8263 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8264 if (last_idx) { 8265 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8266 } 8267 8268 /* print the asserts */ 8269 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8270 8271 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8272 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8273 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + 
USTORM_ASSERT_LIST_OFFSET(i) + 8); 8274 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8275 8276 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8277 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8278 i, row3, row2, row1, row0); 8279 rc++; 8280 } else { 8281 break; 8282 } 8283 } 8284 8285 return (rc); 8286} 8287 8288static void 8289bxe_attn_int_deasserted3(struct bxe_softc *sc, 8290 uint32_t attn) 8291{ 8292 int func = SC_FUNC(sc); 8293 uint32_t val; 8294 8295 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8296 8297 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8298 8299 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8300 bxe_read_mf_cfg(sc); 8301 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8302 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8303 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8304 8305 if (val & DRV_STATUS_DCC_EVENT_MASK) 8306 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8307 8308 if (val & DRV_STATUS_SET_MF_BW) 8309 bxe_set_mf_bw(sc); 8310 8311 if (val & DRV_STATUS_DRV_INFO_REQ) 8312 bxe_handle_drv_info_req(sc); 8313 8314#if 0 8315 if (val & DRV_STATUS_VF_DISABLED) 8316 bxe_vf_handle_flr_event(sc); 8317#endif 8318 8319 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8320 bxe_pmf_update(sc); 8321 8322#if 0 8323 if (sc->port.pmf && 8324 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8325 (sc->dcbx_enabled > 0)) 8326 /* start dcbx state machine */ 8327 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8328#endif 8329 8330#if 0 8331 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8332 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8333#endif 8334 8335 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8336 bxe_handle_eee_event(sc); 8337 8338 if (sc->link_vars.periodic_flags & 8339 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8340 /* sync with link */ 8341 bxe_acquire_phy_lock(sc); 8342 sc->link_vars.periodic_flags &= 8343 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8344 bxe_release_phy_lock(sc); 8345 if (IS_MF(sc)) 8346 ; // XXX bxe_link_sync_notify(sc); 8347 bxe_link_report(sc); 8348 } 8349 8350 /* 8351 * Always call it here: bxe_link_report() will 8352 * prevent the link indication duplication. 8353 */ 8354 bxe_link_status_update(sc); 8355 8356 } else if (attn & BXE_MC_ASSERT_BITS) { 8357 8358 BLOGE(sc, "MC assert!\n"); 8359 bxe_mc_assert(sc); 8360 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8361 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8362 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8363 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8364 bxe_panic(sc, ("MC assert!\n")); 8365 8366 } else if (attn & BXE_MCP_ASSERT) { 8367 8368 BLOGE(sc, "MCP assert!\n"); 8369 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8370 // XXX bxe_fw_dump(sc); 8371 8372 } else { 8373 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8374 } 8375 } 8376 8377 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8378 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8379 if (attn & BXE_GRC_TIMEOUT) { 8380 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8381 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8382 } 8383 if (attn & BXE_GRC_RSV) { 8384 val = CHIP_IS_E1(sc) ? 
                                  0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8385            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8386        }
8387        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8388    }
8389}
8390
8391static void
8392bxe_attn_int_deasserted2(struct bxe_softc *sc,
8393                         uint32_t attn)
8394{
8395    int port = SC_PORT(sc);
8396    int reg_offset;
8397    uint32_t val0, mask0, val1, mask1;
8398    uint32_t val;
8399
8400    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8401        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8402        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8403        /* CFC error attention */
8404        if (val & 0x2) {
8405            BLOGE(sc, "FATAL error from CFC\n");
8406        }
8407    }
8408
8409    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8410        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8411        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8412        /* RQ_USDMDP_FIFO_OVERFLOW */
8413        if (val & 0x18000) {
8414            BLOGE(sc, "FATAL error from PXP\n");
8415        }
8416
8417        if (!CHIP_IS_E1x(sc)) {
8418            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8419            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8420        }
8421    }
8422
8423#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8424#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8425
8426    if (attn & AEU_PXP2_HW_INT_BIT) {
8427        /* CQ47854 workaround: do not panic on
8428         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8429         */
8430        if (!CHIP_IS_E1x(sc)) {
8431            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8432            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8433            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8434            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8435            /*
8436             * If only the PXP2_EOP_ERROR_BIT is set in
8437             * STS0 and STS1 - clear it.
8438             *
8439             * We probably lose additional attentions between
8440             * STS0 and STS_CLR0; in that case the user will not
8441             * be notified about them.
8442             */
8443            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8444                !(val1 & mask1))
8445                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8446
8447            /* print the register, since no one can restore it */
8448            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8449
8450            /*
8451             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8452             * then notify
8453             */
8454            if (val0 & PXP2_EOP_ERROR_BIT) {
8455                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8456
8457                /*
8458                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8459                 * set then clear attention from PXP2 block without panic
8460                 */
8461                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8462                    ((val1 & mask1) == 0))
8463                    attn &= ~AEU_PXP2_HW_INT_BIT;
8464            }
8465        }
8466    }
8467
8468    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8469        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8470                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8471
8472        val = REG_RD(sc, reg_offset);
8473        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8474        REG_WR(sc, reg_offset, val);
8475
8476        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8477              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8478        bxe_panic(sc, ("HW block attention set2\n"));
8479    }
8480}
8481
8482static void
8483bxe_attn_int_deasserted1(struct bxe_softc *sc,
8484                         uint32_t attn)
8485{
8486    int port = SC_PORT(sc);
8487    int reg_offset;
8488    uint32_t val;
8489
8490    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8491        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8492        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8493        /* DORQ discard attention */
8494        if (val & 0x2) {
8495            BLOGE(sc, "FATAL error from DORQ\n");
8496        }
8497    }
8498
8499    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8500        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8501                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8502
8503        val = REG_RD(sc, reg_offset);
8504        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8505        REG_WR(sc, reg_offset, val);
8506
8507        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8508              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8509        bxe_panic(sc, ("HW block attention set1\n"));
8510    }
8511}
8512
8513static void
8514bxe_attn_int_deasserted0(struct bxe_softc *sc,
8515                         uint32_t attn)
8516{
8517    int port = SC_PORT(sc);
8518    int reg_offset;
8519    uint32_t val;
8520
8521    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8522                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8523
8524    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8525        val = REG_RD(sc, reg_offset);
8526        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8527        REG_WR(sc, reg_offset, val);
8528
8529        BLOGW(sc, "SPIO5 hw attention\n");
8530
8531        /* Fan failure attention */
8532        elink_hw_reset_phy(&sc->link_params);
8533        bxe_fan_failure(sc);
8534    }
8535
8536    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8537        bxe_acquire_phy_lock(sc);
8538        elink_handle_module_detect_int(&sc->link_params);
8539        bxe_release_phy_lock(sc);
8540    }
8541
8542    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8543        val = REG_RD(sc, reg_offset);
8544        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8545        REG_WR(sc, reg_offset, val);
8546
8547        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8548                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8549    }
8550}
8551
8552static void
8553bxe_attn_int_deasserted(struct bxe_softc *sc,
8554                        uint32_t deasserted)
8555{
8556    struct attn_route attn;
8557    struct attn_route *group_mask;
8558    int port = SC_PORT(sc);
8559    int index;
8560    uint32_t reg_addr;
8561    uint32_t val;
8562    uint32_t aeu_mask;
8563    uint8_t global = FALSE;
8564
8565    /*
8566     * Need to take the HW lock because the MCP or the other port
8567     * might also try to handle this event.
8568     */
8569    bxe_acquire_alr(sc);
8570
8571    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8572        /* XXX
8573         * In case of parity errors don't handle attentions so that
8574         * the other function would "see" the parity errors.
8575         */
8576        sc->recovery_state = BXE_RECOVERY_INIT;
8577        // XXX schedule a recovery task...
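        /*
         * A minimal sketch (not verified) of how the recovery work could
         * be kicked off here, assuming a hypothetical task
         * sc->recovery_task serviced by the existing slowpath taskqueue:
         */
#if 0
        taskqueue_enqueue(sc->sp_tq, &sc->recovery_task);
#endif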
8578 /* disable HW interrupts */ 8579 bxe_int_disable(sc); 8580 bxe_release_alr(sc); 8581 return; 8582 } 8583 8584 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8585 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8586 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8587 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8588 if (!CHIP_IS_E1x(sc)) { 8589 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8590 } else { 8591 attn.sig[4] = 0; 8592 } 8593 8594 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8595 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8596 8597 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8598 if (deasserted & (1 << index)) { 8599 group_mask = &sc->attn_group[index]; 8600 8601 BLOGD(sc, DBG_INTR, 8602 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8603 group_mask->sig[0], group_mask->sig[1], 8604 group_mask->sig[2], group_mask->sig[3], 8605 group_mask->sig[4]); 8606 8607 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8608 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8609 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8610 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8611 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8612 } 8613 } 8614 8615 bxe_release_alr(sc); 8616 8617 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8618 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8619 COMMAND_REG_ATTN_BITS_CLR); 8620 } else { 8621 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8622 } 8623 8624 val = ~deasserted; 8625 BLOGD(sc, DBG_INTR, 8626 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8627 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8628 REG_WR(sc, reg_addr, val); 8629 8630 if (~sc->attn_state & deasserted) { 8631 BLOGE(sc, "IGU error\n"); 8632 } 8633 8634 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8635 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8636 8637 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8638 8639 aeu_mask = REG_RD(sc, reg_addr); 8640 8641 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8642 aeu_mask, deasserted); 8643 aeu_mask |= (deasserted & 0x3ff); 8644 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8645 8646 REG_WR(sc, reg_addr, aeu_mask); 8647 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8648 8649 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8650 sc->attn_state &= ~deasserted; 8651 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8652} 8653 8654static void 8655bxe_attn_int(struct bxe_softc *sc) 8656{ 8657 /* read local copy of bits */ 8658 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8659 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8660 uint32_t attn_state = sc->attn_state; 8661 8662 /* look for changed bits */ 8663 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8664 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8665 8666 BLOGD(sc, DBG_INTR, 8667 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8668 attn_bits, attn_ack, asserted, deasserted); 8669 8670 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8671 BLOGE(sc, "BAD attention state\n"); 8672 } 8673 8674 /* handle bits that were raised */ 8675 if (asserted) { 8676 bxe_attn_int_asserted(sc, asserted); 8677 } 8678 8679 if (deasserted) { 8680 bxe_attn_int_deasserted(sc, deasserted); 8681 } 8682} 8683 8684static uint16_t 8685bxe_update_dsb_idx(struct bxe_softc *sc) 8686{ 8687 struct host_sp_status_block *def_sb = sc->def_sb; 8688 uint16_t rc = 0; 8689 8690 mb(); /* status block is written to by the chip */ 8691 8692 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8693 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8694 rc |= BXE_DEF_SB_ATT_IDX; 8695 } 8696 8697 if (sc->def_idx != def_sb->sp_sb.running_index) { 8698 sc->def_idx = def_sb->sp_sb.running_index; 8699 rc |= BXE_DEF_SB_IDX; 8700 } 8701 8702 mb(); 8703 8704 return (rc); 8705} 8706 8707static inline struct ecore_queue_sp_obj * 8708bxe_cid_to_q_obj(struct bxe_softc *sc, 8709 uint32_t cid) 8710{ 8711 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8712 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8713} 8714 8715static void 8716bxe_handle_mcast_eqe(struct bxe_softc *sc) 8717{ 8718 struct ecore_mcast_ramrod_params rparam; 8719 int rc; 8720 8721 memset(&rparam, 0, sizeof(rparam)); 8722 8723 rparam.mcast_obj = &sc->mcast_obj; 8724 8725 BXE_MCAST_LOCK(sc); 8726 8727 /* clear pending state for the last command */ 8728 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8729 8730 /* if there are pending mcast commands - send them */ 8731 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8732 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8733 if (rc < 0) { 8734 BLOGD(sc, DBG_SP, 8735 "ERROR: Failed to send pending mcast commands (%d)\n", 8736 rc); 8737 } 8738 } 8739 8740 BXE_MCAST_UNLOCK(sc); 8741} 8742 8743static void 8744bxe_handle_classification_eqe(struct bxe_softc *sc, 8745 union event_ring_elem *elem) 8746{ 8747 unsigned long ramrod_flags = 0; 8748 int rc = 0; 8749 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8750 struct ecore_vlan_mac_obj *vlan_mac_obj; 8751 8752 /* always push next commands out, don't wait here */ 8753 bit_set(&ramrod_flags, RAMROD_CONT); 8754 
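    /*
     * For reference: the FW echo word packs the software CID in its low
     * bits and the pending classification type in its high bits; the cid
     * extraction above and the switch below undo that packing. A sketch
     * of the decode, assuming the mask/shift pair defined in bxe.h:
     */
#if 0
    uint32_t echo_val = le32toh(elem->message.data.eth_event.echo);
    uint32_t sw_cid   = echo_val & BXE_SWCID_MASK;   /* queue index   */
    uint32_t cmd_type = echo_val >> BXE_SWCID_SHIFT; /* ECORE_FILTER_* */
#endif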
8755 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8756 case ECORE_FILTER_MAC_PENDING: 8757 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8758 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8759 break; 8760 8761 case ECORE_FILTER_MCAST_PENDING: 8762 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8763 /* 8764 * This is only relevant for 57710 where multicast MACs are 8765 * configured as unicast MACs using the same ramrod. 8766 */ 8767 bxe_handle_mcast_eqe(sc); 8768 return; 8769 8770 default: 8771 BLOGE(sc, "Unsupported classification command: %d\n", 8772 elem->message.data.eth_event.echo); 8773 return; 8774 } 8775 8776 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8777 8778 if (rc < 0) { 8779 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8780 } else if (rc > 0) { 8781 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8782 } 8783} 8784 8785static void 8786bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8787 union event_ring_elem *elem) 8788{ 8789 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8790 8791 /* send rx_mode command again if was requested */ 8792 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8793 &sc->sp_state)) { 8794 bxe_set_storm_rx_mode(sc); 8795 } 8796#if 0 8797 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, 8798 &sc->sp_state)) { 8799 bxe_set_iscsi_eth_rx_mode(sc, TRUE); 8800 } 8801 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 8802 &sc->sp_state)) { 8803 bxe_set_iscsi_eth_rx_mode(sc, FALSE); 8804 } 8805#endif 8806} 8807 8808static void 8809bxe_update_eq_prod(struct bxe_softc *sc, 8810 uint16_t prod) 8811{ 8812 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8813 wmb(); /* keep prod updates ordered */ 8814} 8815 8816static void 8817bxe_eq_int(struct bxe_softc *sc) 8818{ 8819 uint16_t hw_cons, sw_cons, sw_prod; 8820 union event_ring_elem *elem; 8821 uint8_t echo; 8822 uint32_t cid; 8823 uint8_t opcode; 8824 int spqe_cnt = 0; 8825 struct ecore_queue_sp_obj *q_obj; 8826 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8827 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8828 8829 hw_cons = le16toh(*sc->eq_cons_sb); 8830 8831 /* 8832 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8833 * when we get to the next-page we need to adjust so the loop 8834 * condition below will be met. The next element is the size of a 8835 * regular element and hence incrementing by 1 8836 */ 8837 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8838 hw_cons++; 8839 } 8840 8841 /* 8842 * This function may never run in parallel with itself for a 8843 * specific sc and no need for a read memory barrier here. 
8844 */ 8845 sw_cons = sc->eq_cons; 8846 sw_prod = sc->eq_prod; 8847 8848 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8849 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8850 8851 for (; 8852 sw_cons != hw_cons; 8853 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8854 8855 elem = &sc->eq[EQ_DESC(sw_cons)]; 8856 8857#if 0 8858 int rc; 8859 rc = bxe_iov_eq_sp_event(sc, elem); 8860 if (!rc) { 8861 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8862 goto next_spqe; 8863 } 8864#endif 8865 8866 /* elem CID originates from FW, actually LE */ 8867 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8868 opcode = elem->message.opcode; 8869 8870 /* handle eq element */ 8871 switch (opcode) { 8872#if 0 8873 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8874 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8875 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8876 continue; 8877#endif 8878 8879 case EVENT_RING_OPCODE_STAT_QUERY: 8880 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8881 sc->stats_comp++); 8882 /* nothing to do with stats comp */ 8883 goto next_spqe; 8884 8885 case EVENT_RING_OPCODE_CFC_DEL: 8886 /* handle according to cid range */ 8887 /* we may want to verify here that the sc state is HALTING */ 8888 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8889 q_obj = bxe_cid_to_q_obj(sc, cid); 8890 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8891 break; 8892 } 8893 goto next_spqe; 8894 8895 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8896 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8897 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8898 break; 8899 } 8900 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8901 goto next_spqe; 8902 8903 case EVENT_RING_OPCODE_START_TRAFFIC: 8904 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8905 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8906 break; 8907 } 8908 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8909 goto next_spqe; 8910 8911 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8912 echo = elem->message.data.function_update_event.echo; 8913 if (echo == SWITCH_UPDATE) { 8914 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8915 if (f_obj->complete_cmd(sc, f_obj, 8916 ECORE_F_CMD_SWITCH_UPDATE)) { 8917 break; 8918 } 8919 } 8920 else { 8921 BLOGD(sc, DBG_SP, 8922 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8923#if 0 8924 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8925 /* 8926 * We will perform the queues update from the sp_core_task as 8927 * all queue SP operations should run with CORE_LOCK. 
8928 */ 8929 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8930 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8931#endif 8932 } 8933 goto next_spqe; 8934 8935#if 0 8936 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8937 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8938 bxe_after_afex_vif_lists(sc, elem); 8939 goto next_spqe; 8940#endif 8941 8942 case EVENT_RING_OPCODE_FORWARD_SETUP: 8943 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8944 if (q_obj->complete_cmd(sc, q_obj, 8945 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8946 break; 8947 } 8948 goto next_spqe; 8949 8950 case EVENT_RING_OPCODE_FUNCTION_START: 8951 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8952 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8953 break; 8954 } 8955 goto next_spqe; 8956 8957 case EVENT_RING_OPCODE_FUNCTION_STOP: 8958 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8959 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8960 break; 8961 } 8962 goto next_spqe; 8963 } 8964 8965 switch (opcode | sc->state) { 8966 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8967 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8968 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8969 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8970 rss_raw->clear_pending(rss_raw); 8971 break; 8972 8973 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8974 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8975 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8976 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8977 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8978 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8979 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8980 bxe_handle_classification_eqe(sc, elem); 8981 break; 8982 8983 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8984 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8985 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8986 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8987 bxe_handle_mcast_eqe(sc); 8988 break; 8989 8990 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8991 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8992 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8993 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8994 bxe_handle_rx_mode_eqe(sc, elem); 8995 break; 8996 8997 default: 8998 /* unknown event log error and continue */ 8999 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 9000 elem->message.opcode, sc->state); 9001 } 9002 9003next_spqe: 9004 spqe_cnt++; 9005 } /* for */ 9006 9007 mb(); 9008 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 9009 9010 sc->eq_cons = sw_cons; 9011 sc->eq_prod = sw_prod; 9012 9013 /* make sure that above mem writes were issued towards the memory */ 9014 wmb(); 9015 9016 /* update producer */ 9017 bxe_update_eq_prod(sc, sc->eq_prod); 9018} 9019 9020static void 9021bxe_handle_sp_tq(void *context, 9022 int pending) 9023{ 9024 struct bxe_softc *sc = (struct bxe_softc *)context; 9025 uint16_t status; 9026 9027 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 9028 9029 /* what work needs to be performed? 
*/ 9030 status = bxe_update_dsb_idx(sc); 9031 9032 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 9033 9034 /* HW attentions */ 9035 if (status & BXE_DEF_SB_ATT_IDX) { 9036 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 9037 bxe_attn_int(sc); 9038 status &= ~BXE_DEF_SB_ATT_IDX; 9039 } 9040 9041 /* SP events: STAT_QUERY and others */ 9042 if (status & BXE_DEF_SB_IDX) { 9043 /* handle EQ completions */ 9044 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 9045 bxe_eq_int(sc); 9046 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9047 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9048 status &= ~BXE_DEF_SB_IDX; 9049 } 9050 9051 /* if status is non zero then something went wrong */ 9052 if (__predict_false(status)) { 9053 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9054 } 9055 9056 /* ack status block only if something was actually handled */ 9057 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9058 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9059 9060 /* 9061 * Must be called after the EQ processing (since eq leads to sriov 9062 * ramrod completion flows). 9063 * This flow may have been scheduled by the arrival of a ramrod 9064 * completion, or by the sriov code rescheduling itself. 9065 */ 9066 // XXX bxe_iov_sp_task(sc); 9067 9068#if 0 9069 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9070 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9071 &sc->sp_state)) { 9072 bxe_link_report(sc); 9073 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9074 } 9075#endif 9076} 9077 9078static void 9079bxe_handle_fp_tq(void *context, 9080 int pending) 9081{ 9082 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9083 struct bxe_softc *sc = fp->sc; 9084 uint8_t more_tx = FALSE; 9085 uint8_t more_rx = FALSE; 9086 9087 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9088 9089 /* XXX 9090 * IFF_DRV_RUNNING state can't be checked here since we process 9091 * slowpath events on a client queue during setup. Instead 9092 * we need to add a "process/continue" flag here that the driver 9093 * can use to tell the task here not to do anything. 
9094     */
9095#if 0
9096    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
9097        return;
9098    }
9099#endif
9100
9101    /* update the fastpath index */
9102    bxe_update_fp_sb_idx(fp);
9103
9104    /* XXX add a loop here if we ever support multiple tx CoS */
9105    /* fp->txdata[cos] */
9106    if (bxe_has_tx_work(fp)) {
9107        BXE_FP_TX_LOCK(fp);
9108        more_tx = bxe_txeof(sc, fp);
9109        BXE_FP_TX_UNLOCK(fp);
9110    }
9111
9112    if (bxe_has_rx_work(fp)) {
9113        more_rx = bxe_rxeof(sc, fp);
9114    }
9115
9116    if (more_rx /*|| more_tx*/) {
9117        /* still more work to do */
9118        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9119        return;
9120    }
9121
9122    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9123               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9124}
9125
9126static void
9127bxe_task_fp(struct bxe_fastpath *fp)
9128{
9129    struct bxe_softc *sc = fp->sc;
9130    uint8_t more_tx = FALSE;
9131    uint8_t more_rx = FALSE;
9132
9133    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
9134
9135    /* update the fastpath index */
9136    bxe_update_fp_sb_idx(fp);
9137
9138    /* XXX add a loop here if we ever support multiple tx CoS */
9139    /* fp->txdata[cos] */
9140    if (bxe_has_tx_work(fp)) {
9141        BXE_FP_TX_LOCK(fp);
9142        more_tx = bxe_txeof(sc, fp);
9143        BXE_FP_TX_UNLOCK(fp);
9144    }
9145
9146    if (bxe_has_rx_work(fp)) {
9147        more_rx = bxe_rxeof(sc, fp);
9148    }
9149
9150    if (more_rx /*|| more_tx*/) {
9151        /* still more work to do, bail out of this ISR and process later */
9152        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9153        return;
9154    }
9155
9156    /*
9157     * Here we write the fastpath index taken before doing any tx or rx work.
9158     * It is quite possible that other hw events occurred up to this point
9159     * and were actually processed accordingly above. Since we are about to
9160     * write an older fastpath index, an interrupt may arrive in which we
9161     * do no work.
9162     */
9163    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9164               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9165}
9166
9167/*
9168 * Legacy interrupt entry point.
9169 *
9170 * Verifies that the controller generated the interrupt and
9171 * then calls a separate routine to handle the various
9172 * interrupt causes: link, RX, and TX.
9173 */
9174static void
9175bxe_intr_legacy(void *xsc)
9176{
9177    struct bxe_softc *sc = (struct bxe_softc *)xsc;
9178    struct bxe_fastpath *fp;
9179    uint16_t status, mask;
9180    int i;
9181
9182    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
9183
9184#if 0
9185    /* Don't handle any interrupts if we're not ready. */
9186    if (__predict_false(sc->intr_sem != 0)) {
9187        return;
9188    }
9189#endif
9190
9191    /*
9192     * 0 for ustorm, 1 for cstorm.
9193     * The bits returned from ack_int() are 0-15:
9194     *   bit 0 = attention status block
9195     *   bit 1 = fast path status block
9196     *   a mask of 0x2 or more = tx/rx event
9197     *   a mask of 1 = slow path event
9198     */
9199
9200    status = bxe_ack_int(sc);
9201
9202    /* the interrupt is not for us */
9203    if (__predict_false(status == 0)) {
9204        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
9205        return;
9206    }
9207
9208    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
9209
9210    FOR_EACH_ETH_QUEUE(sc, i) {
9211        fp = &sc->fp[i];
9212        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
9213        if (status & mask) {
9214            /* acknowledge and disable further fastpath interrupts */
9215            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9216            bxe_task_fp(fp);
9217            status &= ~mask;
9218        }
9219    }
9220
9221#if 0
9222    if (CNIC_SUPPORT(sc)) {
9223        mask = 0x2;
9224        if (status & (mask | 0x1)) {
9225            ...
9226            status &= ~mask;
9227        }
9228    }
9229#endif
9230
9231    if (__predict_false(status & 0x1)) {
9232        /* acknowledge and disable further slowpath interrupts */
9233        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9234
9235        /* schedule slowpath handler */
9236        taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9237
9238        status &= ~0x1;
9239    }
9240
9241    if (__predict_false(status)) {
9242        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
9243    }
9244}
9245
9246/* slowpath interrupt entry point */
9247static void
9248bxe_intr_sp(void *xsc)
9249{
9250    struct bxe_softc *sc = (struct bxe_softc *)xsc;
9251
9252    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
9253
9254    /* acknowledge and disable further slowpath interrupts */
9255    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9256
9257    /* schedule slowpath handler */
9258    taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9259}
9260
9261/* fastpath interrupt entry point */
9262static void
9263bxe_intr_fp(void *xfp)
9264{
9265    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
9266    struct bxe_softc *sc = fp->sc;
9267
9268    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
9269
9270    BLOGD(sc, DBG_INTR,
9271          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
9272          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
9273
9274#if 0
9275    /* Don't handle any interrupts if we're not ready. */
9276    if (__predict_false(sc->intr_sem != 0)) {
9277        return;
9278    }
9279#endif
9280
9281    /* acknowledge and disable further fastpath interrupts */
9282    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9283
9284    bxe_task_fp(fp);
9285}
9286
9287/* Release all interrupts allocated by the driver. */
9288static void
9289bxe_interrupt_free(struct bxe_softc *sc)
9290{
9291    int i;
9292
9293    switch (sc->interrupt_mode) {
9294    case INTR_MODE_INTX:
9295        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
9296        if (sc->intr[0].resource != NULL) {
9297            bus_release_resource(sc->dev,
9298                                 SYS_RES_IRQ,
9299                                 sc->intr[0].rid,
9300                                 sc->intr[0].resource);
9301        }
9302        break;
9303    case INTR_MODE_MSI:
9304        for (i = 0; i < sc->intr_count; i++) {
9305            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
9306            if (sc->intr[i].resource && sc->intr[i].rid) {
9307                bus_release_resource(sc->dev,
9308                                     SYS_RES_IRQ,
9309                                     sc->intr[i].rid,
9310                                     sc->intr[i].resource);
9311            }
9312        }
9313        pci_release_msi(sc->dev);
9314        break;
9315    case INTR_MODE_MSIX:
9316        for (i = 0; i < sc->intr_count; i++) {
9317            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
9318            if (sc->intr[i].resource && sc->intr[i].rid) {
9319                bus_release_resource(sc->dev,
9320                                     SYS_RES_IRQ,
9321                                     sc->intr[i].rid,
9322                                     sc->intr[i].resource);
9323            }
9324        }
9325        pci_release_msi(sc->dev);
9326        break;
9327    default:
9328        /* nothing to do as the initial allocation failed */
9329        break;
9330    }
9331}
9332
9333/*
9334 * This function determines and allocates the appropriate
9335 * interrupt based on system capabilities and user request.
9336 *
9337 * The user may force a particular interrupt mode, specify
9338 * the number of receive queues, specify the method for
9339 * distributing received frames to receive queues, or use
9340 * the default settings which will automatically select the
9341 * best supported combination. In addition, the OS may or
9342 * may not support certain combinations of these settings.
9343 * This routine attempts to reconcile the settings requested
9344 * by the user with the capabilities available from the system
9345 * to select the optimal combination of features.
9346 *
9347 * Returns:
9348 *   0 = Success, !0 = Failure.
9349 */
9350static int
9351bxe_interrupt_alloc(struct bxe_softc *sc)
9352{
9353    int msix_count = 0;
9354    int msi_count = 0;
9355    int num_requested = 0;
9356    int num_allocated = 0;
9357    int rid, i, j;
9358    int rc;
9359
9360    /* get the number of available MSI/MSI-X interrupts from the OS */
9361    if (sc->interrupt_mode > 0) {
9362        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
9363            msix_count = pci_msix_count(sc->dev);
9364        }
9365
9366        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
9367            msi_count = pci_msi_count(sc->dev);
9368        }
9369
9370        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
9371              msi_count, msix_count);
9372    }
9373
9374    do { /* try allocating MSI-X interrupt resources (at least 2) */
9375        if (sc->interrupt_mode != INTR_MODE_MSIX) {
9376            break;
9377        }
9378
9379        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
9380            (msix_count < 2)) {
9381            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9382            break;
9383        }
9384
9385        /* ask for the necessary number of MSI-X vectors */
9386        num_requested = min((sc->num_queues + 1), msix_count);
9387
9388        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9389
9390        num_allocated = num_requested;
9391        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9392            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9393            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9394            break;
9395        }
9396
9397        if (num_allocated < 2) { /* possible? */
9398            BLOGE(sc, "MSI-X allocation less than 2!\n");
9399            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9400            pci_release_msi(sc->dev);
9401            break;
9402        }
9403
9404        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9405              num_requested, num_allocated);
9406
9407        /* best effort so use the number of vectors allocated to us */
9408        sc->intr_count = num_allocated;
9409        sc->num_queues = num_allocated - 1;
9410
9411        rid = 1; /* initial resource identifier */
9412
9413        /* allocate the MSI-X vectors */
9414        for (i = 0; i < num_allocated; i++) {
9415            sc->intr[i].rid = (rid + i);
9416
9417            if ((sc->intr[i].resource =
9418                 bus_alloc_resource_any(sc->dev,
9419                                        SYS_RES_IRQ,
9420                                        &sc->intr[i].rid,
9421                                        RF_ACTIVE)) == NULL) {
9422                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9423                      i, (rid + i));
9424
9425                for (j = (i - 1); j >= 0; j--) {
9426                    bus_release_resource(sc->dev,
9427                                         SYS_RES_IRQ,
9428                                         sc->intr[j].rid,
9429                                         sc->intr[j].resource);
9430                }
9431
9432                sc->intr_count = 0;
9433                sc->num_queues = 0;
9434                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9435                pci_release_msi(sc->dev);
9436                break;
9437            }
9438
9439            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9440        }
9441    } while (0);
9442
9443    do { /* try allocating MSI vector resources (at least 1) */
9444        if (sc->interrupt_mode != INTR_MODE_MSI) {
9445            break;
9446        }
9447
9448        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9449            (msi_count < 1)) {
9450            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9451            break;
9452        }
9453
9454        /* ask for a single MSI vector */
9455        num_requested = 1;
9456
9457        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9458
9459        num_allocated = num_requested;
9460        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9461            BLOGE(sc, "MSI alloc failed (%d)!\n",
rc); 9462 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9463 break; 9464 } 9465 9466 if (num_allocated != 1) { /* possible? */ 9467 BLOGE(sc, "MSI allocation is not 1!\n"); 9468 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9469 pci_release_msi(sc->dev); 9470 break; 9471 } 9472 9473 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9474 num_requested, num_allocated); 9475 9476 /* best effort so use the number of vectors allocated to us */ 9477 sc->intr_count = num_allocated; 9478 sc->num_queues = num_allocated; 9479 9480 rid = 1; /* initial resource identifier */ 9481 9482 sc->intr[0].rid = rid; 9483 9484 if ((sc->intr[0].resource = 9485 bus_alloc_resource_any(sc->dev, 9486 SYS_RES_IRQ, 9487 &sc->intr[0].rid, 9488 RF_ACTIVE)) == NULL) { 9489 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9490 sc->intr_count = 0; 9491 sc->num_queues = 0; 9492 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9493 pci_release_msi(sc->dev); 9494 break; 9495 } 9496 9497 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9498 } while (0); 9499 9500 do { /* try allocating INTx vector resources */ 9501 if (sc->interrupt_mode != INTR_MODE_INTX) { 9502 break; 9503 } 9504 9505 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9506 9507 /* only one vector for INTx */ 9508 sc->intr_count = 1; 9509 sc->num_queues = 1; 9510 9511 rid = 0; /* initial resource identifier */ 9512 9513 sc->intr[0].rid = rid; 9514 9515 if ((sc->intr[0].resource = 9516 bus_alloc_resource_any(sc->dev, 9517 SYS_RES_IRQ, 9518 &sc->intr[0].rid, 9519 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9520 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9521 sc->intr_count = 0; 9522 sc->num_queues = 0; 9523 sc->interrupt_mode = -1; /* Failed! */ 9524 break; 9525 } 9526 9527 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9528 } while (0); 9529 9530 if (sc->interrupt_mode == -1) { 9531 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9532 rc = 1; 9533 } else { 9534 BLOGD(sc, DBG_LOAD, 9535 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9536 sc->interrupt_mode, sc->num_queues); 9537 rc = 0; 9538 } 9539 9540 return (rc); 9541} 9542 9543static void 9544bxe_interrupt_detach(struct bxe_softc *sc) 9545{ 9546 struct bxe_fastpath *fp; 9547 int i; 9548 9549 /* release interrupt resources */ 9550 for (i = 0; i < sc->intr_count; i++) { 9551 if (sc->intr[i].resource && sc->intr[i].tag) { 9552 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9553 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9554 } 9555 } 9556 9557 for (i = 0; i < sc->num_queues; i++) { 9558 fp = &sc->fp[i]; 9559 if (fp->tq) { 9560 taskqueue_drain(fp->tq, &fp->tq_task); 9561 taskqueue_free(fp->tq); 9562 fp->tq = NULL; 9563 } 9564 } 9565 9566 9567 if (sc->sp_tq) { 9568 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9569 taskqueue_free(sc->sp_tq); 9570 sc->sp_tq = NULL; 9571 } 9572} 9573 9574/* 9575 * Enables interrupts and attach to the ISR. 9576 * 9577 * When using multiple MSI/MSI-X vectors the first vector 9578 * is used for slowpath operations while all remaining 9579 * vectors are used for fastpath operations. If only a 9580 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9581 * ISR must look for both slowpath and fastpath completions. 
9582 */ 9583static int 9584bxe_interrupt_attach(struct bxe_softc *sc) 9585{ 9586 struct bxe_fastpath *fp; 9587 int rc = 0; 9588 int i; 9589 9590 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9591 "bxe%d_sp_tq", sc->unit); 9592 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9593 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9594 taskqueue_thread_enqueue, 9595 &sc->sp_tq); 9596 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9597 "%s", sc->sp_tq_name); 9598 9599 9600 for (i = 0; i < sc->num_queues; i++) { 9601 fp = &sc->fp[i]; 9602 snprintf(fp->tq_name, sizeof(fp->tq_name), 9603 "bxe%d_fp%d_tq", sc->unit, i); 9604 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9605 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9606 taskqueue_thread_enqueue, 9607 &fp->tq); 9608 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9609 "%s", fp->tq_name); 9610 } 9611 9612 /* setup interrupt handlers */ 9613 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9614 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9615 9616 /* 9617 * Setup the interrupt handler. Note that we pass the driver instance 9618 * to the interrupt handler for the slowpath. 9619 */ 9620 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9621 (INTR_TYPE_NET | INTR_MPSAFE), 9622 NULL, bxe_intr_sp, sc, 9623 &sc->intr[0].tag)) != 0) { 9624 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9625 goto bxe_interrupt_attach_exit; 9626 } 9627 9628 bus_describe_intr(sc->dev, sc->intr[0].resource, 9629 sc->intr[0].tag, "sp"); 9630 9631 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9632 9633 /* initialize the fastpath vectors (note the first was used for sp) */ 9634 for (i = 0; i < sc->num_queues; i++) { 9635 fp = &sc->fp[i]; 9636 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9637 9638 /* 9639 * Setup the interrupt handler. Note that we pass the 9640 * fastpath context to the interrupt handler in this 9641 * case. 9642 */ 9643 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9644 (INTR_TYPE_NET | INTR_MPSAFE), 9645 NULL, bxe_intr_fp, fp, 9646 &sc->intr[i + 1].tag)) != 0) { 9647 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9648 (i + 1), rc); 9649 goto bxe_interrupt_attach_exit; 9650 } 9651 9652 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9653 sc->intr[i + 1].tag, "fp%02d", i); 9654 9655 /* bind the fastpath instance to a cpu */ 9656 if (sc->num_queues > 1) { 9657 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9658 } 9659 9660 fp->state = BXE_FP_STATE_IRQ; 9661 } 9662 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9663 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9664 9665 /* 9666 * Setup the interrupt handler. Note that we pass the 9667 * driver instance to the interrupt handler which 9668 * will handle both the slowpath and fastpath. 9669 */ 9670 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9671 (INTR_TYPE_NET | INTR_MPSAFE), 9672 NULL, bxe_intr_legacy, sc, 9673 &sc->intr[0].tag)) != 0) { 9674 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9675 goto bxe_interrupt_attach_exit; 9676 } 9677 9678 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9679 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9680 9681 /* 9682 * Setup the interrupt handler. Note that we pass the 9683 * driver instance to the interrupt handler which 9684 * will handle both the slowpath and fastpath. 
9685 */ 9686 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9687 (INTR_TYPE_NET | INTR_MPSAFE), 9688 NULL, bxe_intr_legacy, sc, 9689 &sc->intr[0].tag)) != 0) { 9690 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9691 goto bxe_interrupt_attach_exit; 9692 } 9693 } 9694 9695bxe_interrupt_attach_exit: 9696 9697 return (rc); 9698} 9699 9700static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9701static int bxe_init_hw_common(struct bxe_softc *sc); 9702static int bxe_init_hw_port(struct bxe_softc *sc); 9703static int bxe_init_hw_func(struct bxe_softc *sc); 9704static void bxe_reset_common(struct bxe_softc *sc); 9705static void bxe_reset_port(struct bxe_softc *sc); 9706static void bxe_reset_func(struct bxe_softc *sc); 9707static int bxe_gunzip_init(struct bxe_softc *sc); 9708static void bxe_gunzip_end(struct bxe_softc *sc); 9709static int bxe_init_firmware(struct bxe_softc *sc); 9710static void bxe_release_firmware(struct bxe_softc *sc); 9711 9712static struct 9713ecore_func_sp_drv_ops bxe_func_sp_drv = { 9714 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9715 .init_hw_cmn = bxe_init_hw_common, 9716 .init_hw_port = bxe_init_hw_port, 9717 .init_hw_func = bxe_init_hw_func, 9718 9719 .reset_hw_cmn = bxe_reset_common, 9720 .reset_hw_port = bxe_reset_port, 9721 .reset_hw_func = bxe_reset_func, 9722 9723 .gunzip_init = bxe_gunzip_init, 9724 .gunzip_end = bxe_gunzip_end, 9725 9726 .init_fw = bxe_init_firmware, 9727 .release_fw = bxe_release_firmware, 9728}; 9729 9730static void 9731bxe_init_func_obj(struct bxe_softc *sc) 9732{ 9733 sc->dmae_ready = 0; 9734 9735 ecore_init_func_obj(sc, 9736 &sc->func_obj, 9737 BXE_SP(sc, func_rdata), 9738 BXE_SP_MAPPING(sc, func_rdata), 9739 BXE_SP(sc, func_afex_rdata), 9740 BXE_SP_MAPPING(sc, func_afex_rdata), 9741 &bxe_func_sp_drv); 9742} 9743 9744static int 9745bxe_init_hw(struct bxe_softc *sc, 9746 uint32_t load_code) 9747{ 9748 struct ecore_func_state_params func_params = { NULL }; 9749 int rc; 9750 9751 /* prepare the parameters for function state transitions */ 9752 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9753 9754 func_params.f_obj = &sc->func_obj; 9755 func_params.cmd = ECORE_F_CMD_HW_INIT; 9756 9757 func_params.params.hw_init.load_phase = load_code; 9758 9759 /* 9760 * Via a plethora of function pointers, we will eventually reach 9761 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
9762 */ 9763 rc = ecore_func_state_change(sc, &func_params); 9764 9765 return (rc); 9766} 9767 9768static void 9769bxe_fill(struct bxe_softc *sc, 9770 uint32_t addr, 9771 int fill, 9772 uint32_t len) 9773{ 9774 uint32_t i; 9775 9776 if (!(len % 4) && !(addr % 4)) { 9777 for (i = 0; i < len; i += 4) { 9778 REG_WR(sc, (addr + i), fill); 9779 } 9780 } else { 9781 for (i = 0; i < len; i++) { 9782 REG_WR8(sc, (addr + i), fill); 9783 } 9784 } 9785} 9786 9787/* writes FP SP data to FW - data_size in dwords */ 9788static void 9789bxe_wr_fp_sb_data(struct bxe_softc *sc, 9790 int fw_sb_id, 9791 uint32_t *sb_data_p, 9792 uint32_t data_size) 9793{ 9794 int index; 9795 9796 for (index = 0; index < data_size; index++) { 9797 REG_WR(sc, 9798 (BAR_CSTRORM_INTMEM + 9799 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9800 (sizeof(uint32_t) * index)), 9801 *(sb_data_p + index)); 9802 } 9803} 9804 9805static void 9806bxe_zero_fp_sb(struct bxe_softc *sc, 9807 int fw_sb_id) 9808{ 9809 struct hc_status_block_data_e2 sb_data_e2; 9810 struct hc_status_block_data_e1x sb_data_e1x; 9811 uint32_t *sb_data_p; 9812 uint32_t data_size = 0; 9813 9814 if (!CHIP_IS_E1x(sc)) { 9815 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9816 sb_data_e2.common.state = SB_DISABLED; 9817 sb_data_e2.common.p_func.vf_valid = FALSE; 9818 sb_data_p = (uint32_t *)&sb_data_e2; 9819 data_size = (sizeof(struct hc_status_block_data_e2) / 9820 sizeof(uint32_t)); 9821 } else { 9822 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9823 sb_data_e1x.common.state = SB_DISABLED; 9824 sb_data_e1x.common.p_func.vf_valid = FALSE; 9825 sb_data_p = (uint32_t *)&sb_data_e1x; 9826 data_size = (sizeof(struct hc_status_block_data_e1x) / 9827 sizeof(uint32_t)); 9828 } 9829 9830 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9831 9832 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9833 0, CSTORM_STATUS_BLOCK_SIZE); 9834 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9835 0, CSTORM_SYNC_BLOCK_SIZE); 9836} 9837 9838static void 9839bxe_wr_sp_sb_data(struct bxe_softc *sc, 9840 struct hc_sp_status_block_data *sp_sb_data) 9841{ 9842 int i; 9843 9844 for (i = 0; 9845 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9846 i++) { 9847 REG_WR(sc, 9848 (BAR_CSTRORM_INTMEM + 9849 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9850 (i * sizeof(uint32_t))), 9851 *((uint32_t *)sp_sb_data + i)); 9852 } 9853} 9854 9855static void 9856bxe_zero_sp_sb(struct bxe_softc *sc) 9857{ 9858 struct hc_sp_status_block_data sp_sb_data; 9859 9860 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9861 9862 sp_sb_data.state = SB_DISABLED; 9863 sp_sb_data.p_func.vf_valid = FALSE; 9864 9865 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9866 9867 bxe_fill(sc, 9868 (BAR_CSTRORM_INTMEM + 9869 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9870 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9871 bxe_fill(sc, 9872 (BAR_CSTRORM_INTMEM + 9873 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9874 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9875} 9876 9877static void 9878bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9879 int igu_sb_id, 9880 int igu_seg_id) 9881{ 9882 hc_sm->igu_sb_id = igu_sb_id; 9883 hc_sm->igu_seg_id = igu_seg_id; 9884 hc_sm->timer_value = 0xFF; 9885 hc_sm->time_to_expire = 0xFFFFFFFF; 9886} 9887 9888static void 9889bxe_map_sb_state_machines(struct hc_index_data *index_data) 9890{ 9891 /* zero out state machine indices */ 9892 9893 /* rx indices */ 9894 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
&= ~HC_INDEX_DATA_SM_ID; 9895 9896 /* tx indices */ 9897 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9898 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9899 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9900 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9901 9902 /* map indices */ 9903 9904 /* rx indices */ 9905 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9906 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9907 9908 /* tx indices */ 9909 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9910 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9911 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9912 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9913 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9914 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9915 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9916 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9917} 9918 9919static void 9920bxe_init_sb(struct bxe_softc *sc, 9921 bus_addr_t busaddr, 9922 int vfid, 9923 uint8_t vf_valid, 9924 int fw_sb_id, 9925 int igu_sb_id) 9926{ 9927 struct hc_status_block_data_e2 sb_data_e2; 9928 struct hc_status_block_data_e1x sb_data_e1x; 9929 struct hc_status_block_sm *hc_sm_p; 9930 uint32_t *sb_data_p; 9931 int igu_seg_id; 9932 int data_size; 9933 9934 if (CHIP_INT_MODE_IS_BC(sc)) { 9935 igu_seg_id = HC_SEG_ACCESS_NORM; 9936 } else { 9937 igu_seg_id = IGU_SEG_ACCESS_NORM; 9938 } 9939 9940 bxe_zero_fp_sb(sc, fw_sb_id); 9941 9942 if (!CHIP_IS_E1x(sc)) { 9943 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9944 sb_data_e2.common.state = SB_ENABLED; 9945 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9946 sb_data_e2.common.p_func.vf_id = vfid; 9947 sb_data_e2.common.p_func.vf_valid = vf_valid; 9948 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9949 sb_data_e2.common.same_igu_sb_1b = TRUE; 9950 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9951 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9952 hc_sm_p = sb_data_e2.common.state_machine; 9953 sb_data_p = (uint32_t *)&sb_data_e2; 9954 data_size = (sizeof(struct hc_status_block_data_e2) / 9955 sizeof(uint32_t)); 9956 bxe_map_sb_state_machines(sb_data_e2.index_data); 9957 } else { 9958 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9959 sb_data_e1x.common.state = SB_ENABLED; 9960 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9961 sb_data_e1x.common.p_func.vf_id = 0xff; 9962 sb_data_e1x.common.p_func.vf_valid = FALSE; 9963 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9964 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9965 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9966 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9967 hc_sm_p = sb_data_e1x.common.state_machine; 9968 sb_data_p = (uint32_t *)&sb_data_e1x; 9969 data_size = (sizeof(struct hc_status_block_data_e1x) / 9970 sizeof(uint32_t)); 9971 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9972 } 9973 9974 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9975 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9976 9977 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9978 9979 /* write indices to HW - PCI guarantees endianity of regpairs */ 9980 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9981} 9982 9983static inline uint8_t 9984bxe_fp_qzone_id(struct bxe_fastpath *fp) 9985{ 9986 if (CHIP_IS_E1x(fp->sc)) { 9987 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9988 } else { 9989 return (fp->cl_id); 9990 } 9991} 9992 
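
/*
 * Illustrative sketch (kept disabled, mirroring the driver's own use of
 * "#if 0" blocks): how the helpers above are typically combined when the PF
 * brings up one fastpath status block. The helper name
 * bxe_example_init_fp_sb() is hypothetical and not part of the driver.
 */
#if 0
static void
bxe_example_init_fp_sb(struct bxe_softc *sc, struct bxe_fastpath *fp)
{
    /* derive the client queue zone id; E1x encodes the port into it */
    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* program the FW/IGU status block backing this fastpath (PF only) */
    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);
}
#endif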
9993static inline uint32_t 9994bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9995 struct bxe_fastpath *fp) 9996{ 9997 uint32_t offset = BAR_USTRORM_INTMEM; 9998 9999#if 0 10000 if (IS_VF(sc)) { 10001 return (PXP_VF_ADDR_USDM_QUEUES_START + 10002 (sc->acquire_resp.resc.hw_qid[fp->index] * 10003 sizeof(struct ustorm_queue_zone_data))); 10004 } else 10005#endif 10006 if (!CHIP_IS_E1x(sc)) { 10007 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 10008 } else { 10009 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 10010 } 10011 10012 return (offset); 10013} 10014 10015static void 10016bxe_init_eth_fp(struct bxe_softc *sc, 10017 int idx) 10018{ 10019 struct bxe_fastpath *fp = &sc->fp[idx]; 10020 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 10021 unsigned long q_type = 0; 10022 int cos; 10023 10024 fp->sc = sc; 10025 fp->index = idx; 10026 10027 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 10028 "bxe%d_fp%d_tx_lock", sc->unit, idx); 10029 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 10030 10031 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 10032 "bxe%d_fp%d_rx_lock", sc->unit, idx); 10033 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 10034 10035 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 10036 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 10037 10038 fp->cl_id = (CHIP_IS_E1x(sc)) ? 10039 (SC_L_ID(sc) + idx) : 10040 /* want client ID same as IGU SB ID for non-E1 */ 10041 fp->igu_sb_id; 10042 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 10043 10044 /* setup sb indices */ 10045 if (!CHIP_IS_E1x(sc)) { 10046 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 10047 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 10048 } else { 10049 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 10050 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 10051 } 10052 10053 /* init shortcut */ 10054 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 10055 10056 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 10057 10058 /* 10059 * XXX If multiple CoS is ever supported then each fastpath structure 10060 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
 */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
}

static inline void
bxe_update_rx_prod(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   uint16_t rx_bd_prod,
                   uint16_t rx_cq_prod,
                   uint16_t rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
10159 * Warning, this will generate an interrupt (to the TSTORM) 10160 * so this can only be done after the chip is initialized 10161 */ 10162 bxe_update_rx_prod(sc, fp, 10163 fp->rx_bd_prod, 10164 fp->rx_cq_prod, 10165 fp->rx_sge_prod); 10166 10167 if (i != 0) { 10168 continue; 10169 } 10170 10171 if (CHIP_IS_E1(sc)) { 10172 REG_WR(sc, 10173 (BAR_USTRORM_INTMEM + 10174 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 10175 U64_LO(fp->rcq_dma.paddr)); 10176 REG_WR(sc, 10177 (BAR_USTRORM_INTMEM + 10178 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 10179 U64_HI(fp->rcq_dma.paddr)); 10180 } 10181 } 10182} 10183 10184static void 10185bxe_init_tx_ring_one(struct bxe_fastpath *fp) 10186{ 10187 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 10188 fp->tx_db.data.zero_fill1 = 0; 10189 fp->tx_db.data.prod = 0; 10190 10191 fp->tx_pkt_prod = 0; 10192 fp->tx_pkt_cons = 0; 10193 fp->tx_bd_prod = 0; 10194 fp->tx_bd_cons = 0; 10195 fp->eth_q_stats.tx_pkts = 0; 10196} 10197 10198static inline void 10199bxe_init_tx_rings(struct bxe_softc *sc) 10200{ 10201 int i; 10202 10203 for (i = 0; i < sc->num_queues; i++) { 10204#if 0 10205 uint8_t cos; 10206 for (cos = 0; cos < sc->max_cos; cos++) { 10207 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); 10208 } 10209#else 10210 bxe_init_tx_ring_one(&sc->fp[i]); 10211#endif 10212 } 10213} 10214 10215static void 10216bxe_init_def_sb(struct bxe_softc *sc) 10217{ 10218 struct host_sp_status_block *def_sb = sc->def_sb; 10219 bus_addr_t mapping = sc->def_sb_dma.paddr; 10220 int igu_sp_sb_index; 10221 int igu_seg_id; 10222 int port = SC_PORT(sc); 10223 int func = SC_FUNC(sc); 10224 int reg_offset, reg_offset_en5; 10225 uint64_t section; 10226 int index, sindex; 10227 struct hc_sp_status_block_data sp_sb_data; 10228 10229 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 10230 10231 if (CHIP_INT_MODE_IS_BC(sc)) { 10232 igu_sp_sb_index = DEF_SB_IGU_ID; 10233 igu_seg_id = HC_SEG_ACCESS_DEF; 10234 } else { 10235 igu_sp_sb_index = sc->igu_dsb_id; 10236 igu_seg_id = IGU_SEG_ACCESS_DEF; 10237 } 10238 10239 /* attentions */ 10240 section = ((uint64_t)mapping + 10241 offsetof(struct host_sp_status_block, atten_status_block)); 10242 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 10243 sc->attn_state = 0; 10244 10245 reg_offset = (port) ? 10246 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10247 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 10248 reg_offset_en5 = (port) ? 10249 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 10250 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 10251 10252 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 10253 /* take care of sig[0]..sig[4] */ 10254 for (sindex = 0; sindex < 4; sindex++) { 10255 sc->attn_group[index].sig[sindex] = 10256 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 10257 } 10258 10259 if (!CHIP_IS_E1x(sc)) { 10260 /* 10261 * enable5 is separate from the rest of the registers, 10262 * and the address skip is 4 and not 16 between the 10263 * different groups 10264 */ 10265 sc->attn_group[index].sig[4] = 10266 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 10267 } else { 10268 sc->attn_group[index].sig[4] = 0; 10269 } 10270 } 10271 10272 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10273 reg_offset = (port) ? 
10274 HC_REG_ATTN_MSG1_ADDR_L : 10275 HC_REG_ATTN_MSG0_ADDR_L; 10276 REG_WR(sc, reg_offset, U64_LO(section)); 10277 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 10278 } else if (!CHIP_IS_E1x(sc)) { 10279 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 10280 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 10281 } 10282 10283 section = ((uint64_t)mapping + 10284 offsetof(struct host_sp_status_block, sp_sb)); 10285 10286 bxe_zero_sp_sb(sc); 10287 10288 /* PCI guarantees endianity of regpair */ 10289 sp_sb_data.state = SB_ENABLED; 10290 sp_sb_data.host_sb_addr.lo = U64_LO(section); 10291 sp_sb_data.host_sb_addr.hi = U64_HI(section); 10292 sp_sb_data.igu_sb_id = igu_sp_sb_index; 10293 sp_sb_data.igu_seg_id = igu_seg_id; 10294 sp_sb_data.p_func.pf_id = func; 10295 sp_sb_data.p_func.vnic_id = SC_VN(sc); 10296 sp_sb_data.p_func.vf_id = 0xff; 10297 10298 bxe_wr_sp_sb_data(sc, &sp_sb_data); 10299 10300 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 10301} 10302 10303static void 10304bxe_init_sp_ring(struct bxe_softc *sc) 10305{ 10306 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 10307 sc->spq_prod_idx = 0; 10308 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 10309 sc->spq_prod_bd = sc->spq; 10310 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 10311} 10312 10313static void 10314bxe_init_eq_ring(struct bxe_softc *sc) 10315{ 10316 union event_ring_elem *elem; 10317 int i; 10318 10319 for (i = 1; i <= NUM_EQ_PAGES; i++) { 10320 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 10321 10322 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 10323 BCM_PAGE_SIZE * 10324 (i % NUM_EQ_PAGES))); 10325 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 10326 BCM_PAGE_SIZE * 10327 (i % NUM_EQ_PAGES))); 10328 } 10329 10330 sc->eq_cons = 0; 10331 sc->eq_prod = NUM_EQ_DESC; 10332 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 10333 10334 atomic_store_rel_long(&sc->eq_spq_left, 10335 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 10336 NUM_EQ_DESC) - 1)); 10337} 10338 10339static void 10340bxe_init_internal_common(struct bxe_softc *sc) 10341{ 10342 int i; 10343 10344 if (IS_MF_SI(sc)) { 10345 /* 10346 * In switch independent mode, the TSTORM needs to accept 10347 * packets that failed classification, since approximate match 10348 * mac addresses aren't written to NIG LLH. 10349 */ 10350 REG_WR8(sc, 10351 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10352 2); 10353 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ 10354 REG_WR8(sc, 10355 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10356 0); 10357 } 10358 10359 /* 10360 * Zero this manually as its initialization is currently missing 10361 * in the initTool. 10362 */ 10363 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 10364 REG_WR(sc, 10365 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 10366 0); 10367 } 10368 10369 if (!CHIP_IS_E1x(sc)) { 10370 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 10371 CHIP_INT_MODE_IS_BC(sc) ? 
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static void
storm_memset_func_cfg(struct bxe_softc *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t abs_fid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_TSTRORM_INTMEM +
            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
    size = sizeof(struct tstorm_eth_function_common_config);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

static void
bxe_func_init(struct bxe_softc *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}

/*
 * Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *     or
 *   0 - if all the min_rates are 0.
 * In the latter case the fairness algorithm should be deactivated.
 * If all min rates are not zero then those that are zeroes will be set to 1.
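 *
 * Worked example (hypothetical values): with two visible vNICs whose
 * configured minimums come out as {2500, 0}, the zero entry is raised to
 * DEF_MIN_RATE and fairness stays enabled; with {0, 0} both entries are
 * raised but all_zero remains set, so fairness is deactivated below.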
 */
static void
bxe_calc_vn_min(struct bxe_softc *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to 100 */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

static void
bxe_calc_vn_max(struct bxe_softc *sc,
                int vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t read_cfg,
                  uint8_t cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}

static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
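        /* emulation/FPGA (slow chip revs): no congestion management */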
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM +
            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}

static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration at the function level.
     */
    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id = SC_FUNC(sc);
    func_init.func_id = SC_FUNC(sc);
    func_init.spq_map = sc->spq_dma.paddr;
    func_init.spq_prod = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianity */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer = sc->eq_prod;
    eq_data.index_id = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}

static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10685 uint32_t val = REG_RD(sc, addr); 10686 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10687 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10688 (sc->intr_count == 1)) ? TRUE : FALSE; 10689 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10690 10691 if (msix) { 10692 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10693 HC_CONFIG_0_REG_INT_LINE_EN_0); 10694 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10695 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10696 if (single_msix) { 10697 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10698 } 10699 } else if (msi) { 10700 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10701 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10702 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10703 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10704 } else { 10705 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10706 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10707 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10708 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10709 10710 if (!CHIP_IS_E1(sc)) { 10711 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10712 val, port, addr); 10713 10714 REG_WR(sc, addr, val); 10715 10716 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10717 } 10718 } 10719 10720 if (CHIP_IS_E1(sc)) { 10721 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10722 } 10723 10724 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10725 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10726 10727 REG_WR(sc, addr, val); 10728 10729 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10730 mb(); 10731 10732 if (!CHIP_IS_E1(sc)) { 10733 /* init leading/trailing edge */ 10734 if (IS_MF(sc)) { 10735 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10736 if (sc->port.pmf) { 10737 /* enable nig and gpio3 attention */ 10738 val |= 0x1100; 10739 } 10740 } else { 10741 val = 0xffff; 10742 } 10743 10744 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10745 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10746 } 10747 10748 /* make sure that interrupts are indeed enabled from here on */ 10749 mb(); 10750} 10751 10752static void 10753bxe_igu_int_enable(struct bxe_softc *sc) 10754{ 10755 uint32_t val; 10756 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10757 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10758 (sc->intr_count == 1)) ? TRUE : FALSE; 10759 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10760 10761 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10762 10763 if (msix) { 10764 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10765 IGU_PF_CONF_SINGLE_ISR_EN); 10766 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10767 IGU_PF_CONF_ATTN_BIT_EN); 10768 if (single_msix) { 10769 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10770 } 10771 } else if (msi) { 10772 val &= ~IGU_PF_CONF_INT_LINE_EN; 10773 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10774 IGU_PF_CONF_ATTN_BIT_EN | 10775 IGU_PF_CONF_SINGLE_ISR_EN); 10776 } else { 10777 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10778 val |= (IGU_PF_CONF_INT_LINE_EN | 10779 IGU_PF_CONF_ATTN_BIT_EN | 10780 IGU_PF_CONF_SINGLE_ISR_EN); 10781 } 10782 10783 /* clean previous status - need to configure igu prior to ack*/ 10784 if ((!msix) || single_msix) { 10785 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10786 bxe_ack_int(sc); 10787 } 10788 10789 val |= IGU_PF_CONF_FUNC_EN; 10790 10791 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10792 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10793 10794 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10795 10796 mb(); 10797 10798 /* init leading/trailing edge */ 10799 if (IS_MF(sc)) { 10800 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10801 if (sc->port.pmf) { 10802 /* enable nig and gpio3 attention */ 10803 val |= 0x1100; 10804 } 10805 } else { 10806 val = 0xffff; 10807 } 10808 10809 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10810 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10811 10812 /* make sure that interrupts are indeed enabled from here on */ 10813 mb(); 10814} 10815 10816static void 10817bxe_int_enable(struct bxe_softc *sc) 10818{ 10819 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10820 bxe_hc_int_enable(sc); 10821 } else { 10822 bxe_igu_int_enable(sc); 10823 } 10824} 10825 10826static void 10827bxe_hc_int_disable(struct bxe_softc *sc) 10828{ 10829 int port = SC_PORT(sc); 10830 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10831 uint32_t val = REG_RD(sc, addr); 10832 10833 /* 10834 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10835 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10836 * block 10837 */ 10838 if (CHIP_IS_E1(sc)) { 10839 /* 10840 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10841 * to prevent from HC sending interrupts after we exit the function 10842 */ 10843 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10844 10845 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10846 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10847 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10848 } else { 10849 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10850 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10851 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10852 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10853 } 10854 10855 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10856 10857 /* flush all outstanding writes */ 10858 mb(); 10859 10860 REG_WR(sc, addr, val); 10861 if (REG_RD(sc, addr) != val) { 10862 BLOGE(sc, "proper val not read from HC IGU!\n"); 10863 } 10864} 10865 10866static void 10867bxe_igu_int_disable(struct bxe_softc *sc) 10868{ 10869 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10870 10871 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10872 IGU_PF_CONF_INT_LINE_EN | 10873 IGU_PF_CONF_ATTN_BIT_EN); 10874 10875 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10876 10877 /* flush all outstanding writes */ 10878 mb(); 10879 10880 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10881 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10882 BLOGE(sc, "proper val not read from IGU!\n"); 10883 } 10884} 10885 10886static void 10887bxe_int_disable(struct bxe_softc *sc) 10888{ 10889 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10890 bxe_hc_int_disable(sc); 10891 } else { 10892 bxe_igu_int_disable(sc); 10893 } 10894} 10895 10896static void 10897bxe_nic_init(struct bxe_softc *sc, 10898 int load_code) 10899{ 10900 int i; 10901 10902 for (i = 0; i < sc->num_queues; i++) { 10903 bxe_init_eth_fp(sc, i); 10904 } 10905 10906 rmb(); /* ensure status block indices were read */ 10907 10908 bxe_init_rx_rings(sc); 10909 bxe_init_tx_rings(sc); 10910 10911 if (IS_VF(sc)) { 10912 return; 10913 } 10914 10915 /* initialize MOD_ABS interrupts */ 10916 elink_init_mod_abs_int(sc, &sc->link_vars, 10917 sc->devinfo.chip_id, 10918 sc->devinfo.shmem_base, 10919 sc->devinfo.shmem2_base, 10920 SC_PORT(sc)); 10921 10922 bxe_init_def_sb(sc); 10923 bxe_update_dsb_idx(sc); 10924 bxe_init_sp_ring(sc); 10925 bxe_init_eq_ring(sc); 10926 bxe_init_internal(sc, load_code); 10927 bxe_pf_init(sc); 10928 bxe_stats_init(sc); 
10929 10930 /* flush all before enabling interrupts */ 10931 mb(); 10932 10933 bxe_int_enable(sc); 10934 10935 /* check for SPIO5 */ 10936 bxe_attn_int_deasserted0(sc, 10937 REG_RD(sc, 10938 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10939 SC_PORT(sc)*4)) & 10940 AEU_INPUTS_ATTN_BITS_SPIO5); 10941} 10942 10943static inline void 10944bxe_init_objs(struct bxe_softc *sc) 10945{ 10946 /* mcast rules must be added to tx if tx switching is enabled */ 10947 ecore_obj_type o_type = 10948 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10949 ECORE_OBJ_TYPE_RX; 10950 10951 /* RX_MODE controlling object */ 10952 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10953 10954 /* multicast configuration controlling object */ 10955 ecore_init_mcast_obj(sc, 10956 &sc->mcast_obj, 10957 sc->fp[0].cl_id, 10958 sc->fp[0].index, 10959 SC_FUNC(sc), 10960 SC_FUNC(sc), 10961 BXE_SP(sc, mcast_rdata), 10962 BXE_SP_MAPPING(sc, mcast_rdata), 10963 ECORE_FILTER_MCAST_PENDING, 10964 &sc->sp_state, 10965 o_type); 10966 10967 /* Setup CAM credit pools */ 10968 ecore_init_mac_credit_pool(sc, 10969 &sc->macs_pool, 10970 SC_FUNC(sc), 10971 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10972 VNICS_PER_PATH(sc)); 10973 10974 ecore_init_vlan_credit_pool(sc, 10975 &sc->vlans_pool, 10976 SC_ABS_FUNC(sc) >> 1, 10977 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10978 VNICS_PER_PATH(sc)); 10979 10980 /* RSS configuration object */ 10981 ecore_init_rss_config_obj(sc, 10982 &sc->rss_conf_obj, 10983 sc->fp[0].cl_id, 10984 sc->fp[0].index, 10985 SC_FUNC(sc), 10986 SC_FUNC(sc), 10987 BXE_SP(sc, rss_rdata), 10988 BXE_SP_MAPPING(sc, rss_rdata), 10989 ECORE_FILTER_RSS_CONF_PENDING, 10990 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10991} 10992 10993/* 10994 * Initialize the function. This must be called before sending CLIENT_SETUP 10995 * for the first client. 
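 * The transition itself is driven through ecore_func_state_change() with
 * the ECORE_F_CMD_START command, as bxe_func_start() below shows.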
10996 */ 10997static inline int 10998bxe_func_start(struct bxe_softc *sc) 10999{ 11000 struct ecore_func_state_params func_params = { NULL }; 11001 struct ecore_func_start_params *start_params = &func_params.params.start; 11002 11003 /* Prepare parameters for function state transitions */ 11004 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 11005 11006 func_params.f_obj = &sc->func_obj; 11007 func_params.cmd = ECORE_F_CMD_START; 11008 11009 /* Function parameters */ 11010 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 11011 start_params->sd_vlan_tag = OVLAN(sc); 11012 11013 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 11014 start_params->network_cos_mode = STATIC_COS; 11015 } else { /* CHIP_IS_E1X */ 11016 start_params->network_cos_mode = FW_WRR; 11017 } 11018 11019 start_params->gre_tunnel_mode = 0; 11020 start_params->gre_tunnel_rss = 0; 11021 11022 return (ecore_func_state_change(sc, &func_params)); 11023} 11024 11025static int 11026bxe_set_power_state(struct bxe_softc *sc, 11027 uint8_t state) 11028{ 11029 uint16_t pmcsr; 11030 11031 /* If there is no power capability, silently succeed */ 11032 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 11033 BLOGW(sc, "No power capability\n"); 11034 return (0); 11035 } 11036 11037 pmcsr = pci_read_config(sc->dev, 11038 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11039 2); 11040 11041 switch (state) { 11042 case PCI_PM_D0: 11043 pci_write_config(sc->dev, 11044 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11045 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 11046 11047 if (pmcsr & PCIM_PSTAT_DMASK) { 11048 /* delay required during transition out of D3hot */ 11049 DELAY(20000); 11050 } 11051 11052 break; 11053 11054 case PCI_PM_D3hot: 11055 /* XXX if there are other clients above don't shut down the power */ 11056 11057 /* don't shut down the power for emulation and FPGA */ 11058 if (CHIP_REV_IS_SLOW(sc)) { 11059 return (0); 11060 } 11061 11062 pmcsr &= ~PCIM_PSTAT_DMASK; 11063 pmcsr |= PCIM_PSTAT_D3; 11064 11065 if (sc->wol) { 11066 pmcsr |= PCIM_PSTAT_PMEENABLE; 11067 } 11068 11069 pci_write_config(sc->dev, 11070 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11071 pmcsr, 4); 11072 11073 /* 11074 * No more memory access after this point until device is brought back 11075 * to D0 state. 
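 * (A read from a function in D3hot would typically complete as all-ones
 * on the host side, so there is nothing useful to access anyway.)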
 */
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = %d\n", state);
        return (-1);
    }

    return (0);
}


/* return TRUE if we succeeded to acquire the lock */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    if (SC_PATH(sc)) {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
    } else {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
    }
}

/* try to acquire a leader lock for current engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* close gates #2, #3 and #4 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
                          (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                          (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ?
"closing" : "opening"); 11192 11193 wmb(); 11194} 11195 11196/* poll for pending writes bit, it should get cleared in no more than 1s */ 11197static int 11198bxe_er_poll_igu_vq(struct bxe_softc *sc) 11199{ 11200 uint32_t cnt = 1000; 11201 uint32_t pend_bits = 0; 11202 11203 do { 11204 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 11205 11206 if (pend_bits == 0) { 11207 break; 11208 } 11209 11210 DELAY(1000); 11211 } while (--cnt > 0); 11212 11213 if (cnt == 0) { 11214 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 11215 return (-1); 11216 } 11217 11218 return (0); 11219} 11220 11221#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 11222 11223static void 11224bxe_clp_reset_prep(struct bxe_softc *sc, 11225 uint32_t *magic_val) 11226{ 11227 /* Do some magic... */ 11228 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11229 *magic_val = val & SHARED_MF_CLP_MAGIC; 11230 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 11231} 11232 11233/* restore the value of the 'magic' bit */ 11234static void 11235bxe_clp_reset_done(struct bxe_softc *sc, 11236 uint32_t magic_val) 11237{ 11238 /* Restore the 'magic' bit value... */ 11239 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11240 MFCFG_WR(sc, shared_mf_config.clp_mb, 11241 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 11242} 11243 11244/* prepare for MCP reset, takes care of CLP configurations */ 11245static void 11246bxe_reset_mcp_prep(struct bxe_softc *sc, 11247 uint32_t *magic_val) 11248{ 11249 uint32_t shmem; 11250 uint32_t validity_offset; 11251 11252 /* set `magic' bit in order to save MF config */ 11253 if (!CHIP_IS_E1(sc)) { 11254 bxe_clp_reset_prep(sc, magic_val); 11255 } 11256 11257 /* get shmem offset */ 11258 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11259 validity_offset = 11260 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 11261 11262 /* Clear validity map flags */ 11263 if (shmem > 0) { 11264 REG_WR(sc, shmem + validity_offset, 0); 11265 } 11266} 11267 11268#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 11269#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 11270 11271static void 11272bxe_mcp_wait_one(struct bxe_softc *sc) 11273{ 11274 /* special handling for emulation and FPGA (10 times longer) */ 11275 if (CHIP_REV_IS_SLOW(sc)) { 11276 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 11277 } else { 11278 DELAY((MCP_ONE_TIMEOUT) * 1000); 11279 } 11280} 11281 11282/* initialize shmem_base and waits for validity signature to appear */ 11283static int 11284bxe_init_shmem(struct bxe_softc *sc) 11285{ 11286 int cnt = 0; 11287 uint32_t val = 0; 11288 11289 do { 11290 sc->devinfo.shmem_base = 11291 sc->link_params.shmem_base = 11292 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11293 11294 if (sc->devinfo.shmem_base) { 11295 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 11296 if (val & SHR_MEM_VALIDITY_MB) 11297 return (0); 11298 } 11299 11300 bxe_mcp_wait_one(sc); 11301 11302 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 11303 11304 BLOGE(sc, "BAD MCP validity signature\n"); 11305 11306 return (-1); 11307} 11308 11309static int 11310bxe_reset_mcp_comp(struct bxe_softc *sc, 11311 uint32_t magic_val) 11312{ 11313 int rc = bxe_init_shmem(sc); 11314 11315 /* Restore the `magic' bit value */ 11316 if (!CHIP_IS_E1(sc)) { 11317 bxe_clp_reset_done(sc, magic_val); 11318 } 11319 11320 return (rc); 11321} 11322 11323static void 11324bxe_pxp_prep(struct bxe_softc *sc) 11325{ 11326 if (!CHIP_IS_E1(sc)) { 11327 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 11328 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 11329 
        wmb();
    }
}

/*
 * Reset the whole chip except for:
 * - PCIE core
 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 * - IGU
 * - MISC (including AEU)
 * - GRC
 * - RBCN, RBCP
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in a 4-port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     * - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck, DMAE commands
     * would not return. Therefore it's important to reset the second
     * reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
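 *
 * In short: always touch RESET_REG_2 before RESET_REG_1, which is
 * exactly the order the writes below follow.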
11423 */ 11424 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11425 reset_mask2 & (~not_reset_mask2)); 11426 11427 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11428 reset_mask1 & (~not_reset_mask1)); 11429 11430 mb(); 11431 wmb(); 11432 11433 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11434 reset_mask2 & (~stay_reset2)); 11435 11436 mb(); 11437 wmb(); 11438 11439 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11440 wmb(); 11441} 11442 11443static int 11444bxe_process_kill(struct bxe_softc *sc, 11445 uint8_t global) 11446{ 11447 int cnt = 1000; 11448 uint32_t val = 0; 11449 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11450 uint32_t tags_63_32 = 0; 11451 11452 /* Empty the Tetris buffer, wait for 1s */ 11453 do { 11454 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11455 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11456 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11457 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11458 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11459 if (CHIP_IS_E3(sc)) { 11460 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11461 } 11462 11463 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11464 ((port_is_idle_0 & 0x1) == 0x1) && 11465 ((port_is_idle_1 & 0x1) == 0x1) && 11466 (pgl_exp_rom2 == 0xffffffff) && 11467 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11468 break; 11469 DELAY(1000); 11470 } while (cnt-- > 0); 11471 11472 if (cnt <= 0) { 11473 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11474 "are still outstanding read requests after 1s! " 11475 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11476 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11477 sr_cnt, blk_cnt, port_is_idle_0, 11478 port_is_idle_1, pgl_exp_rom2); 11479 return (-1); 11480 } 11481 11482 mb(); 11483 11484 /* Close gates #2, #3 and #4 */ 11485 bxe_set_234_gates(sc, TRUE); 11486 11487 /* Poll for IGU VQs for 57712 and newer chips */ 11488 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11489 return (-1); 11490 } 11491 11492 /* XXX indicate that "process kill" is in progress to MCP */ 11493 11494 /* clear "unprepared" bit */ 11495 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11496 mb(); 11497 11498 /* Make sure all is written to the chip before the reset */ 11499 wmb(); 11500 11501 /* 11502 * Wait for 1ms to empty GLUE and PCI-E core queues, 11503 * PSWHST, GRC and PSWRD Tetris buffer. 
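 * (DELAY() busy-waits in microseconds, hence the DELAY(1000) that follows.)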
 */
    DELAY(1000);

    /* Prepare for chip reset: */
    /* MCP */
    if (global) {
        bxe_reset_mcp_prep(sc, &val);
    }

    /* PXP */
    bxe_pxp_prep(sc);
    mb();

    /* reset the chip */
    bxe_process_kill_chip_reset(sc, global);
    mb();

    /* clear errors in PGB */
    if (!CHIP_IS_E1(sc))
        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);

    /* Recover after reset: */
    /* MCP */
    if (global && bxe_reset_mcp_comp(sc, val)) {
        return (-1);
    }

    /* XXX add resetting the NO_MCP mode DB here */

    /* Open the gates #2, #3 and #4 */
    bxe_set_234_gates(sc, FALSE);

    /*
     * XXX
     * IGU/AEU preparation: bring the AEU/IGU back to a reset state,
     * re-enable attentions
     */

    return (0);
}

static int
bxe_leader_reset(struct bxe_softc *sc)
{
    int rc = 0;
    uint8_t global = bxe_reset_is_global(sc);
    uint32_t load_code;

    /*
     * If not going to reset MCP, load "fake" driver to reset HW while
     * driver is owner of the HW.
     */
    if (!global && !BXE_NOMCP(sc)) {
        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset;
        }

        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
            BLOGE(sc, "MCP unexpected response, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }

        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }
    }

    /* try to recover after the failure */
    if (bxe_process_kill(sc, global)) {
        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
        rc = -1;
        goto exit_leader_reset2;
    }

    /*
     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the
     * driver state.
     */
    bxe_set_reset_done(sc);
    if (global) {
        bxe_clear_reset_global(sc);
    }

exit_leader_reset2:

    /* unload "fake driver" if it was loaded */
    if (!global && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

exit_leader_reset:

    sc->is_leader = 0;
    bxe_release_leader_lock(sc);

    mb();
    return (rc);
}

/*
 * prepare INIT transition, parameters configured:
 * - HC configuration
 * - Queue's CDU context
 */
static void
bxe_pf_q_prep_init(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   struct ecore_queue_init_params *init_params)
{
    uint8_t cos;
    int cxt_index, cxt_offset;

    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);

    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);

    /* HC rate */
    init_params->rx.hc_rate =
        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
    init_params->tx.hc_rate =
        sc->hc_tx_ticks ?
(1000000 / sc->hc_tx_ticks) : 0; 11636 11637 /* FW SB ID */ 11638 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11639 11640 /* CQ index among the SB indices */ 11641 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11642 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11643 11644 /* set maximum number of COSs supported by this queue */ 11645 init_params->max_cos = sc->max_cos; 11646 11647 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11648 fp->index, init_params->max_cos); 11649 11650 /* set the context pointers queue object */ 11651 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11652 /* XXX change index/cid here if ever support multiple tx CoS */ 11653 /* fp->txdata[cos]->cid */ 11654 cxt_index = fp->index / ILT_PAGE_CIDS; 11655 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11656 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11657 } 11658} 11659 11660/* set flags that are common for the Tx-only and not normal connections */ 11661static unsigned long 11662bxe_get_common_flags(struct bxe_softc *sc, 11663 struct bxe_fastpath *fp, 11664 uint8_t zero_stats) 11665{ 11666 unsigned long flags = 0; 11667 11668 /* PF driver will always initialize the Queue to an ACTIVE state */ 11669 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11670 11671 /* 11672 * tx only connections collect statistics (on the same index as the 11673 * parent connection). The statistics are zeroed when the parent 11674 * connection is initialized. 11675 */ 11676 11677 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11678 if (zero_stats) { 11679 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11680 } 11681 11682 /* 11683 * tx only connections can support tx-switching, though their 11684 * CoS-ness doesn't survive the loopback 11685 */ 11686 if (sc->flags & BXE_TX_SWITCHING) { 11687 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11688 } 11689 11690 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11691 11692 return (flags); 11693} 11694 11695static unsigned long 11696bxe_get_q_flags(struct bxe_softc *sc, 11697 struct bxe_fastpath *fp, 11698 uint8_t leading) 11699{ 11700 unsigned long flags = 0; 11701 11702 if (IS_MF_SD(sc)) { 11703 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11704 } 11705 11706 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11707 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11708 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11709#if 0 11710 if (fp->mode == TPA_MODE_GRO) 11711 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); 11712#endif 11713 } 11714 11715 if (leading) { 11716 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11717 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11718 } 11719 11720 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11721 11722#if 0 11723 /* configure silent vlan removal */ 11724 if (IS_MF_AFEX(sc)) { 11725 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); 11726 } 11727#endif 11728 11729 /* merge with common flags */ 11730 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11731} 11732 11733static void 11734bxe_pf_q_prep_general(struct bxe_softc *sc, 11735 struct bxe_fastpath *fp, 11736 struct ecore_general_setup_params *gen_init, 11737 uint8_t cos) 11738{ 11739 gen_init->stat_id = bxe_stats_id(fp); 11740 gen_init->spcl_id = fp->cl_id; 11741 gen_init->mtu = sc->mtu; 11742 gen_init->cos = cos; 11743} 11744 11745static void 11746bxe_pf_rx_q_prep(struct bxe_softc *sc, 11747 struct bxe_fastpath *fp, 11748 struct rxq_pause_params *pause, 11749 struct ecore_rxq_setup_params *rxq_init) 11750{ 11751 uint8_t max_sge = 0; 11752 uint16_t sge_sz = 
0;
    uint16_t tpa_agg_size = 0;

    pause->sge_th_lo = SGE_TH_LO(sc);
    pause->sge_th_hi = SGE_TH_HI(sc);

    /* validate SGE ring has enough to cross high threshold */
    if (sc->dropless_fc &&
        (pause->sge_th_hi + FW_PREFETCH_CNT) >
        (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
        BLOGW(sc, "sge ring threshold limit\n");
    }

    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
    tpa_agg_size = (2 * sc->mtu);
    if (tpa_agg_size < sc->max_aggregation_size) {
        tpa_agg_size = sc->max_aggregation_size;
    }

    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
               (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);

        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map = fp->rx_dma.paddr;
    rxq_init->sge_map = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including padding).
     */
    rxq_init->buf_sz = (fp->rx_buf_size -
                        IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz = tpa_agg_size;
    rxq_init->sge_buf_sz = sge_sz;
    rxq_init->max_sges_pkt = max_sge;
    rxq_init->rss_engine_id = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number of simultaneous TPA aggregations for this Queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}

static void
bxe_pf_tx_q_prep(struct bxe_softc *sc,
                 struct bxe_fastpath *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11852 * fp->txdata[cos]->tx_dma.paddr; 11853 */ 11854 txq_init->dscr_map = fp->tx_dma.paddr; 11855 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11856 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11857 txq_init->fw_sb_id = fp->fw_sb_id; 11858 11859 /* 11860 * set the TSS leading client id for TX classification to the 11861 * leading RSS client id 11862 */ 11863 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11864} 11865 11866/* 11867 * This function performs 2 steps in a queue state machine: 11868 * 1) RESET->INIT 11869 * 2) INIT->SETUP 11870 */ 11871static int 11872bxe_setup_queue(struct bxe_softc *sc, 11873 struct bxe_fastpath *fp, 11874 uint8_t leading) 11875{ 11876 struct ecore_queue_state_params q_params = { NULL }; 11877 struct ecore_queue_setup_params *setup_params = 11878 &q_params.params.setup; 11879#if 0 11880 struct ecore_queue_setup_tx_only_params *tx_only_params = 11881 &q_params.params.tx_only; 11882 uint8_t tx_index; 11883#endif 11884 int rc; 11885 11886 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11887 11888 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11889 11890 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11891 11892 /* we want to wait for completion in this context */ 11893 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11894 11895 /* prepare the INIT parameters */ 11896 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11897 11898 /* Set the command */ 11899 q_params.cmd = ECORE_Q_CMD_INIT; 11900 11901 /* Change the state to INIT */ 11902 rc = ecore_queue_state_change(sc, &q_params); 11903 if (rc) { 11904 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11905 return (rc); 11906 } 11907 11908 BLOGD(sc, DBG_LOAD, "init complete\n"); 11909 11910 /* now move the Queue to the SETUP state */ 11911 memset(setup_params, 0, sizeof(*setup_params)); 11912 11913 /* set Queue flags */ 11914 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11915 11916 /* set general SETUP parameters */ 11917 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11918 FIRST_TX_COS_INDEX); 11919 11920 bxe_pf_rx_q_prep(sc, fp, 11921 &setup_params->pause_params, 11922 &setup_params->rxq_params); 11923 11924 bxe_pf_tx_q_prep(sc, fp, 11925 &setup_params->txq_params, 11926 FIRST_TX_COS_INDEX); 11927 11928 /* Set the command */ 11929 q_params.cmd = ECORE_Q_CMD_SETUP; 11930 11931 /* change the state to SETUP */ 11932 rc = ecore_queue_state_change(sc, &q_params); 11933 if (rc) { 11934 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11935 return (rc); 11936 } 11937 11938#if 0 11939 /* loop through the relevant tx-only indices */ 11940 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11941 tx_index < sc->max_cos; 11942 tx_index++) { 11943 /* prepare and send tx-only ramrod*/ 11944 rc = bxe_setup_tx_only(sc, fp, &q_params, 11945 tx_only_params, tx_index, leading); 11946 if (rc) { 11947 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11948 fp->index, tx_index); 11949 return (rc); 11950 } 11951 } 11952#endif 11953 11954 return (rc); 11955} 11956 11957static int 11958bxe_setup_leading(struct bxe_softc *sc) 11959{ 11960 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11961} 11962 11963static int 11964bxe_config_rss_pf(struct bxe_softc *sc, 11965 struct ecore_rss_config_obj *rss_obj, 11966 uint8_t config_hash) 11967{ 11968 struct ecore_config_rss_params params = { NULL }; 11969 int i; 11970 11971 /* 11972 * Although RSS is meaningless when there is a single HW queue we 11973 * still need it enabled in order to have HW Rx hash generated.
11974 */ 11975 11976 params.rss_obj = rss_obj; 11977 11978 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11979 11980 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11981 11982 /* RSS configuration */ 11983 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11984 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11985 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11986 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11987 if (rss_obj->udp_rss_v4) { 11988 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11989 } 11990 if (rss_obj->udp_rss_v6) { 11991 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11992 } 11993 11994 /* Hash bits */ 11995 params.rss_result_mask = MULTI_MASK; 11996 11997 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11998 11999 if (config_hash) { 12000 /* RSS keys */ 12001 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 12002 params.rss_key[i] = arc4random(); 12003 } 12004 12005 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 12006 } 12007 12008 return (ecore_config_rss(sc, &params)); 12009} 12010 12011static int 12012bxe_config_rss_eth(struct bxe_softc *sc, 12013 uint8_t config_hash) 12014{ 12015 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 12016} 12017 12018static int 12019bxe_init_rss_pf(struct bxe_softc *sc) 12020{ 12021 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 12022 int i; 12023 12024 /* 12025 * Prepare the initial contents of the indirection table if 12026 * RSS is enabled 12027 */ 12028 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 12029 sc->rss_conf_obj.ind_table[i] = 12030 (sc->fp->cl_id + (i % num_eth_queues)); 12031 } 12032 12033 if (sc->udp_rss) { 12034 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12035 } 12036 12037 /* 12038 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12039 * per-port, so if explicit configuration is needed, do it only 12040 * for a PMF. 12041 * 12042 * For 57712 and newer it's a per-function configuration. 12043 */ 12044 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12045} 12046 12047static int 12048bxe_set_mac_one(struct bxe_softc *sc, 12049 uint8_t *mac, 12050 struct ecore_vlan_mac_obj *obj, 12051 uint8_t set, 12052 int mac_type, 12053 unsigned long *ramrod_flags) 12054{ 12055 struct ecore_vlan_mac_ramrod_params ramrod_param; 12056 int rc; 12057 12058 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12059 12060 /* fill in general parameters */ 12061 ramrod_param.vlan_mac_obj = obj; 12062 ramrod_param.ramrod_flags = *ramrod_flags; 12063 12064 /* fill a user request section if needed */ 12065 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12066 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12067 12068 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12069 12070 /* Set the command: ADD or DEL */ 12071 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12072 ECORE_VLAN_MAC_DEL; 12073 } 12074 12075 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12076 12077 if (rc == ECORE_EXISTS) { 12078 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12079 /* do not treat adding same MAC as error */ 12080 rc = 0; 12081 } else if (rc < 0) { 12082 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
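/*
 * (A usage sketch, mirroring bxe_set_eth_mac() below: callers set
 * RAMROD_COMP_WAIT in *ramrod_flags and pass set=TRUE to add a MAC or
 * set=FALSE to delete it; a duplicate ADD comes back as ECORE_EXISTS
 * and is downgraded to success above.)
 */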
"Set" : "Delete"), rc); 12083 } 12084 12085 return (rc); 12086} 12087 12088static int 12089bxe_set_eth_mac(struct bxe_softc *sc, 12090 uint8_t set) 12091{ 12092 unsigned long ramrod_flags = 0; 12093 12094 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12095 12096 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12097 12098 /* Eth MAC is set on RSS leading client (fp[0]) */ 12099 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12100 &sc->sp_objs->mac_obj, 12101 set, ECORE_ETH_MAC, &ramrod_flags)); 12102} 12103 12104#if 0 12105static void 12106bxe_update_max_mf_config(struct bxe_softc *sc, 12107 uint32_t value) 12108{ 12109 /* load old values */ 12110 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12111 12112 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12113 /* leave all but MAX value */ 12114 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12115 12116 /* set new MAX value */ 12117 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12118 FUNC_MF_CFG_MAX_BW_MASK); 12119 12120 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12121 } 12122} 12123#endif 12124 12125static int 12126bxe_get_cur_phy_idx(struct bxe_softc *sc) 12127{ 12128 uint32_t sel_phy_idx = 0; 12129 12130 if (sc->link_params.num_phys <= 1) { 12131 return (ELINK_INT_PHY); 12132 } 12133 12134 if (sc->link_vars.link_up) { 12135 sel_phy_idx = ELINK_EXT_PHY1; 12136 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12137 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12138 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12139 ELINK_SUPPORTED_FIBRE)) 12140 sel_phy_idx = ELINK_EXT_PHY2; 12141 } else { 12142 switch (elink_phy_selection(&sc->link_params)) { 12143 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12144 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12145 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12146 sel_phy_idx = ELINK_EXT_PHY1; 12147 break; 12148 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12149 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12150 sel_phy_idx = ELINK_EXT_PHY2; 12151 break; 12152 } 12153 } 12154 12155 return (sel_phy_idx); 12156} 12157 12158static int 12159bxe_get_link_cfg_idx(struct bxe_softc *sc) 12160{ 12161 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12162 12163 /* 12164 * The selected activated PHY is always after swapping (in case PHY 12165 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12166 * the configuration 12167 */ 12168 12169 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12170 if (sel_phy_idx == ELINK_EXT_PHY1) 12171 sel_phy_idx = ELINK_EXT_PHY2; 12172 else if (sel_phy_idx == ELINK_EXT_PHY2) 12173 sel_phy_idx = ELINK_EXT_PHY1; 12174 } 12175 12176 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12177} 12178 12179static void 12180bxe_set_requested_fc(struct bxe_softc *sc) 12181{ 12182 /* 12183 * Initialize link parameters structure variables 12184 * It is recommended to turn off RX FC for jumbo frames 12185 * for better performance 12186 */ 12187 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12188 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12189 } else { 12190 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12191 } 12192} 12193 12194static void 12195bxe_calc_fc_adv(struct bxe_softc *sc) 12196{ 12197 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12198 switch (sc->link_vars.ieee_fc & 12199 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12200 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12201 default: 12202 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12203 ADVERTISED_Pause); 12204 break; 12205 12206 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12207 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12208 ADVERTISED_Pause); 12209 break; 12210 12211 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12212 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12213 break; 12214 } 12215} 12216 12217static uint16_t 12218bxe_get_mf_speed(struct bxe_softc *sc) 12219{ 12220 uint16_t line_speed = sc->link_vars.line_speed; 12221 if (IS_MF(sc)) { 12222 uint16_t maxCfg = 12223 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12224 12225 /* calculate the current MAX line speed limit for the MF devices */ 12226 if (IS_MF_SI(sc)) { 12227 line_speed = (line_speed * maxCfg) / 100; 12228 } else { /* SD mode */ 12229 uint16_t vn_max_rate = maxCfg * 100; 12230 12231 if (vn_max_rate < line_speed) { 12232 line_speed = vn_max_rate; 12233 } 12234 } 12235 } 12236 12237 return (line_speed); 12238} 12239 12240static void 12241bxe_fill_report_data(struct bxe_softc *sc, 12242 struct bxe_link_report_data *data) 12243{ 12244 uint16_t line_speed = bxe_get_mf_speed(sc); 12245 12246 memset(data, 0, sizeof(*data)); 12247 12248 /* fill the report data with the effective line speed */ 12249 data->line_speed = line_speed; 12250 12251 /* Link is down */ 12252 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12253 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12254 } 12255 12256 /* Full DUPLEX */ 12257 if (sc->link_vars.duplex == DUPLEX_FULL) { 12258 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12259 } 12260 12261 /* Rx Flow Control is ON */ 12262 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12263 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12264 } 12265 12266 /* Tx Flow Control is ON */ 12267 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12268 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12269 } 12270} 12271 12272/* report link status to OS, should be called under phy_lock */ 12273static void 12274bxe_link_report_locked(struct bxe_softc *sc) 12275{ 12276 struct bxe_link_report_data cur_data; 12277 12278 /* reread mf_cfg */ 12279 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12280 bxe_read_mf_cfg(sc); 12281 } 12282 12283 /* Read the current link report info */ 12284 
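/*
 * A worked example of the MF speed scaling folded in by
 * bxe_get_mf_speed() above (illustrative numbers): with a 10000 Mbps
 * physical link and maxCfg == 40, an SI function reports
 * (10000 * 40) / 100 = 4000 Mbps, while an SD function is capped at
 * vn_max_rate = 40 * 100 = 4000 Mbps only when that is below the
 * physical line speed.
 */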
bxe_fill_report_data(sc, &cur_data); 12285 12286 /* Don't report link down or exactly the same link status twice */ 12287 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12288 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12289 &sc->last_reported_link.link_report_flags) && 12290 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12291 &cur_data.link_report_flags))) { 12292 return; 12293 } 12294 12295 sc->link_cnt++; 12296 12297 /* report new link params and remember the state for the next time */ 12298 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12299 12300 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12301 &cur_data.link_report_flags)) { 12302 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 12303 BLOGI(sc, "NIC Link is Down\n"); 12304 } else { 12305 const char *duplex; 12306 const char *flow; 12307 12308 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12309 &cur_data.link_report_flags)) { 12310 duplex = "full"; 12311 } else { 12312 duplex = "half"; 12313 } 12314 12315 /* 12316 * Handle the FC at the end so that only these flags would be 12317 * possibly set. This way we may easily check if there is no FC 12318 * enabled. 12319 */ 12320 if (cur_data.link_report_flags) { 12321 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12322 &cur_data.link_report_flags) && 12323 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12324 &cur_data.link_report_flags)) { 12325 flow = "ON - receive & transmit"; 12326 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12327 &cur_data.link_report_flags) && 12328 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12329 &cur_data.link_report_flags)) { 12330 flow = "ON - receive"; 12331 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12332 &cur_data.link_report_flags) && 12333 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12334 &cur_data.link_report_flags)) { 12335 flow = "ON - transmit"; 12336 } else { 12337 flow = "none"; /* possible? 
*/ 12338 } 12339 } else { 12340 flow = "none"; 12341 } 12342 12343 if_link_state_change(sc->ifp, LINK_STATE_UP); 12344 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12345 cur_data.line_speed, duplex, flow); 12346 } 12347} 12348 12349static void 12350bxe_link_report(struct bxe_softc *sc) 12351{ 12352 bxe_acquire_phy_lock(sc); 12353 bxe_link_report_locked(sc); 12354 bxe_release_phy_lock(sc); 12355} 12356 12357static void 12358bxe_link_status_update(struct bxe_softc *sc) 12359{ 12360 if (sc->state != BXE_STATE_OPEN) { 12361 return; 12362 } 12363 12364#if 0 12365 /* read updated dcb configuration */ 12366 if (IS_PF(sc)) 12367 bxe_dcbx_pmf_update(sc); 12368#endif 12369 12370 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12371 elink_link_status_update(&sc->link_params, &sc->link_vars); 12372 } else { 12373 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12374 ELINK_SUPPORTED_10baseT_Full | 12375 ELINK_SUPPORTED_100baseT_Half | 12376 ELINK_SUPPORTED_100baseT_Full | 12377 ELINK_SUPPORTED_1000baseT_Full | 12378 ELINK_SUPPORTED_2500baseX_Full | 12379 ELINK_SUPPORTED_10000baseT_Full | 12380 ELINK_SUPPORTED_TP | 12381 ELINK_SUPPORTED_FIBRE | 12382 ELINK_SUPPORTED_Autoneg | 12383 ELINK_SUPPORTED_Pause | 12384 ELINK_SUPPORTED_Asym_Pause); 12385 sc->port.advertising[0] = sc->port.supported[0]; 12386 12387 sc->link_params.sc = sc; 12388 sc->link_params.port = SC_PORT(sc); 12389 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12390 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12391 sc->link_params.req_line_speed[0] = SPEED_10000; 12392 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12393 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12394 12395 if (CHIP_REV_IS_FPGA(sc)) { 12396 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12397 sc->link_vars.line_speed = ELINK_SPEED_1000; 12398 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12399 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12400 } else { 12401 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12402 sc->link_vars.line_speed = ELINK_SPEED_10000; 12403 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12404 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12405 } 12406 12407 sc->link_vars.link_up = 1; 12408 12409 sc->link_vars.duplex = DUPLEX_FULL; 12410 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12411 12412 if (IS_PF(sc)) { 12413 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12414 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12415 bxe_link_report(sc); 12416 } 12417 } 12418 12419 if (IS_PF(sc)) { 12420 if (sc->link_vars.link_up) { 12421 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12422 } else { 12423 bxe_stats_handle(sc, STATS_EVENT_STOP); 12424 } 12425 bxe_link_report(sc); 12426 } else { 12427 bxe_link_report(sc); 12428 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12429 } 12430} 12431 12432static int 12433bxe_initial_phy_init(struct bxe_softc *sc, 12434 int load_mode) 12435{ 12436 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12437 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12438 struct elink_params *lp = &sc->link_params; 12439 12440 bxe_set_requested_fc(sc); 12441 12442 if (CHIP_REV_IS_SLOW(sc)) { 12443 uint32_t bond = CHIP_BOND_ID(sc); 12444 uint32_t feat = 0; 12445 12446 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12447 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12448 } else if (bond & 0x4) { 12449 if (CHIP_IS_E3(sc)) { 12450 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12451 } else { 12452 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12453 } 12454 } else if (bond & 0x8) 
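/*
 * (Summary of the bond-ID decode in this emulation-only block, as
 * written: bit 0x4 disables the XMAC on E3 / the BMAC otherwise, bit
 * 0x8 disables the UMAC on E3 / the EMAC otherwise, and bit 0x2 below
 * disables the EMAC. These branches only run when CHIP_REV_IS_SLOW,
 * i.e. on emulation/FPGA.)
 */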
{ 12455 if (CHIP_IS_E3(sc)) { 12456 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12457 } else { 12458 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12459 } 12460 } 12461 12462 /* disable EMAC for E3 and above */ 12463 if (bond & 0x2) { 12464 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12465 } 12466 12467 sc->link_params.feature_config_flags |= feat; 12468 } 12469 12470 bxe_acquire_phy_lock(sc); 12471 12472 if (load_mode == LOAD_DIAG) { 12473 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12474 /* Prefer doing PHY loopback at 10G speed, if possible */ 12475 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12476 if (lp->speed_cap_mask[cfg_idx] & 12477 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12478 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12479 } else { 12480 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12481 } 12482 } 12483 } 12484 12485 if (load_mode == LOAD_LOOPBACK_EXT) { 12486 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12487 } 12488 12489 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12490 12491 bxe_release_phy_lock(sc); 12492 12493 bxe_calc_fc_adv(sc); 12494 12495 if (sc->link_vars.link_up) { 12496 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12497 bxe_link_report(sc); 12498 } 12499 12500 if (!CHIP_REV_IS_SLOW(sc)) { 12501 bxe_periodic_start(sc); 12502 } 12503 12504 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12505 return (rc); 12506} 12507 12508/* must be called under IF_ADDR_LOCK */ 12509 12510static int 12511bxe_set_mc_list(struct bxe_softc *sc) 12512{ 12513 struct ecore_mcast_ramrod_params rparam = { NULL }; 12514 int rc = 0; 12515 int mc_count = 0; 12516 int mcnt, i; 12517 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; 12518 unsigned char *mta; 12519 if_t ifp = sc->ifp; 12520 12521 mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ 12522 if (!mc_count) 12523 return (0); 12524 12525 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * 12526 mc_count, M_DEVBUF, M_NOWAIT); 12527 12528 if(mta == NULL) { 12529 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12530 return (-1); 12531 } 12532 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); 12533 12534 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); 12535 mc_mac_start = mc_mac; 12536 12537 if (!mc_mac) { 12538 free(mta, M_DEVBUF); 12539 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12540 return (-1); 12541 } 12542 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 12543 12544 /* mta and mcnt not expected to be different */ 12545 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 12546 12547 12548 rparam.mcast_obj = &sc->mcast_obj; 12549 ECORE_LIST_INIT(&rparam.mcast_list); 12550 12551 for(i=0; i< mcnt; i++) { 12552 12553 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); 12554 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); 12555 12556 BLOGD(sc, DBG_LOAD, 12557 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12558 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12559 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12560 12561 mc_mac++; 12562 } 12563 rparam.mcast_list_len = mc_count; 12564 12565 BXE_MCAST_LOCK(sc); 12566 12567 /* first, clear all configured multicast MACs */ 12568 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12569 if (rc < 0) { 12570 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 12571 BXE_MCAST_UNLOCK(sc); 12572 free(mc_mac_start, M_DEVBUF); 12573 free(mta, M_DEVBUF); 12574 return (rc); 12575 } 12576 12577 /* Now add the new MACs */ 12578 rc = ecore_config_mcast(sc, 
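/*
 * (Second pass: the ECORE_MCAST_CMD_DEL pass above flushed the
 * previously configured multicast MACs; this ADD pass programs the
 * list just harvested from the ifnet into the mcast object.)
 */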
&rparam, ECORE_MCAST_CMD_ADD); 12579 if (rc < 0) { 12580 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12581 } 12582 12583 BXE_MCAST_UNLOCK(sc); 12584 12585 free(mc_mac_start, M_DEVBUF); 12586 free(mta, M_DEVBUF); 12587 12588 return (rc); 12589} 12590 12591static int 12592bxe_set_uc_list(struct bxe_softc *sc) 12593{ 12594 if_t ifp = sc->ifp; 12595 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12596 struct ifaddr *ifa; 12597 unsigned long ramrod_flags = 0; 12598 int rc; 12599 12600#if __FreeBSD_version < 800000 12601 IF_ADDR_LOCK(ifp); 12602#else 12603 if_addr_rlock(ifp); 12604#endif 12605 12606 /* first schedule a cleanup up of old configuration */ 12607 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12608 if (rc < 0) { 12609 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12610#if __FreeBSD_version < 800000 12611 IF_ADDR_UNLOCK(ifp); 12612#else 12613 if_addr_runlock(ifp); 12614#endif 12615 return (rc); 12616 } 12617 12618 ifa = if_getifaddr(ifp); /* XXX Is this structure */ 12619 while (ifa) { 12620 if (ifa->ifa_addr->sa_family != AF_LINK) { 12621 ifa = TAILQ_NEXT(ifa, ifa_link); 12622 continue; 12623 } 12624 12625 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12626 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12627 if (rc == -EEXIST) { 12628 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12629 /* do not treat adding same MAC as an error */ 12630 rc = 0; 12631 } else if (rc < 0) { 12632 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12633#if __FreeBSD_version < 800000 12634 IF_ADDR_UNLOCK(ifp); 12635#else 12636 if_addr_runlock(ifp); 12637#endif 12638 return (rc); 12639 } 12640 12641 ifa = TAILQ_NEXT(ifa, ifa_link); 12642 } 12643 12644#if __FreeBSD_version < 800000 12645 IF_ADDR_UNLOCK(ifp); 12646#else 12647 if_addr_runlock(ifp); 12648#endif 12649 12650 /* Execute the pending commands */ 12651 bit_set(&ramrod_flags, RAMROD_CONT); 12652 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12653 ECORE_UC_LIST_MAC, &ramrod_flags)); 12654} 12655 12656static void 12657bxe_set_rx_mode(struct bxe_softc *sc) 12658{ 12659 if_t ifp = sc->ifp; 12660 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12661 12662 if (sc->state != BXE_STATE_OPEN) { 12663 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12664 return; 12665 } 12666 12667 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); 12668 12669 if (if_getflags(ifp) & IFF_PROMISC) { 12670 rx_mode = BXE_RX_MODE_PROMISC; 12671 } else if ((if_getflags(ifp) & IFF_ALLMULTI) || 12672 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && 12673 CHIP_IS_E1(sc))) { 12674 rx_mode = BXE_RX_MODE_ALLMULTI; 12675 } else { 12676 if (IS_PF(sc)) { 12677 /* some multicasts */ 12678 if (bxe_set_mc_list(sc) < 0) { 12679 rx_mode = BXE_RX_MODE_ALLMULTI; 12680 } 12681 if (bxe_set_uc_list(sc) < 0) { 12682 rx_mode = BXE_RX_MODE_PROMISC; 12683 } 12684 } 12685#if 0 12686 else { 12687 /* 12688 * Configuring mcast to a VF involves sleeping (when we 12689 * wait for the PF's response). 
Since this function is 12690 * called from a non sleepable context we must schedule 12691 * a work item for this purpose 12692 */ 12693 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12694 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12695 } 12696#endif 12697 } 12698 12699 sc->rx_mode = rx_mode; 12700 12701 /* schedule the rx_mode command */ 12702 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12703 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12704 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12705 return; 12706 } 12707 12708 if (IS_PF(sc)) { 12709 bxe_set_storm_rx_mode(sc); 12710 } 12711#if 0 12712 else { 12713 /* 12714 * Configuring mcast to a VF involves sleeping (when we 12715 * wait for the PF's response). Since this function is 12716 * called from a non sleepable context we must schedule 12717 * a work item for this purpose 12718 */ 12719 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12720 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12721 } 12722#endif 12723 12724} 12725 12726 12727/* update flags in shmem */ 12728static void 12729bxe_update_drv_flags(struct bxe_softc *sc, 12730 uint32_t flags, 12731 uint32_t set) 12732{ 12733 uint32_t drv_flags; 12734 12735 if (SHMEM2_HAS(sc, drv_flags)) { 12736 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12737 drv_flags = SHMEM2_RD(sc, drv_flags); 12738 12739 if (set) { 12740 SET_FLAGS(drv_flags, flags); 12741 } else { 12742 RESET_FLAGS(drv_flags, flags); 12743 } 12744 12745 SHMEM2_WR(sc, drv_flags, drv_flags); 12746 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12747 12748 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12749 } 12750} 12751 12752/* periodic timer callout routine, only runs when the interface is up */ 12753 12754static void 12755bxe_periodic_callout_func(void *xsc) 12756{ 12757 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12758 int i; 12759 12760 if (!BXE_CORE_TRYLOCK(sc)) { 12761 /* just bail and try again next time */ 12762 12763 if ((sc->state == BXE_STATE_OPEN) && 12764 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12765 /* schedule the next periodic callout */ 12766 callout_reset(&sc->periodic_callout, hz, 12767 bxe_periodic_callout_func, sc); 12768 } 12769 12770 return; 12771 } 12772 12773 if ((sc->state != BXE_STATE_OPEN) || 12774 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12775 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12776 BXE_CORE_UNLOCK(sc); 12777 return; 12778 } 12779 12780 /* Check for TX timeouts on any fastpath. */ 12781 FOR_EACH_QUEUE(sc, i) { 12782 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12783 /* Ruh-Roh, chip was reset! */ 12784 break; 12785 } 12786 } 12787 12788 if (!CHIP_REV_IS_SLOW(sc)) { 12789 /* 12790 * This barrier is needed to ensure the ordering between the writing 12791 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12792 * the reading here. 
12793 */ 12794 mb(); 12795 if (sc->port.pmf) { 12796 bxe_acquire_phy_lock(sc); 12797 elink_period_func(&sc->link_params, &sc->link_vars); 12798 bxe_release_phy_lock(sc); 12799 } 12800 } 12801 12802 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12803 int mb_idx = SC_FW_MB_IDX(sc); 12804 uint32_t drv_pulse; 12805 uint32_t mcp_pulse; 12806 12807 ++sc->fw_drv_pulse_wr_seq; 12808 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12809 12810 drv_pulse = sc->fw_drv_pulse_wr_seq; 12811 bxe_drv_pulse(sc); 12812 12813 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12814 MCP_PULSE_SEQ_MASK); 12815 12816 /* 12817 * The delta between driver pulse and mcp response should 12818 * be 1 (before mcp response) or 0 (after mcp response). 12819 */ 12820 if ((drv_pulse != mcp_pulse) && 12821 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12822 /* someone lost a heartbeat... */ 12823 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12824 drv_pulse, mcp_pulse); 12825 } 12826 } 12827 12828 /* state is BXE_STATE_OPEN */ 12829 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12830 12831#if 0 12832 /* sample VF bulletin board for new posts from PF */ 12833 if (IS_VF(sc)) { 12834 bxe_sample_bulletin(sc); 12835 } 12836#endif 12837 12838 BXE_CORE_UNLOCK(sc); 12839 12840 if ((sc->state == BXE_STATE_OPEN) && 12841 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12842 /* schedule the next periodic callout */ 12843 callout_reset(&sc->periodic_callout, hz, 12844 bxe_periodic_callout_func, sc); 12845 } 12846} 12847 12848static void 12849bxe_periodic_start(struct bxe_softc *sc) 12850{ 12851 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12852 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12853} 12854 12855static void 12856bxe_periodic_stop(struct bxe_softc *sc) 12857{ 12858 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12859 callout_drain(&sc->periodic_callout); 12860} 12861 12862/* start the controller */ 12863static __noinline int 12864bxe_nic_load(struct bxe_softc *sc, 12865 int load_mode) 12866{ 12867 uint32_t val; 12868 int load_code = 0; 12869 int i, rc = 0; 12870 12871 BXE_CORE_LOCK_ASSERT(sc); 12872 12873 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12874 12875 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12876 12877 if (IS_PF(sc)) { 12878 /* must be called before memory allocation and HW init */ 12879 bxe_ilt_set_info(sc); 12880 } 12881 12882 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12883 12884 bxe_set_fp_rx_buf_size(sc); 12885 12886 if (bxe_alloc_fp_buffers(sc) != 0) { 12887 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12888 sc->state = BXE_STATE_CLOSED; 12889 rc = ENOMEM; 12890 goto bxe_nic_load_error0; 12891 } 12892 12893 if (bxe_alloc_mem(sc) != 0) { 12894 sc->state = BXE_STATE_CLOSED; 12895 rc = ENOMEM; 12896 goto bxe_nic_load_error0; 12897 } 12898 12899 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12900 sc->state = BXE_STATE_CLOSED; 12901 rc = ENOMEM; 12902 goto bxe_nic_load_error0; 12903 } 12904 12905 if (IS_PF(sc)) { 12906 /* set pf load just before approaching the MCP */ 12907 bxe_set_pf_load(sc); 12908 12909 /* if MCP exists send load request and analyze response */ 12910 if (!BXE_NOMCP(sc)) { 12911 /* attempt to load pf */ 12912 if (bxe_nic_load_request(sc, &load_code) != 0) { 12913 sc->state = BXE_STATE_CLOSED; 12914 rc = ENXIO; 12915 goto bxe_nic_load_error1; 12916 } 12917 12918 /* what did the MCP say? 
*/ 12919 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12920 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12921 sc->state = BXE_STATE_CLOSED; 12922 rc = ENXIO; 12923 goto bxe_nic_load_error2; 12924 } 12925 } else { 12926 BLOGI(sc, "Device has no MCP!\n"); 12927 load_code = bxe_nic_load_no_mcp(sc); 12928 } 12929 12930 /* mark PMF if applicable */ 12931 bxe_nic_load_pmf(sc, load_code); 12932 12933 /* Init Function state controlling object */ 12934 bxe_init_func_obj(sc); 12935 12936 /* Initialize HW */ 12937 if (bxe_init_hw(sc, load_code) != 0) { 12938 BLOGE(sc, "HW init failed\n"); 12939 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12940 sc->state = BXE_STATE_CLOSED; 12941 rc = ENXIO; 12942 goto bxe_nic_load_error2; 12943 } 12944 } 12945 12946 /* set ALWAYS_ALIVE bit in shmem */ 12947 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12948 bxe_drv_pulse(sc); 12949 sc->flags |= BXE_NO_PULSE; 12950 12951 /* attach interrupts */ 12952 if (bxe_interrupt_attach(sc) != 0) { 12953 sc->state = BXE_STATE_CLOSED; 12954 rc = ENXIO; 12955 goto bxe_nic_load_error2; 12956 } 12957 12958 bxe_nic_init(sc, load_code); 12959 12960 /* Init per-function objects */ 12961 if (IS_PF(sc)) { 12962 bxe_init_objs(sc); 12963 // XXX bxe_iov_nic_init(sc); 12964 12965 /* set AFEX default VLAN tag to an invalid value */ 12966 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12967 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12968 12969 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12970 rc = bxe_func_start(sc); 12971 if (rc) { 12972 BLOGE(sc, "Function start failed!\n"); 12973 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12974 sc->state = BXE_STATE_ERROR; 12975 goto bxe_nic_load_error3; 12976 } 12977 12978 /* send LOAD_DONE command to MCP */ 12979 if (!BXE_NOMCP(sc)) { 12980 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12981 if (!load_code) { 12982 BLOGE(sc, "MCP response failure, aborting\n"); 12983 sc->state = BXE_STATE_ERROR; 12984 rc = ENXIO; 12985 goto bxe_nic_load_error3; 12986 } 12987 } 12988 12989 rc = bxe_setup_leading(sc); 12990 if (rc) { 12991 BLOGE(sc, "Setup leading failed!\n"); 12992 sc->state = BXE_STATE_ERROR; 12993 goto bxe_nic_load_error3; 12994 } 12995 12996 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12997 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12998 if (rc) { 12999 BLOGE(sc, "Queue(%d) setup failed\n", i); 13000 sc->state = BXE_STATE_ERROR; 13001 goto bxe_nic_load_error3; 13002 } 13003 } 13004 13005 rc = bxe_init_rss_pf(sc); 13006 if (rc) { 13007 BLOGE(sc, "PF RSS init failed\n"); 13008 sc->state = BXE_STATE_ERROR; 13009 goto bxe_nic_load_error3; 13010 } 13011 } 13012 /* XXX VF */ 13013#if 0 13014 else { /* VF */ 13015 FOR_EACH_ETH_QUEUE(sc, i) { 13016 rc = bxe_vfpf_setup_q(sc, i); 13017 if (rc) { 13018 BLOGE(sc, "Queue(%d) setup failed\n", i); 13019 sc->state = BXE_STATE_ERROR; 13020 goto bxe_nic_load_error3; 13021 } 13022 } 13023 } 13024#endif 13025 13026 /* now when Clients are configured we are ready to work */ 13027 sc->state = BXE_STATE_OPEN; 13028 13029 /* Configure a ucast MAC */ 13030 if (IS_PF(sc)) { 13031 rc = bxe_set_eth_mac(sc, TRUE); 13032 } 13033#if 0 13034 else { /* IS_VF(sc) */ 13035 rc = bxe_vfpf_set_mac(sc); 13036 } 13037#endif 13038 if (rc) { 13039 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13040 sc->state = BXE_STATE_ERROR; 13041 goto bxe_nic_load_error3; 13042 } 13043 13044#if 0 13045 if (IS_PF(sc) && sc->pending_max) { 13046 /* for AFEX */ 13047 bxe_update_max_mf_config(sc, sc->pending_max); 13048 sc->pending_max = 0; 13049 } 13050#endif 13051 13052 if 
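/*
 * (Only the PMF, the port management function, performs the initial
 * PHY bring-up here; a non-PMF function instead picks up link state
 * via bxe_link_status_update() further below.)
 */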
(sc->port.pmf) { 13053 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13054 if (rc) { 13055 sc->state = BXE_STATE_ERROR; 13056 goto bxe_nic_load_error3; 13057 } 13058 } 13059 13060 sc->link_params.feature_config_flags &= 13061 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13062 13063 /* start fast path */ 13064 13065 /* Initialize Rx filter */ 13066 bxe_set_rx_mode(sc); 13067 13068 /* start the Tx */ 13069 switch (/* XXX load_mode */LOAD_OPEN) { 13070 case LOAD_NORMAL: 13071 case LOAD_OPEN: 13072 break; 13073 13074 case LOAD_DIAG: 13075 case LOAD_LOOPBACK_EXT: 13076 sc->state = BXE_STATE_DIAG; 13077 break; 13078 13079 default: 13080 break; 13081 } 13082 13083 if (sc->port.pmf) { 13084 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13085 } else { 13086 bxe_link_status_update(sc); 13087 } 13088 13089 /* start the periodic timer callout */ 13090 bxe_periodic_start(sc); 13091 13092 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13093 /* mark driver is loaded in shmem2 */ 13094 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13095 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13096 (val | 13097 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13098 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13099 } 13100 13101 /* wait for all pending SP commands to complete */ 13102 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13103 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13104 bxe_periodic_stop(sc); 13105 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13106 return (ENXIO); 13107 } 13108 13109#if 0 13110 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13111 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13112 bxe_dcbx_init(sc, FALSE); 13113 } 13114#endif 13115 13116 /* Tell the stack the driver is running! */ 13117 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 13118 13119 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13120 13121 return (0); 13122 13123bxe_nic_load_error3: 13124 13125 if (IS_PF(sc)) { 13126 bxe_int_disable_sync(sc, 1); 13127 13128 /* clean out queued objects */ 13129 bxe_squeeze_objects(sc); 13130 } 13131 13132 bxe_interrupt_detach(sc); 13133 13134bxe_nic_load_error2: 13135 13136 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13137 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13138 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13139 } 13140 13141 sc->port.pmf = 0; 13142 13143bxe_nic_load_error1: 13144 13145 /* clear pf_load status, as it was already set */ 13146 if (IS_PF(sc)) { 13147 bxe_clear_pf_load(sc); 13148 } 13149 13150bxe_nic_load_error0: 13151 13152 bxe_free_fw_stats_mem(sc); 13153 bxe_free_fp_buffers(sc); 13154 bxe_free_mem(sc); 13155 13156 return (rc); 13157} 13158 13159static int 13160bxe_init_locked(struct bxe_softc *sc) 13161{ 13162 int other_engine = SC_PATH(sc) ? 0 : 1; 13163 uint8_t other_load_status, load_status; 13164 uint8_t global = FALSE; 13165 int rc; 13166 13167 BXE_CORE_LOCK_ASSERT(sc); 13168 13169 /* check if the driver is already running */ 13170 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 13171 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13172 return (0); 13173 } 13174 13175 bxe_set_power_state(sc, PCI_PM_D0); 13176 13177 /* 13178 * If parity occurred during the unload, then attentions and/or 13179 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 13180 * loaded on the current engine to complete the recovery. Parity recovery 13181 * is only relevant for PF driver.
13182 */ 13183 if (IS_PF(sc)) { 13184 other_load_status = bxe_get_load_status(sc, other_engine); 13185 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13186 13187 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13188 bxe_chk_parity_attn(sc, &global, TRUE)) { 13189 do { 13190 /* 13191 * If there are attentions and they are in global blocks, set 13192 * the GLOBAL_RESET bit regardless of whether it will be this 13193 * function that will complete the recovery or not. 13194 */ 13195 if (global) { 13196 bxe_set_reset_global(sc); 13197 } 13198 13199 /* 13200 * Only the first function on the current engine should try 13201 * to recover in open. In case of attentions in global blocks 13202 * only the first in the chip should try to recover. 13203 */ 13204 if ((!load_status && (!global || !other_load_status)) && 13205 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13206 BLOGI(sc, "Recovered during init\n"); 13207 break; 13208 } 13209 13210 /* recovery has failed... */ 13211 bxe_set_power_state(sc, PCI_PM_D3hot); 13212 sc->recovery_state = BXE_RECOVERY_FAILED; 13213 13214 BLOGE(sc, "Recovery flow hasn't properly " 13215 "completed yet, try again later. " 13216 "If you still see this message after a " 13217 "few retries then power cycle is required.\n"); 13218 13219 rc = ENXIO; 13220 goto bxe_init_locked_done; 13221 } while (0); 13222 } 13223 } 13224 13225 sc->recovery_state = BXE_RECOVERY_DONE; 13226 13227 rc = bxe_nic_load(sc, LOAD_OPEN); 13228 13229bxe_init_locked_done: 13230 13231 if (rc) { 13232 /* Tell the stack the driver is NOT running! */ 13233 BLOGE(sc, "Initialization failed, " 13234 "stack notified driver is NOT running!\n"); 13235 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 13236 } 13237 13238 return (rc); 13239} 13240 13241static int 13242bxe_stop_locked(struct bxe_softc *sc) 13243{ 13244 BXE_CORE_LOCK_ASSERT(sc); 13245 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13246} 13247 13248/* 13249 * Handles controller initialization when called from an unlocked routine. 13250 * ifconfig calls this function. 13251 * 13252 * Returns: 13253 * void 13254 */ 13255static void 13256bxe_init(void *xsc) 13257{ 13258 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13259 13260 BXE_CORE_LOCK(sc); 13261 bxe_init_locked(sc); 13262 BXE_CORE_UNLOCK(sc); 13263} 13264 13265static int 13266bxe_init_ifnet(struct bxe_softc *sc) 13267{ 13268 if_t ifp; 13269 int capabilities; 13270 13271 /* ifconfig entrypoint for media type/status reporting */ 13272 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13273 bxe_ifmedia_update, 13274 bxe_ifmedia_status); 13275 13276 /* set the default interface values */ 13277 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13278 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13279 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13280 13281 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ?
*/ 13282 13283 /* allocate the ifnet structure */ 13284 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 13285 BLOGE(sc, "Interface allocation failed!\n"); 13286 return (ENXIO); 13287 } 13288 13289 if_setsoftc(ifp, sc); 13290 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13291 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 13292 if_setioctlfn(ifp, bxe_ioctl); 13293 if_setstartfn(ifp, bxe_tx_start); 13294 if_setgetcounterfn(ifp, bxe_get_counter); 13295#if __FreeBSD_version >= 800000 13296 if_settransmitfn(ifp, bxe_tx_mq_start); 13297 if_setqflushfn(ifp, bxe_mq_flush); 13298#endif 13299#ifdef FreeBSD8_0 13300 if_settimer(ifp, 0); 13301#endif 13302 if_setinitfn(ifp, bxe_init); 13303 if_setmtu(ifp, sc->mtu); 13304 if_sethwassist(ifp, (CSUM_IP | 13305 CSUM_TCP | 13306 CSUM_UDP | 13307 CSUM_TSO | 13308 CSUM_TCP_IPV6 | 13309 CSUM_UDP_IPV6)); 13310 13311 capabilities = 13312#if __FreeBSD_version < 700000 13313 (IFCAP_VLAN_MTU | 13314 IFCAP_VLAN_HWTAGGING | 13315 IFCAP_HWCSUM | 13316 IFCAP_JUMBO_MTU | 13317 IFCAP_LRO); 13318#else 13319 (IFCAP_VLAN_MTU | 13320 IFCAP_VLAN_HWTAGGING | 13321 IFCAP_VLAN_HWTSO | 13322 IFCAP_VLAN_HWFILTER | 13323 IFCAP_VLAN_HWCSUM | 13324 IFCAP_HWCSUM | 13325 IFCAP_JUMBO_MTU | 13326 IFCAP_LRO | 13327 IFCAP_TSO4 | 13328 IFCAP_TSO6 | 13329 IFCAP_WOL_MAGIC); 13330#endif 13331 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 13332 if_setbaudrate(ifp, IF_Gbps(10)); 13333/* XXX */ 13334 if_setsendqlen(ifp, sc->tx_ring_size); 13335 if_setsendqready(ifp); 13336/* XXX */ 13337 13338 sc->ifp = ifp; 13339 13340 /* attach to the Ethernet interface list */ 13341 ether_ifattach(ifp, sc->link_params.mac_addr); 13342 13343 return (0); 13344} 13345 13346static void 13347bxe_deallocate_bars(struct bxe_softc *sc) 13348{ 13349 int i; 13350 13351 for (i = 0; i < MAX_BARS; i++) { 13352 if (sc->bar[i].resource != NULL) { 13353 bus_release_resource(sc->dev, 13354 SYS_RES_MEMORY, 13355 sc->bar[i].rid, 13356 sc->bar[i].resource); 13357 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13358 i, PCIR_BAR(i)); 13359 } 13360 } 13361} 13362 13363static int 13364bxe_allocate_bars(struct bxe_softc *sc) 13365{ 13366 u_int flags; 13367 int i; 13368 13369 memset(sc->bar, 0, sizeof(sc->bar)); 13370 13371 for (i = 0; i < MAX_BARS; i++) { 13372 13373 /* memory resources reside at BARs 0, 2, 4 */ 13374 /* Run `pciconf -lb` to see mappings */ 13375 if ((i != 0) && (i != 2) && (i != 4)) { 13376 continue; 13377 } 13378 13379 sc->bar[i].rid = PCIR_BAR(i); 13380 13381 flags = RF_ACTIVE; 13382 if (i == 0) { 13383 flags |= RF_SHAREABLE; 13384 } 13385 13386 if ((sc->bar[i].resource = 13387 bus_alloc_resource_any(sc->dev, 13388 SYS_RES_MEMORY, 13389 &sc->bar[i].rid, 13390 flags)) == NULL) { 13391#if 0 13392 /* BAR4 doesn't exist for E1 */ 13393 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13394 i, PCIR_BAR(i)); 13395#endif 13396 return (0); 13397 } 13398 13399 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13400 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13401 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13402 13403 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13404 i, PCIR_BAR(i), 13405 (void *)rman_get_start(sc->bar[i].resource), 13406 (void *)rman_get_end(sc->bar[i].resource), 13407 rman_get_size(sc->bar[i].resource), 13408 (void *)sc->bar[i].kva); 13409 } 13410 13411 return (0); 13412} 13413 13414static void 13415bxe_get_function_num(struct bxe_softc *sc) 13416{ 13417 uint32_t 
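/*
 * (An illustrative decode of the pfunc_abs math below: in 4-port mode
 * a function with pfunc_rel == 1 on path_id == 1 yields
 * pfunc_abs = (1 << 1) | 1 = 3, while in 2-port mode the same function
 * yields pfunc_abs = 1 | 1 = 1.)
 */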
val = 0; 13418 13419 /* 13420 * Read the ME register to get the function number. The ME register 13421 * holds the relative-function number and absolute-function number. The 13422 * absolute-function number appears only in E2 and above. Before that 13423 * these bits always contained zero, therefore we cannot blindly use them. 13424 */ 13425 13426 val = REG_RD(sc, BAR_ME_REGISTER); 13427 13428 sc->pfunc_rel = 13429 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13430 sc->path_id = 13431 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13432 13433 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13434 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13435 } else { 13436 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13437 } 13438 13439 BLOGD(sc, DBG_LOAD, 13440 "Relative function %d, Absolute function %d, Path %d\n", 13441 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13442} 13443 13444static uint32_t 13445bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13446{ 13447 uint32_t shmem2_size; 13448 uint32_t offset; 13449 uint32_t mf_cfg_offset_value; 13450 13451 /* Non 57712 */ 13452 offset = (SHMEM_RD(sc, func_mb) + 13453 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13454 13455 /* 57712 plus */ 13456 if (sc->devinfo.shmem2_base != 0) { 13457 shmem2_size = SHMEM2_RD(sc, size); 13458 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13459 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13460 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13461 offset = mf_cfg_offset_value; 13462 } 13463 } 13464 } 13465 13466 return (offset); 13467} 13468 13469static uint32_t 13470bxe_pcie_capability_read(struct bxe_softc *sc, 13471 int reg, 13472 int width) 13473{ 13474 int pcie_reg; 13475 13476 /* ensure PCIe capability is enabled */ 13477 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13478 if (pcie_reg != 0) { 13479 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13480 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13481 } 13482 } 13483 13484 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13485 13486 return (0); 13487} 13488 13489static uint8_t 13490bxe_is_pcie_pending(struct bxe_softc *sc) 13491{ 13492 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13493 PCIM_EXP_STA_TRANSACTION_PND); 13494} 13495 13496/* 13497 * Walk the PCI capabilities list for the device to find what features are 13498 * supported. These capabilities may be enabled/disabled by firmware so it's 13499 * best to walk the list rather than make assumptions. 13500 */ 13501static void 13502bxe_probe_pci_caps(struct bxe_softc *sc) 13503{ 13504 uint16_t link_status; 13505 int reg; 13506 13507 /* check if PCI Power Management is enabled */ 13508 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13509 if (reg != 0) { 13510 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13511 13512 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13513 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13514 } 13515 } 13516 13517 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13518 13519 /* handle PCIe 2.0 workarounds for 57710 */ 13520 if (CHIP_IS_E1(sc)) { 13521 /* workaround for 57710 errata E4_57710_27462 */ 13522 sc->devinfo.pcie_link_speed = 13523 (REG_RD(sc, 0x3d04) & (1 << 24)) ?
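/*
 * (A set bit 24 is treated as a Gen2, 5.0 GT/s, link and encoded as
 * speed 2, otherwise Gen1, 2.5 GT/s, encoded as 1; presumably the
 * standard link-status speed field misreports on 57710, hence this
 * errata workaround.)
 */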
2 : 1; 13524 13525 /* workaround for 57710 errata E4_57710_27488 */ 13526 sc->devinfo.pcie_link_width = 13527 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13528 if (sc->devinfo.pcie_link_speed > 1) { 13529 sc->devinfo.pcie_link_width = 13530 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13531 } 13532 } else { 13533 sc->devinfo.pcie_link_speed = 13534 (link_status & PCIM_LINK_STA_SPEED); 13535 sc->devinfo.pcie_link_width = 13536 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13537 } 13538 13539 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13540 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13541 13542 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13543 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13544 13545 /* check if MSI capability is enabled */ 13546 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13547 if (reg != 0) { 13548 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13549 13550 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13551 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13552 } 13553 } 13554 13555 /* check if MSI-X capability is enabled */ 13556 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13557 if (reg != 0) { 13558 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13559 13560 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13561 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13562 } 13563 } 13564} 13565 13566static int 13567bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13568{ 13569 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13570 uint32_t val; 13571 13572 /* get the outer vlan if we're in switch-dependent mode */ 13573 13574 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13575 mf_info->ext_id = (uint16_t)val; 13576 13577 mf_info->multi_vnics_mode = 1; 13578 13579 if (!VALID_OVLAN(mf_info->ext_id)) { 13580 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13581 return (1); 13582 } 13583 13584 /* get the capabilities */ 13585 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13586 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13587 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13588 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13589 FUNC_MF_CFG_PROTOCOL_FCOE) { 13590 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13591 } else { 13592 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13593 } 13594 13595 mf_info->vnics_per_port = 13596 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13597 13598 return (0); 13599} 13600 13601static uint32_t 13602bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13603{ 13604 uint32_t retval = 0; 13605 uint32_t val; 13606 13607 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13608 13609 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13610 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13611 retval |= MF_PROTO_SUPPORT_ETHERNET; 13612 } 13613 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13614 retval |= MF_PROTO_SUPPORT_ISCSI; 13615 } 13616 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13617 retval |= MF_PROTO_SUPPORT_FCOE; 13618 } 13619 } 13620 13621 return (retval); 13622} 13623 13624static int 13625bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13626{ 13627 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13628 uint32_t val; 13629 13630 /* 13631 * There is no outer vlan if we're in switch-independent mode. 13632 * If the mac is valid then assume multi-function.
13633 */ 13634 13635 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13636 13637 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13638 13639 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13640 13641 mf_info->vnics_per_port = 13642 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13643 13644 return (0); 13645} 13646 13647static int 13648bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13649{ 13650 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13651 uint32_t e1hov_tag; 13652 uint32_t func_config; 13653 uint32_t niv_config; 13654 13655 mf_info->multi_vnics_mode = 1; 13656 13657 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13658 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13659 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13660 13661 mf_info->ext_id = 13662 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13663 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13664 13665 mf_info->default_vlan = 13666 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13667 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13668 13669 mf_info->niv_allowed_priorities = 13670 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13671 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13672 13673 mf_info->niv_default_cos = 13674 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13675 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13676 13677 mf_info->afex_vlan_mode = 13678 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13679 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13680 13681 mf_info->niv_mba_enabled = 13682 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13683 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13684 13685 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13686 13687 mf_info->vnics_per_port = 13688 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13689 13690 return (0); 13691} 13692 13693static int 13694bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13695{ 13696 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13697 uint32_t mf_cfg1; 13698 uint32_t mf_cfg2; 13699 uint32_t ovlan1; 13700 uint32_t ovlan2; 13701 uint8_t i, j; 13702 13703 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13704 SC_PORT(sc)); 13705 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13706 mf_info->mf_config[SC_VN(sc)]); 13707 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13708 mf_info->multi_vnics_mode); 13709 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13710 mf_info->vnics_per_port); 13711 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13712 mf_info->ext_id); 13713 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13714 mf_info->min_bw[0], mf_info->min_bw[1], 13715 mf_info->min_bw[2], mf_info->min_bw[3]); 13716 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13717 mf_info->max_bw[0], mf_info->max_bw[1], 13718 mf_info->max_bw[2], mf_info->max_bw[3]); 13719 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13720 sc->mac_addr_str); 13721 13722 /* various MF mode sanity checks... 
*/ 13723 13724 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13725 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13726 SC_PORT(sc)); 13727 return (1); 13728 } 13729 13730 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13731 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13732 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13733 return (1); 13734 } 13735 13736 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13737 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13738 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13739 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13740 SC_VN(sc), OVLAN(sc)); 13741 return (1); 13742 } 13743 13744 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13745 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13746 mf_info->multi_vnics_mode, OVLAN(sc)); 13747 return (1); 13748 } 13749 13750 /* 13751 * Verify all functions are either MF or SF mode. If MF, make 13752 * sure that all non-hidden functions have a valid ovlan. If SF, 13753 * make sure that all non-hidden functions have an invalid ovlan. 13754 */ 13755 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13756 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13757 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13758 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13759 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13760 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13761 BLOGE(sc, "mf_mode=SD function %d MF config " 13762 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13763 i, mf_info->multi_vnics_mode, ovlan1); 13764 return (1); 13765 } 13766 } 13767 13768 /* Verify all funcs on the same port each have a different ovlan. */ 13769 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13770 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13771 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13772 /* iterate from the next function on the port to the max func */ 13773 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13774 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13775 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13776 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13777 VALID_OVLAN(ovlan1) && 13778 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13779 VALID_OVLAN(ovlan2) && 13780 (ovlan1 == ovlan2)) { 13781 BLOGE(sc, "mf_mode=SD functions %d and %d " 13782 "have the same ovlan (%d)\n", 13783 i, j, ovlan1); 13784 return (1); 13785 } 13786 } 13787 } 13788 } /* MULTI_FUNCTION_SD */ 13789 13790 return (0); 13791} 13792 13793static int 13794bxe_get_mf_cfg_info(struct bxe_softc *sc) 13795{ 13796 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13797 uint32_t val, mac_upper; 13798 uint8_t i, vnic; 13799 13800 /* initialize mf_info defaults */ 13801 mf_info->vnics_per_port = 1; 13802 mf_info->multi_vnics_mode = FALSE; 13803 mf_info->path_has_ovlan = FALSE; 13804 mf_info->mf_mode = SINGLE_FUNCTION; 13805 13806 if (!CHIP_IS_MF_CAP(sc)) { 13807 return (0); 13808 } 13809 13810 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13811 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13812 return (1); 13813 } 13814 13815 /* get the MF mode (switch dependent / independent / single-function) */ 13816 13817 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13818 13819 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13820 { 13821 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13822 13823 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13824 13825 /* check for legal upper mac bytes */ 13826 if (mac_upper !=
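/*
 * (An upper-MAC equal to the FUNC_MF_CFG_UPPERMAC_DEFAULT sentinel
 * means no MAC was provisioned for this function, which is an invalid
 * switch-independent configuration.)
 */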
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13827 mf_info->mf_mode = MULTI_FUNCTION_SI; 13828 } else { 13829 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13830 } 13831 13832 break; 13833 13834 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13835 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13836 13837 /* get outer vlan configuration */ 13838 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13839 13840 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13841 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13842 mf_info->mf_mode = MULTI_FUNCTION_SD; 13843 } else { 13844 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13845 } 13846 13847 break; 13848 13849 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13850 13851 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13852 return (0); 13853 13854 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13855 13856 /* 13857 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13858 * and the MAC address is valid. 13859 */ 13860 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13861 13862 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13863 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13864 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13865 } else { 13866 BLOGE(sc, "Invalid config for AFEX mode\n"); 13867 } 13868 13869 break; 13870 13871 default: 13872 13873 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13874 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13875 13876 return (1); 13877 } 13878 13879 /* set path mf_mode (which could be different than function mf_mode) */ 13880 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13881 mf_info->path_has_ovlan = TRUE; 13882 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13883 /* 13884 * Decide on path multi vnics mode. If we're not in MF mode and in 13885 * 4-port mode, this is good enough to check vnic-0 of the other port 13886 * on the same path 13887 */ 13888 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13889 uint8_t other_port = !(PORT_ID(sc) & 1); 13890 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13891 13892 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13893 13894 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
            1 : 0;
        }
    }

    if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /* invalid MF config */
        if (SC_VN(sc) >= 1) {
            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
            return (1);
        }

        return (0);
    }

    /* get the MF configuration */
    mf_info->mf_config[SC_VN(sc)] =
        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);

    switch(mf_info->mf_mode)
    {
    case MULTI_FUNCTION_SD:

        bxe_get_shmem_mf_cfg_info_sd(sc);
        break;

    case MULTI_FUNCTION_SI:

        bxe_get_shmem_mf_cfg_info_si(sc);
        break;

    case MULTI_FUNCTION_AFEX:

        bxe_get_shmem_mf_cfg_info_niv(sc);
        break;

    default:

        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
              mf_info->mf_mode);
        return (1);
    }

    /* get the congestion management parameters */

    vnic = 0;
    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
        /* get min/max bw */
        val = MFCFG_RD(sc, func_mf_config[i].config);
        mf_info->min_bw[vnic] =
            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
        mf_info->max_bw[vnic] =
            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
        vnic++;
    }

    return (bxe_check_valid_mf_cfg(sc));
}

static int
bxe_get_shmem_info(struct bxe_softc *sc)
{
    int port;
    uint32_t mac_hi, mac_lo, val;

    port = SC_PORT(sc);
    mac_hi = mac_lo = 0;

    sc->link_params.sc = sc;
    sc->link_params.port = port;

    /* get the hardware config info */
    sc->devinfo.hw_config =
        SHMEM_RD(sc, dev_info.shared_hw_config.config);
    sc->devinfo.hw_config2 =
        SHMEM_RD(sc, dev_info.shared_hw_config.config2);

    sc->link_params.hw_led_mode =
        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
         SHARED_HW_CFG_LED_MODE_SHIFT);

    /* get the port feature config */
    sc->port.config =
        SHMEM_RD(sc, dev_info.port_feature_config[port].config);

    /* get the link params */
    sc->link_params.speed_cap_mask[0] =
        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
    sc->link_params.speed_cap_mask[1] =
        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);

    /* get the lane config */
    sc->link_params.lane_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);

    /* get the link config */
    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
    sc->port.link_config[ELINK_INT_PHY] = val;
    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
    sc->port.link_config[ELINK_EXT_PHY1] =
        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);

    /* get the override preemphasis flag and enable it or turn it off */
    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
    } else {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
    }

    /* get the initial value of the link params */
    sc->link_params.multi_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);

    /* get external phy info */
    sc->port.ext_phy_config =
        SHMEM_RD(sc,
dev_info.port_hw_config[port].external_phy_config); 14012 14013 /* get the multifunction configuration */ 14014 bxe_get_mf_cfg_info(sc); 14015 14016 /* get the mac address */ 14017 if (IS_MF(sc)) { 14018 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14019 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14020 } else { 14021 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14022 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14023 } 14024 14025 if ((mac_lo == 0) && (mac_hi == 0)) { 14026 *sc->mac_addr_str = 0; 14027 BLOGE(sc, "No Ethernet address programmed!\n"); 14028 } else { 14029 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14030 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14031 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14032 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14033 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14034 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14035 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14036 "%02x:%02x:%02x:%02x:%02x:%02x", 14037 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14038 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14039 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14040 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14041 } 14042 14043#if 0 14044 if (!IS_MF(sc) && 14045 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14046 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14047 sc->flags |= BXE_NO_ISCSI; 14048 } 14049 if (!IS_MF(sc) && 14050 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14051 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14052 sc->flags |= BXE_NO_FCOE_FLAG; 14053 } 14054#endif 14055 14056 return (0); 14057} 14058 14059static void 14060bxe_get_tunable_params(struct bxe_softc *sc) 14061{ 14062 /* sanity checks */ 14063 14064 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14065 (bxe_interrupt_mode != INTR_MODE_MSI) && 14066 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14067 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14068 bxe_interrupt_mode = INTR_MODE_MSIX; 14069 } 14070 14071 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14072 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14073 bxe_queue_count = 0; 14074 } 14075 14076 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14077 if (bxe_max_rx_bufs == 0) { 14078 bxe_max_rx_bufs = RX_BD_USABLE; 14079 } else { 14080 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14081 bxe_max_rx_bufs = 2048; 14082 } 14083 } 14084 14085 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14086 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14087 bxe_hc_rx_ticks = 25; 14088 } 14089 14090 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14091 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14092 bxe_hc_tx_ticks = 50; 14093 } 14094 14095 if (bxe_max_aggregation_size == 0) { 14096 bxe_max_aggregation_size = TPA_AGG_SIZE; 14097 } 14098 14099 if (bxe_max_aggregation_size > 0xffff) { 14100 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14101 bxe_max_aggregation_size); 14102 bxe_max_aggregation_size = TPA_AGG_SIZE; 14103 } 14104 14105 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14106 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14107 bxe_mrrs = -1; 14108 } 14109 14110 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14111 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 14112 
bxe_autogreeen = 0; 14113 } 14114 14115 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14116 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14117 bxe_udp_rss = 0; 14118 } 14119 14120 /* pull in user settings */ 14121 14122 sc->interrupt_mode = bxe_interrupt_mode; 14123 sc->max_rx_bufs = bxe_max_rx_bufs; 14124 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14125 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14126 sc->max_aggregation_size = bxe_max_aggregation_size; 14127 sc->mrrs = bxe_mrrs; 14128 sc->autogreeen = bxe_autogreeen; 14129 sc->udp_rss = bxe_udp_rss; 14130 14131 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14132 sc->num_queues = 1; 14133 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14134 sc->num_queues = 14135 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14136 MAX_RSS_CHAINS); 14137 if (sc->num_queues > mp_ncpus) { 14138 sc->num_queues = mp_ncpus; 14139 } 14140 } 14141 14142 BLOGD(sc, DBG_LOAD, 14143 "User Config: " 14144 "debug=0x%lx " 14145 "interrupt_mode=%d " 14146 "queue_count=%d " 14147 "hc_rx_ticks=%d " 14148 "hc_tx_ticks=%d " 14149 "rx_budget=%d " 14150 "max_aggregation_size=%d " 14151 "mrrs=%d " 14152 "autogreeen=%d " 14153 "udp_rss=%d\n", 14154 bxe_debug, 14155 sc->interrupt_mode, 14156 sc->num_queues, 14157 sc->hc_rx_ticks, 14158 sc->hc_tx_ticks, 14159 bxe_rx_budget, 14160 sc->max_aggregation_size, 14161 sc->mrrs, 14162 sc->autogreeen, 14163 sc->udp_rss); 14164} 14165 14166static void 14167bxe_media_detect(struct bxe_softc *sc) 14168{ 14169 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14170 switch (sc->link_params.phy[phy_idx].media_type) { 14171 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14172 case ELINK_ETH_PHY_XFP_FIBER: 14173 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14174 sc->media = IFM_10G_SR; 14175 break; 14176 case ELINK_ETH_PHY_SFP_1G_FIBER: 14177 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14178 sc->media = IFM_1000_SX; 14179 break; 14180 case ELINK_ETH_PHY_KR: 14181 case ELINK_ETH_PHY_CX4: 14182 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14183 sc->media = IFM_10G_CX4; 14184 break; 14185 case ELINK_ETH_PHY_DA_TWINAX: 14186 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14187 sc->media = IFM_10G_TWINAX; 14188 break; 14189 case ELINK_ETH_PHY_BASE_T: 14190 if (sc->link_params.speed_cap_mask[0] & 14191 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14192 BLOGI(sc, "Found 10GBase-T media.\n"); 14193 sc->media = IFM_10G_T; 14194 } else { 14195 BLOGI(sc, "Found 1000Base-T media.\n"); 14196 sc->media = IFM_1000_T; 14197 } 14198 break; 14199 case ELINK_ETH_PHY_NOT_PRESENT: 14200 BLOGI(sc, "Media not present.\n"); 14201 sc->media = 0; 14202 break; 14203 case ELINK_ETH_PHY_UNSPECIFIED: 14204 default: 14205 BLOGI(sc, "Unknown media!\n"); 14206 sc->media = 0; 14207 break; 14208 } 14209} 14210 14211#define GET_FIELD(value, fname) \ 14212 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14213#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14214#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14215 14216static int 14217bxe_get_igu_cam_info(struct bxe_softc *sc) 14218{ 14219 int pfid = SC_FUNC(sc); 14220 int igu_sb_id; 14221 uint32_t val; 14222 uint8_t fid, igu_sb_cnt = 0; 14223 14224 sc->igu_base_sb = 0xff; 14225 14226 if (CHIP_INT_MODE_IS_BC(sc)) { 14227 int vn = SC_VN(sc); 14228 igu_sb_cnt = sc->igu_sb_cnt; 14229 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14230 FP_SB_MAX_E1x); 14231 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14232 (CHIP_IS_MODE_4_PORT(sc) ? 
                          pfid : vn));
        return (0);
    }

    /* IGU in normal mode - read CAM */
    for (igu_sb_id = 0;
         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
         igu_sb_id++) {
        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
            continue;
        }
        fid = IGU_FID(val);
        if ((fid & IGU_FID_ENCODE_IS_PF)) {
            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
                continue;
            }
            if (IGU_VEC(val) == 0) {
                /* default status block */
                sc->igu_dsb_id = igu_sb_id;
            } else {
                if (sc->igu_base_sb == 0xff) {
                    sc->igu_base_sb = igu_sb_id;
                }
                igu_sb_cnt++;
            }
        }
    }

    /*
     * Due to the new PF resource allocation by MFW T7.4 and above, the
     * number of CAM entries may not be equal to the value advertised in
     * PCI. The driver should use the smaller of the two as the actual
     * status block count.
     */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

    if (igu_sb_cnt == 0) {
        BLOGE(sc, "CAM configuration error\n");
        return (-1);
    }

    return (0);
}

/*
 * Gather various information from the device config space, the device itself,
 * shmem, and the user input.
 */
static int
bxe_get_device_info(struct bxe_softc *sc)
{
    uint32_t val;
    int rc;

    /* Get the data for the device */
    sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
    sc->devinfo.device_id = pci_get_device(sc->dev);
    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);

    /* get the chip revision (chip metal comes from pci config space) */
    sc->devinfo.chip_id =
    sc->link_params.chip_id =
        (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
         ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
         ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));

    /* force 57811 according to MISC register */
    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
        if (CHIP_IS_57810(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        } else if (CHIP_IS_57810_MF(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        }
        sc->devinfo.chip_id |= 0x1;
    }

    BLOGD(sc, DBG_LOAD,
          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
          sc->devinfo.chip_id,
          ((sc->devinfo.chip_id >> 16) & 0xffff),
          ((sc->devinfo.chip_id >> 12) & 0xf),
          ((sc->devinfo.chip_id >> 4) & 0xff),
          ((sc->devinfo.chip_id >> 0) & 0xf));

    val = (REG_RD(sc, 0x2874) & 0x55);
    if ((sc->devinfo.chip_id & 0x1) ||
        (CHIP_IS_E1(sc) && val) ||
        (CHIP_IS_E1H(sc) && (val == 0x55))) {
        sc->flags |= BXE_ONE_PORT_FLAG;
        BLOGD(sc, DBG_LOAD, "single port device\n");
    }

    /* set the doorbell size */
    sc->doorbell_size = (1 << BXE_DB_SHIFT);

    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
    if (CHIP_IS_E2E3(sc)) {
        /*
         * Read port4mode_en_ovwr[0]:
         * If 1, four port mode is in port4mode_en_ovwr[1].
         * If 0, four port mode is in port4mode_en[0].
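         * For example, an override register value of 0x3 selects four
         * port mode right here, while a value of 0x0 defers entirely to
         * port4mode_en.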
         */
        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
        if (val & 1) {
            val = ((val >> 1) & 1);
        } else {
            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
        }

        sc->devinfo.chip_port_mode =
            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;

        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
    }

    /* get the function and path info for the device */
    bxe_get_function_num(sc);

    /* get the shared memory base address */
    sc->devinfo.shmem_base =
    sc->link_params.shmem_base =
        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    sc->devinfo.shmem2_base =
        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
                                  MISC_REG_GENERIC_CR_0));

    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);

    if (!sc->devinfo.shmem_base) {
        /* this should ONLY prevent upcoming shmem reads */
        BLOGI(sc, "MCP not active\n");
        sc->flags |= BXE_NO_MCP_FLAG;
        return (0);
    }

    /* make sure the shared memory contents are valid */
    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
        return (0);
    }
    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);

    /* get the bootcode version */
    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
    snprintf(sc->devinfo.bc_ver_str,
             sizeof(sc->devinfo.bc_ver_str),
             "%d.%d.%d",
             ((sc->devinfo.bc_ver >> 24) & 0xff),
             ((sc->devinfo.bc_ver >> 16) & 0xff),
             ((sc->devinfo.bc_ver >> 8) & 0xff));
    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);

    /* get the bootcode shmem address */
    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);

    /* clean indirect addresses as they're not used */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    if (IS_PF(sc)) {
        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
        if (CHIP_IS_E1x(sc)) {
            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
        }

        /*
         * Enable internal target-read (in case we are probed after PF
         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up.
         */
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
    }

    /* get the nvram size */
    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
    sc->devinfo.flash_size =
        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);

    /* get PCI capabilities */
    bxe_probe_pci_caps(sc);

    bxe_set_power_state(sc, PCI_PM_D0);

    /* get various configuration parameters from shmem */
    bxe_get_shmem_info(sc);

    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               PCIR_MSIX_CTRL),
                              2);
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    } else {
        sc->igu_sb_cnt = 1;
    }

    sc->igu_base_addr = BAR_IGU_INTMEM;

    /* initialize IGU parameters */
    if (CHIP_IS_E1x(sc)) {
        sc->devinfo.int_block = INT_BLOCK_HC;
        sc->igu_dsb_id = DEF_SB_IGU_ID;
        sc->igu_base_sb = 0;
    } else {
        sc->devinfo.int_block = INT_BLOCK_IGU;

        /* do not allow device reset during IGU info processing */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            int tout = 5000;

            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");

            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                tout--;
                DELAY(1000);
            }

            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
                return (-1);
            }
        }

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
        } else {
            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
        }

        rc = bxe_get_igu_cam_info(sc);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        if (rc) {
            return (rc);
        }
    }

    /*
     * Get base FW non-default (fast path) status block ID. This value is
     * used to initialize the fw_sb_id saved on the fp/queue structure to
     * determine the id used by the FW.
     */
    if (CHIP_IS_E1x(sc)) {
        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    } else {
        /*
         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
         * the same queue are indicated on the same IGU SB). So we prefer
         * FW and IGU SBs to be the same value.
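         * Keeping the two numbering spaces aligned also spares the
         * driver a per-queue translation between FW and IGU status
         * block IDs.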
14509 */ 14510 sc->base_fw_ndsb = sc->igu_base_sb; 14511 } 14512 14513 BLOGD(sc, DBG_LOAD, 14514 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14515 sc->igu_dsb_id, sc->igu_base_sb, 14516 sc->igu_sb_cnt, sc->base_fw_ndsb); 14517 14518 elink_phy_probe(&sc->link_params); 14519 14520 return (0); 14521} 14522 14523static void 14524bxe_link_settings_supported(struct bxe_softc *sc, 14525 uint32_t switch_cfg) 14526{ 14527 uint32_t cfg_size = 0; 14528 uint32_t idx; 14529 uint8_t port = SC_PORT(sc); 14530 14531 /* aggregation of supported attributes of all external phys */ 14532 sc->port.supported[0] = 0; 14533 sc->port.supported[1] = 0; 14534 14535 switch (sc->link_params.num_phys) { 14536 case 1: 14537 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14538 cfg_size = 1; 14539 break; 14540 case 2: 14541 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14542 cfg_size = 1; 14543 break; 14544 case 3: 14545 if (sc->link_params.multi_phy_config & 14546 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14547 sc->port.supported[1] = 14548 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14549 sc->port.supported[0] = 14550 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14551 } else { 14552 sc->port.supported[0] = 14553 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14554 sc->port.supported[1] = 14555 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14556 } 14557 cfg_size = 2; 14558 break; 14559 } 14560 14561 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14562 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14563 SHMEM_RD(sc, 14564 dev_info.port_hw_config[port].external_phy_config), 14565 SHMEM_RD(sc, 14566 dev_info.port_hw_config[port].external_phy_config2)); 14567 return; 14568 } 14569 14570 if (CHIP_IS_E3(sc)) 14571 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14572 else { 14573 switch (switch_cfg) { 14574 case ELINK_SWITCH_CFG_1G: 14575 sc->port.phy_addr = 14576 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14577 break; 14578 case ELINK_SWITCH_CFG_10G: 14579 sc->port.phy_addr = 14580 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14581 break; 14582 default: 14583 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14584 sc->port.link_config[0]); 14585 return; 14586 } 14587 } 14588 14589 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14590 14591 /* mask what we support according to speed_cap_mask per configuration */ 14592 for (idx = 0; idx < cfg_size; idx++) { 14593 if (!(sc->link_params.speed_cap_mask[idx] & 14594 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14595 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14596 } 14597 14598 if (!(sc->link_params.speed_cap_mask[idx] & 14599 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14600 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14601 } 14602 14603 if (!(sc->link_params.speed_cap_mask[idx] & 14604 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14605 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14606 } 14607 14608 if (!(sc->link_params.speed_cap_mask[idx] & 14609 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14610 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14611 } 14612 14613 if (!(sc->link_params.speed_cap_mask[idx] & 14614 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14615 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14616 } 14617 14618 if (!(sc->link_params.speed_cap_mask[idx] & 14619 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14620 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14621 } 14622 14623 if (!(sc->link_params.speed_cap_mask[idx] & 14624 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14625 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14626 } 14627 14628 if (!(sc->link_params.speed_cap_mask[idx] & 14629 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14630 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14631 } 14632 } 14633 14634 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14635 sc->port.supported[0], sc->port.supported[1]); 14636} 14637 14638static void 14639bxe_link_settings_requested(struct bxe_softc *sc) 14640{ 14641 uint32_t link_config; 14642 uint32_t idx; 14643 uint32_t cfg_size = 0; 14644 14645 sc->port.advertising[0] = 0; 14646 sc->port.advertising[1] = 0; 14647 14648 switch (sc->link_params.num_phys) { 14649 case 1: 14650 case 2: 14651 cfg_size = 1; 14652 break; 14653 case 3: 14654 cfg_size = 2; 14655 break; 14656 } 14657 14658 for (idx = 0; idx < cfg_size; idx++) { 14659 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14660 link_config = sc->port.link_config[idx]; 14661 14662 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14663 case PORT_FEATURE_LINK_SPEED_AUTO: 14664 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14665 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14666 sc->port.advertising[idx] |= sc->port.supported[idx]; 14667 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14668 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14669 sc->port.advertising[idx] |= 14670 (ELINK_SUPPORTED_100baseT_Half | 14671 ELINK_SUPPORTED_100baseT_Full); 14672 } else { 14673 /* force 10G, no AN */ 14674 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14675 sc->port.advertising[idx] |= 14676 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14677 continue; 14678 } 14679 break; 14680 14681 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14682 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14683 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14684 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14685 ADVERTISED_TP); 14686 } else { 14687 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14688 "speed_cap_mask=0x%08x\n", 14689 link_config, sc->link_params.speed_cap_mask[idx]); 14690 return; 14691 } 14692 break; 14693 14694 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14695 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14696 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14697 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14698 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14699 ADVERTISED_TP); 14700 } else { 14701 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14702 "speed_cap_mask=0x%08x\n", 14703 link_config, sc->link_params.speed_cap_mask[idx]); 14704 return; 14705 } 14706 break; 14707 14708 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14709 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14710 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14711 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14712 ADVERTISED_TP); 14713 } else { 14714 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14715 "speed_cap_mask=0x%08x\n", 14716 link_config, sc->link_params.speed_cap_mask[idx]); 14717 return; 14718 } 14719 break; 14720 14721 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14722 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14723 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14724 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14725 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14726 ADVERTISED_TP); 14727 } else { 14728 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14729 "speed_cap_mask=0x%08x\n", 14730 link_config, sc->link_params.speed_cap_mask[idx]); 14731 return; 14732 } 14733 break; 14734 14735 case PORT_FEATURE_LINK_SPEED_1G: 14736 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14737 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14738 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14739 ADVERTISED_TP); 14740 } else { 14741 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14742 "speed_cap_mask=0x%08x\n", 14743 link_config, sc->link_params.speed_cap_mask[idx]); 14744 return; 14745 } 14746 break; 14747 14748 case PORT_FEATURE_LINK_SPEED_2_5G: 14749 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14750 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14751 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14752 ADVERTISED_TP); 14753 } else { 14754 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14755 "speed_cap_mask=0x%08x\n", 14756 link_config, sc->link_params.speed_cap_mask[idx]); 14757 return; 14758 } 14759 break; 14760 14761 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14762 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14763 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14764 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14765 ADVERTISED_FIBRE); 14766 } else { 14767 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14768 "speed_cap_mask=0x%08x\n", 14769 link_config, sc->link_params.speed_cap_mask[idx]); 14770 return; 14771 } 14772 break; 14773 14774 case PORT_FEATURE_LINK_SPEED_20G: 14775 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14776 break; 14777 14778 default: 14779 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14780 "speed_cap_mask=0x%08x\n", 14781 link_config, sc->link_params.speed_cap_mask[idx]); 14782 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14783 sc->port.advertising[idx] = sc->port.supported[idx]; 14784 break; 14785 } 14786 14787 sc->link_params.req_flow_ctrl[idx] = 14788 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14789 14790 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14791 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14792 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14793 } else { 14794 bxe_set_requested_fc(sc); 14795 } 14796 } 14797 14798 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14799 "req_flow_ctrl=0x%x advertising=0x%x\n", 14800 sc->link_params.req_line_speed[idx], 14801 sc->link_params.req_duplex[idx], 14802 sc->link_params.req_flow_ctrl[idx], 14803 sc->port.advertising[idx]); 14804 } 14805} 14806 14807static void 14808bxe_get_phy_info(struct bxe_softc *sc) 14809{ 14810 uint8_t port = SC_PORT(sc); 14811 uint32_t config = sc->port.config; 14812 uint32_t eee_mode; 14813 14814 /* shmem data already read in bxe_get_shmem_info() */ 14815 14816 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14817 "link_config0=0x%08x\n", 14818 sc->link_params.lane_config, 14819 sc->link_params.speed_cap_mask[0], 14820 sc->port.link_config[0]); 14821 14822 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14823 bxe_link_settings_requested(sc); 14824 14825 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14826 sc->link_params.feature_config_flags |= 14827 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14828 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14829 
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link feature according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
}

static void
bxe_get_params(struct bxe_softc *sc)
{
    /* get user tunable params */
    bxe_get_tunable_params(sc);

    /* select the RX and TX ring sizes */
    sc->tx_ring_size = TX_BD_USABLE;
    sc->rx_ring_size = RX_BD_USABLE;

    /* XXX disable WoL */
    sc->wol = 0;
}

static void
bxe_set_modes_bitmap(struct bxe_softc *sc)
{
    uint32_t flags = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        SET_FLAGS(flags, MODE_FPGA);
    } else if (CHIP_REV_IS_EMUL(sc)) {
        SET_FLAGS(flags, MODE_EMUL);
    } else {
        SET_FLAGS(flags, MODE_ASIC);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        SET_FLAGS(flags, MODE_PORT4);
    } else {
        SET_FLAGS(flags, MODE_PORT2);
    }

    if (CHIP_IS_E2(sc)) {
        SET_FLAGS(flags, MODE_E2);
    } else if (CHIP_IS_E3(sc)) {
        SET_FLAGS(flags, MODE_E3);
        if (CHIP_REV(sc) == CHIP_REV_Ax) {
            SET_FLAGS(flags, MODE_E3_A0);
        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
        }
    }

    if (IS_MF(sc)) {
        SET_FLAGS(flags, MODE_MF);
        switch (sc->devinfo.mf_info.mf_mode) {
        case MULTI_FUNCTION_SD:
            SET_FLAGS(flags, MODE_MF_SD);
            break;
        case MULTI_FUNCTION_SI:
            SET_FLAGS(flags, MODE_MF_SI);
            break;
        case MULTI_FUNCTION_AFEX:
            SET_FLAGS(flags, MODE_MF_AFEX);
            break;
        }
    } else {
        SET_FLAGS(flags, MODE_SF);
    }

#if defined(__LITTLE_ENDIAN)
    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /* __BIG_ENDIAN */
    SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif

    INIT_MODE_FLAGS(sc) = flags;
}

static int
bxe_alloc_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    bus_addr_t busaddr;
    int max_agg_queues;
    int max_segments;
    bus_size_t max_size;
    bus_size_t max_seg_size;
    char buf[32];
    int rc;
    int i, j;

    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */

    /* allocate the parent bus DMA tag */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
                            1,                        /* alignment */
                            0,                        /* boundary limit */
                            BUS_SPACE_MAXADDR,        /* restricted low */
                            BUS_SPACE_MAXADDR,        /* restricted hi */
                            NULL,                     /* addr filter() */
                            NULL,                     /* addr filter() arg */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
                            0,                        /* flags */
                            NULL,                     /* lock() */
                            NULL,                     /* lock() arg */
                            &sc->parent_dma_tag);     /* returned dma tag */
    if (rc
!= 0) { 14954 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14955 return (1); 14956 } 14957 14958 /************************/ 14959 /* DEFAULT STATUS BLOCK */ 14960 /************************/ 14961 14962 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14963 &sc->def_sb_dma, "default status block") != 0) { 14964 /* XXX */ 14965 bus_dma_tag_destroy(sc->parent_dma_tag); 14966 return (1); 14967 } 14968 14969 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14970 14971 /***************/ 14972 /* EVENT QUEUE */ 14973 /***************/ 14974 14975 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14976 &sc->eq_dma, "event queue") != 0) { 14977 /* XXX */ 14978 bxe_dma_free(sc, &sc->def_sb_dma); 14979 sc->def_sb = NULL; 14980 bus_dma_tag_destroy(sc->parent_dma_tag); 14981 return (1); 14982 } 14983 14984 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14985 14986 /*************/ 14987 /* SLOW PATH */ 14988 /*************/ 14989 14990 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14991 &sc->sp_dma, "slow path") != 0) { 14992 /* XXX */ 14993 bxe_dma_free(sc, &sc->eq_dma); 14994 sc->eq = NULL; 14995 bxe_dma_free(sc, &sc->def_sb_dma); 14996 sc->def_sb = NULL; 14997 bus_dma_tag_destroy(sc->parent_dma_tag); 14998 return (1); 14999 } 15000 15001 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 15002 15003 /*******************/ 15004 /* SLOW PATH QUEUE */ 15005 /*******************/ 15006 15007 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15008 &sc->spq_dma, "slow path queue") != 0) { 15009 /* XXX */ 15010 bxe_dma_free(sc, &sc->sp_dma); 15011 sc->sp = NULL; 15012 bxe_dma_free(sc, &sc->eq_dma); 15013 sc->eq = NULL; 15014 bxe_dma_free(sc, &sc->def_sb_dma); 15015 sc->def_sb = NULL; 15016 bus_dma_tag_destroy(sc->parent_dma_tag); 15017 return (1); 15018 } 15019 15020 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15021 15022 /***************************/ 15023 /* FW DECOMPRESSION BUFFER */ 15024 /***************************/ 15025 15026 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15027 "fw decompression buffer") != 0) { 15028 /* XXX */ 15029 bxe_dma_free(sc, &sc->spq_dma); 15030 sc->spq = NULL; 15031 bxe_dma_free(sc, &sc->sp_dma); 15032 sc->sp = NULL; 15033 bxe_dma_free(sc, &sc->eq_dma); 15034 sc->eq = NULL; 15035 bxe_dma_free(sc, &sc->def_sb_dma); 15036 sc->def_sb = NULL; 15037 bus_dma_tag_destroy(sc->parent_dma_tag); 15038 return (1); 15039 } 15040 15041 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15042 15043 if ((sc->gz_strm = 15044 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15045 /* XXX */ 15046 bxe_dma_free(sc, &sc->gz_buf_dma); 15047 sc->gz_buf = NULL; 15048 bxe_dma_free(sc, &sc->spq_dma); 15049 sc->spq = NULL; 15050 bxe_dma_free(sc, &sc->sp_dma); 15051 sc->sp = NULL; 15052 bxe_dma_free(sc, &sc->eq_dma); 15053 sc->eq = NULL; 15054 bxe_dma_free(sc, &sc->def_sb_dma); 15055 sc->def_sb = NULL; 15056 bus_dma_tag_destroy(sc->parent_dma_tag); 15057 return (1); 15058 } 15059 15060 /*************/ 15061 /* FASTPATHS */ 15062 /*************/ 15063 15064 /* allocate DMA memory for each fastpath structure */ 15065 for (i = 0; i < sc->num_queues; i++) { 15066 fp = &sc->fp[i]; 15067 fp->sc = sc; 15068 fp->index = i; 15069 15070 /*******************/ 15071 /* FP STATUS BLOCK */ 15072 /*******************/ 15073 15074 snprintf(buf, sizeof(buf), "fp %d status block", i); 15075 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15076 &fp->sb_dma, buf) != 0) { 15077 /* XXX unwind and free previous fastpath allocations */ 15078 BLOGE(sc, "Failed to alloc %s\n", 
buf); 15079 return (1); 15080 } else { 15081 if (CHIP_IS_E2E3(sc)) { 15082 fp->status_block.e2_sb = 15083 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15084 } else { 15085 fp->status_block.e1x_sb = 15086 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15087 } 15088 } 15089 15090 /******************/ 15091 /* FP TX BD CHAIN */ 15092 /******************/ 15093 15094 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15095 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15096 &fp->tx_dma, buf) != 0) { 15097 /* XXX unwind and free previous fastpath allocations */ 15098 BLOGE(sc, "Failed to alloc %s\n", buf); 15099 return (1); 15100 } else { 15101 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15102 } 15103 15104 /* link together the tx bd chain pages */ 15105 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15106 /* index into the tx bd chain array to last entry per page */ 15107 struct eth_tx_next_bd *tx_next_bd = 15108 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15109 /* point to the next page and wrap from last page */ 15110 busaddr = (fp->tx_dma.paddr + 15111 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15112 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15113 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15114 } 15115 15116 /******************/ 15117 /* FP RX BD CHAIN */ 15118 /******************/ 15119 15120 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15121 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15122 &fp->rx_dma, buf) != 0) { 15123 /* XXX unwind and free previous fastpath allocations */ 15124 BLOGE(sc, "Failed to alloc %s\n", buf); 15125 return (1); 15126 } else { 15127 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15128 } 15129 15130 /* link together the rx bd chain pages */ 15131 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15132 /* index into the rx bd chain array to last entry per page */ 15133 struct eth_rx_bd *rx_bd = 15134 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15135 /* point to the next page and wrap from last page */ 15136 busaddr = (fp->rx_dma.paddr + 15137 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15138 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15139 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15140 } 15141 15142 /*******************/ 15143 /* FP RX RCQ CHAIN */ 15144 /*******************/ 15145 15146 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15147 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15148 &fp->rcq_dma, buf) != 0) { 15149 /* XXX unwind and free previous fastpath allocations */ 15150 BLOGE(sc, "Failed to alloc %s\n", buf); 15151 return (1); 15152 } else { 15153 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15154 } 15155 15156 /* link together the rcq chain pages */ 15157 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15158 /* index into the rcq chain array to last entry per page */ 15159 struct eth_rx_cqe_next_page *rx_cqe_next = 15160 (struct eth_rx_cqe_next_page *) 15161 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15162 /* point to the next page and wrap from last page */ 15163 busaddr = (fp->rcq_dma.paddr + 15164 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15165 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15166 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15167 } 15168 15169 /*******************/ 15170 /* FP RX SGE CHAIN */ 15171 /*******************/ 15172 15173 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15174 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15175 &fp->rx_sge_dma, buf) != 0) { 15176 /* XXX unwind and free previous fastpath allocations */ 
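            /*
             * NB: bxe_free_hsi_mem() checks every fastpath tag and map
             * for NULL before freeing, so the caller can use it to
             * unwind a partially completed allocation.
             */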
15177 BLOGE(sc, "Failed to alloc %s\n", buf); 15178 return (1); 15179 } else { 15180 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15181 } 15182 15183 /* link together the sge chain pages */ 15184 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15185 /* index into the rcq chain array to last entry per page */ 15186 struct eth_rx_sge *rx_sge = 15187 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15188 /* point to the next page and wrap from last page */ 15189 busaddr = (fp->rx_sge_dma.paddr + 15190 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15191 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15192 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15193 } 15194 15195 /***********************/ 15196 /* FP TX MBUF DMA MAPS */ 15197 /***********************/ 15198 15199 /* set required sizes before mapping to conserve resources */ 15200 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 15201 max_size = BXE_TSO_MAX_SIZE; 15202 max_segments = BXE_TSO_MAX_SEGMENTS; 15203 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15204 } else { 15205 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15206 max_segments = BXE_MAX_SEGMENTS; 15207 max_seg_size = MCLBYTES; 15208 } 15209 15210 /* create a dma tag for the tx mbufs */ 15211 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15212 1, /* alignment */ 15213 0, /* boundary limit */ 15214 BUS_SPACE_MAXADDR, /* restricted low */ 15215 BUS_SPACE_MAXADDR, /* restricted hi */ 15216 NULL, /* addr filter() */ 15217 NULL, /* addr filter() arg */ 15218 max_size, /* max map size */ 15219 max_segments, /* num discontinuous */ 15220 max_seg_size, /* max seg size */ 15221 0, /* flags */ 15222 NULL, /* lock() */ 15223 NULL, /* lock() arg */ 15224 &fp->tx_mbuf_tag); /* returned dma tag */ 15225 if (rc != 0) { 15226 /* XXX unwind and free previous fastpath allocations */ 15227 BLOGE(sc, "Failed to create dma tag for " 15228 "'fp %d tx mbufs' (%d)\n", 15229 i, rc); 15230 return (1); 15231 } 15232 15233 /* create dma maps for each of the tx mbuf clusters */ 15234 for (j = 0; j < TX_BD_TOTAL; j++) { 15235 if (bus_dmamap_create(fp->tx_mbuf_tag, 15236 BUS_DMA_NOWAIT, 15237 &fp->tx_mbuf_chain[j].m_map)) { 15238 /* XXX unwind and free previous fastpath allocations */ 15239 BLOGE(sc, "Failed to create dma map for " 15240 "'fp %d tx mbuf %d' (%d)\n", 15241 i, j, rc); 15242 return (1); 15243 } 15244 } 15245 15246 /***********************/ 15247 /* FP RX MBUF DMA MAPS */ 15248 /***********************/ 15249 15250 /* create a dma tag for the rx mbufs */ 15251 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15252 1, /* alignment */ 15253 0, /* boundary limit */ 15254 BUS_SPACE_MAXADDR, /* restricted low */ 15255 BUS_SPACE_MAXADDR, /* restricted hi */ 15256 NULL, /* addr filter() */ 15257 NULL, /* addr filter() arg */ 15258 MJUM9BYTES, /* max map size */ 15259 1, /* num discontinuous */ 15260 MJUM9BYTES, /* max seg size */ 15261 0, /* flags */ 15262 NULL, /* lock() */ 15263 NULL, /* lock() arg */ 15264 &fp->rx_mbuf_tag); /* returned dma tag */ 15265 if (rc != 0) { 15266 /* XXX unwind and free previous fastpath allocations */ 15267 BLOGE(sc, "Failed to create dma tag for " 15268 "'fp %d rx mbufs' (%d)\n", 15269 i, rc); 15270 return (1); 15271 } 15272 15273 /* create dma maps for each of the rx mbuf clusters */ 15274 for (j = 0; j < RX_BD_TOTAL; j++) { 15275 if (bus_dmamap_create(fp->rx_mbuf_tag, 15276 BUS_DMA_NOWAIT, 15277 &fp->rx_mbuf_chain[j].m_map)) { 15278 /* XXX unwind and free previous fastpath allocations */ 15279 BLOGE(sc, "Failed to create dma map for " 
15280 "'fp %d rx mbuf %d' (%d)\n", 15281 i, j, rc); 15282 return (1); 15283 } 15284 } 15285 15286 /* create dma map for the spare rx mbuf cluster */ 15287 if (bus_dmamap_create(fp->rx_mbuf_tag, 15288 BUS_DMA_NOWAIT, 15289 &fp->rx_mbuf_spare_map)) { 15290 /* XXX unwind and free previous fastpath allocations */ 15291 BLOGE(sc, "Failed to create dma map for " 15292 "'fp %d spare rx mbuf' (%d)\n", 15293 i, rc); 15294 return (1); 15295 } 15296 15297 /***************************/ 15298 /* FP RX SGE MBUF DMA MAPS */ 15299 /***************************/ 15300 15301 /* create a dma tag for the rx sge mbufs */ 15302 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15303 1, /* alignment */ 15304 0, /* boundary limit */ 15305 BUS_SPACE_MAXADDR, /* restricted low */ 15306 BUS_SPACE_MAXADDR, /* restricted hi */ 15307 NULL, /* addr filter() */ 15308 NULL, /* addr filter() arg */ 15309 BCM_PAGE_SIZE, /* max map size */ 15310 1, /* num discontinuous */ 15311 BCM_PAGE_SIZE, /* max seg size */ 15312 0, /* flags */ 15313 NULL, /* lock() */ 15314 NULL, /* lock() arg */ 15315 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15316 if (rc != 0) { 15317 /* XXX unwind and free previous fastpath allocations */ 15318 BLOGE(sc, "Failed to create dma tag for " 15319 "'fp %d rx sge mbufs' (%d)\n", 15320 i, rc); 15321 return (1); 15322 } 15323 15324 /* create dma maps for the rx sge mbuf clusters */ 15325 for (j = 0; j < RX_SGE_TOTAL; j++) { 15326 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15327 BUS_DMA_NOWAIT, 15328 &fp->rx_sge_mbuf_chain[j].m_map)) { 15329 /* XXX unwind and free previous fastpath allocations */ 15330 BLOGE(sc, "Failed to create dma map for " 15331 "'fp %d rx sge mbuf %d' (%d)\n", 15332 i, j, rc); 15333 return (1); 15334 } 15335 } 15336 15337 /* create dma map for the spare rx sge mbuf cluster */ 15338 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15339 BUS_DMA_NOWAIT, 15340 &fp->rx_sge_mbuf_spare_map)) { 15341 /* XXX unwind and free previous fastpath allocations */ 15342 BLOGE(sc, "Failed to create dma map for " 15343 "'fp %d spare rx sge mbuf' (%d)\n", 15344 i, rc); 15345 return (1); 15346 } 15347 15348 /***************************/ 15349 /* FP RX TPA MBUF DMA MAPS */ 15350 /***************************/ 15351 15352 /* create dma maps for the rx tpa mbuf clusters */ 15353 max_agg_queues = MAX_AGG_QS(sc); 15354 15355 for (j = 0; j < max_agg_queues; j++) { 15356 if (bus_dmamap_create(fp->rx_mbuf_tag, 15357 BUS_DMA_NOWAIT, 15358 &fp->rx_tpa_info[j].bd.m_map)) { 15359 /* XXX unwind and free previous fastpath allocations */ 15360 BLOGE(sc, "Failed to create dma map for " 15361 "'fp %d rx tpa mbuf %d' (%d)\n", 15362 i, j, rc); 15363 return (1); 15364 } 15365 } 15366 15367 /* create dma map for the spare rx tpa mbuf cluster */ 15368 if (bus_dmamap_create(fp->rx_mbuf_tag, 15369 BUS_DMA_NOWAIT, 15370 &fp->rx_tpa_info_mbuf_spare_map)) { 15371 /* XXX unwind and free previous fastpath allocations */ 15372 BLOGE(sc, "Failed to create dma map for " 15373 "'fp %d spare rx tpa mbuf' (%d)\n", 15374 i, rc); 15375 return (1); 15376 } 15377 15378 bxe_init_sge_ring_bit_mask(fp); 15379 } 15380 15381 return (0); 15382} 15383 15384static void 15385bxe_free_hsi_mem(struct bxe_softc *sc) 15386{ 15387 struct bxe_fastpath *fp; 15388 int max_agg_queues; 15389 int i, j; 15390 15391 if (sc->parent_dma_tag == NULL) { 15392 return; /* assume nothing was allocated */ 15393 } 15394 15395 for (i = 0; i < sc->num_queues; i++) { 15396 fp = &sc->fp[i]; 15397 15398 /*******************/ 15399 /* FP STATUS BLOCK */ 15400 
/*******************/ 15401 15402 bxe_dma_free(sc, &fp->sb_dma); 15403 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15404 15405 /******************/ 15406 /* FP TX BD CHAIN */ 15407 /******************/ 15408 15409 bxe_dma_free(sc, &fp->tx_dma); 15410 fp->tx_chain = NULL; 15411 15412 /******************/ 15413 /* FP RX BD CHAIN */ 15414 /******************/ 15415 15416 bxe_dma_free(sc, &fp->rx_dma); 15417 fp->rx_chain = NULL; 15418 15419 /*******************/ 15420 /* FP RX RCQ CHAIN */ 15421 /*******************/ 15422 15423 bxe_dma_free(sc, &fp->rcq_dma); 15424 fp->rcq_chain = NULL; 15425 15426 /*******************/ 15427 /* FP RX SGE CHAIN */ 15428 /*******************/ 15429 15430 bxe_dma_free(sc, &fp->rx_sge_dma); 15431 fp->rx_sge_chain = NULL; 15432 15433 /***********************/ 15434 /* FP TX MBUF DMA MAPS */ 15435 /***********************/ 15436 15437 if (fp->tx_mbuf_tag != NULL) { 15438 for (j = 0; j < TX_BD_TOTAL; j++) { 15439 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15440 bus_dmamap_unload(fp->tx_mbuf_tag, 15441 fp->tx_mbuf_chain[j].m_map); 15442 bus_dmamap_destroy(fp->tx_mbuf_tag, 15443 fp->tx_mbuf_chain[j].m_map); 15444 } 15445 } 15446 15447 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15448 fp->tx_mbuf_tag = NULL; 15449 } 15450 15451 /***********************/ 15452 /* FP RX MBUF DMA MAPS */ 15453 /***********************/ 15454 15455 if (fp->rx_mbuf_tag != NULL) { 15456 for (j = 0; j < RX_BD_TOTAL; j++) { 15457 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15458 bus_dmamap_unload(fp->rx_mbuf_tag, 15459 fp->rx_mbuf_chain[j].m_map); 15460 bus_dmamap_destroy(fp->rx_mbuf_tag, 15461 fp->rx_mbuf_chain[j].m_map); 15462 } 15463 } 15464 15465 if (fp->rx_mbuf_spare_map != NULL) { 15466 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15467 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15468 } 15469 15470 /***************************/ 15471 /* FP RX TPA MBUF DMA MAPS */ 15472 /***************************/ 15473 15474 max_agg_queues = MAX_AGG_QS(sc); 15475 15476 for (j = 0; j < max_agg_queues; j++) { 15477 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15478 bus_dmamap_unload(fp->rx_mbuf_tag, 15479 fp->rx_tpa_info[j].bd.m_map); 15480 bus_dmamap_destroy(fp->rx_mbuf_tag, 15481 fp->rx_tpa_info[j].bd.m_map); 15482 } 15483 } 15484 15485 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15486 bus_dmamap_unload(fp->rx_mbuf_tag, 15487 fp->rx_tpa_info_mbuf_spare_map); 15488 bus_dmamap_destroy(fp->rx_mbuf_tag, 15489 fp->rx_tpa_info_mbuf_spare_map); 15490 } 15491 15492 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15493 fp->rx_mbuf_tag = NULL; 15494 } 15495 15496 /***************************/ 15497 /* FP RX SGE MBUF DMA MAPS */ 15498 /***************************/ 15499 15500 if (fp->rx_sge_mbuf_tag != NULL) { 15501 for (j = 0; j < RX_SGE_TOTAL; j++) { 15502 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15503 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15504 fp->rx_sge_mbuf_chain[j].m_map); 15505 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15506 fp->rx_sge_mbuf_chain[j].m_map); 15507 } 15508 } 15509 15510 if (fp->rx_sge_mbuf_spare_map != NULL) { 15511 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15512 fp->rx_sge_mbuf_spare_map); 15513 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15514 fp->rx_sge_mbuf_spare_map); 15515 } 15516 15517 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15518 fp->rx_sge_mbuf_tag = NULL; 15519 } 15520 } 15521 15522 /***************************/ 15523 /* FW DECOMPRESSION BUFFER */ 15524 /***************************/ 15525 15526 bxe_dma_free(sc, &sc->gz_buf_dma); 15527 sc->gz_buf = NULL; 
15528 free(sc->gz_strm, M_DEVBUF); 15529 sc->gz_strm = NULL; 15530 15531 /*******************/ 15532 /* SLOW PATH QUEUE */ 15533 /*******************/ 15534 15535 bxe_dma_free(sc, &sc->spq_dma); 15536 sc->spq = NULL; 15537 15538 /*************/ 15539 /* SLOW PATH */ 15540 /*************/ 15541 15542 bxe_dma_free(sc, &sc->sp_dma); 15543 sc->sp = NULL; 15544 15545 /***************/ 15546 /* EVENT QUEUE */ 15547 /***************/ 15548 15549 bxe_dma_free(sc, &sc->eq_dma); 15550 sc->eq = NULL; 15551 15552 /************************/ 15553 /* DEFAULT STATUS BLOCK */ 15554 /************************/ 15555 15556 bxe_dma_free(sc, &sc->def_sb_dma); 15557 sc->def_sb = NULL; 15558 15559 bus_dma_tag_destroy(sc->parent_dma_tag); 15560 sc->parent_dma_tag = NULL; 15561} 15562 15563/* 15564 * Previous driver DMAE transaction may have occurred when pre-boot stage 15565 * ended and boot began. This would invalidate the addresses of the 15566 * transaction, resulting in was-error bit set in the PCI causing all 15567 * hw-to-host PCIe transactions to timeout. If this happened we want to clear 15568 * the interrupt which detected this from the pglueb and the was-done bit 15569 */ 15570static void 15571bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15572{ 15573 uint32_t val; 15574 15575 if (!CHIP_IS_E1x(sc)) { 15576 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15577 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15578 BLOGD(sc, DBG_LOAD, 15579 "Clearing 'was-error' bit that was set in pglueb"); 15580 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15581 } 15582 } 15583} 15584 15585static int 15586bxe_prev_mcp_done(struct bxe_softc *sc) 15587{ 15588 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15589 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15590 if (!rc) { 15591 BLOGE(sc, "MCP response failure, aborting\n"); 15592 return (-1); 15593 } 15594 15595 return (0); 15596} 15597 15598static struct bxe_prev_list_node * 15599bxe_prev_path_get_entry(struct bxe_softc *sc) 15600{ 15601 struct bxe_prev_list_node *tmp; 15602 15603 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15604 if ((sc->pcie_bus == tmp->bus) && 15605 (sc->pcie_device == tmp->slot) && 15606 (SC_PATH(sc) == tmp->path)) { 15607 return (tmp); 15608 } 15609 } 15610 15611 return (NULL); 15612} 15613 15614static uint8_t 15615bxe_prev_is_path_marked(struct bxe_softc *sc) 15616{ 15617 struct bxe_prev_list_node *tmp; 15618 int rc = FALSE; 15619 15620 mtx_lock(&bxe_prev_mtx); 15621 15622 tmp = bxe_prev_path_get_entry(sc); 15623 if (tmp) { 15624 if (tmp->aer) { 15625 BLOGD(sc, DBG_LOAD, 15626 "Path %d/%d/%d was marked by AER\n", 15627 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15628 } else { 15629 rc = TRUE; 15630 BLOGD(sc, DBG_LOAD, 15631 "Path %d/%d/%d was already cleaned from previous drivers\n", 15632 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15633 } 15634 } 15635 15636 mtx_unlock(&bxe_prev_mtx); 15637 15638 return (rc); 15639} 15640 15641static int 15642bxe_prev_mark_path(struct bxe_softc *sc, 15643 uint8_t after_undi) 15644{ 15645 struct bxe_prev_list_node *tmp; 15646 15647 mtx_lock(&bxe_prev_mtx); 15648 15649 /* Check whether the entry for this path already exists */ 15650 tmp = bxe_prev_path_get_entry(sc); 15651 if (tmp) { 15652 if (!tmp->aer) { 15653 BLOGD(sc, DBG_LOAD, 15654 "Re-marking AER in path %d/%d/%d\n", 15655 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15656 } else { 15657 BLOGD(sc, DBG_LOAD, 15658 "Removing AER indication from path %d/%d/%d\n", 15659 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15660 tmp->aer 
= 0; 15661 } 15662 15663 mtx_unlock(&bxe_prev_mtx); 15664 return (0); 15665 } 15666 15667 mtx_unlock(&bxe_prev_mtx); 15668 15669 /* Create an entry for this path and add it */ 15670 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15671 (M_NOWAIT | M_ZERO)); 15672 if (!tmp) { 15673 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15674 return (-1); 15675 } 15676 15677 tmp->bus = sc->pcie_bus; 15678 tmp->slot = sc->pcie_device; 15679 tmp->path = SC_PATH(sc); 15680 tmp->aer = 0; 15681 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15682 15683 mtx_lock(&bxe_prev_mtx); 15684 15685 BLOGD(sc, DBG_LOAD, 15686 "Marked path %d/%d/%d - finished previous unload\n", 15687 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15688 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15689 15690 mtx_unlock(&bxe_prev_mtx); 15691 15692 return (0); 15693} 15694 15695static int 15696bxe_do_flr(struct bxe_softc *sc) 15697{ 15698 int i; 15699 15700 /* only E2 and onwards support FLR */ 15701 if (CHIP_IS_E1x(sc)) { 15702 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15703 return (-1); 15704 } 15705 15706 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15707 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15708 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15709 sc->devinfo.bc_ver); 15710 return (-1); 15711 } 15712 15713 /* Wait for Transaction Pending bit clean */ 15714 for (i = 0; i < 4; i++) { 15715 if (i) { 15716 DELAY(((1 << (i - 1)) * 100) * 1000); 15717 } 15718 15719 if (!bxe_is_pcie_pending(sc)) { 15720 goto clear; 15721 } 15722 } 15723 15724 BLOGE(sc, "PCIE transaction is not cleared, " 15725 "proceeding with reset anyway\n"); 15726 15727clear: 15728 15729 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15730 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15731 15732 return (0); 15733} 15734 15735struct bxe_mac_vals { 15736 uint32_t xmac_addr; 15737 uint32_t xmac_val; 15738 uint32_t emac_addr; 15739 uint32_t emac_val; 15740 uint32_t umac_addr; 15741 uint32_t umac_val; 15742 uint32_t bmac_addr; 15743 uint32_t bmac_val[2]; 15744}; 15745 15746static void 15747bxe_prev_unload_close_mac(struct bxe_softc *sc, 15748 struct bxe_mac_vals *vals) 15749{ 15750 uint32_t val, base_addr, offset, mask, reset_reg; 15751 uint8_t mac_stopped = FALSE; 15752 uint8_t port = SC_PORT(sc); 15753 uint32_t wb_data[2]; 15754 15755 /* reset addresses as they also mark which values were changed */ 15756 vals->bmac_addr = 0; 15757 vals->umac_addr = 0; 15758 vals->xmac_addr = 0; 15759 vals->emac_addr = 0; 15760 15761 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15762 15763 if (!CHIP_IS_E3(sc)) { 15764 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15765 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15766 if ((mask & reset_reg) && val) { 15767 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15768 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15769 : NIG_REG_INGRESS_BMAC0_MEM; 15770 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15771 : BIGMAC_REGISTER_BMAC_CONTROL; 15772 15773 /* 15774 * use rd/wr since we cannot use dmae. This is safe 15775 * since MCP won't access the bus due to the request 15776 * to unload, and no function on the path can be 15777 * loaded at this time. 
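 * (For illustration, the wide-bus access below is just a pair of 32-bit
 * GRC operations, conceptually:
 *   lo = REG_RD(sc, addr);  hi = REG_RD(sc, addr + 0x4);
 *   lo &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
 *   REG_WR(sc, addr, lo);   REG_WR(sc, addr + 0x4, hi);
 * where addr is the BMAC control register captured in vals->bmac_addr.)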
15778 */ 15779 wb_data[0] = REG_RD(sc, base_addr + offset); 15780 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15781 vals->bmac_addr = base_addr + offset; 15782 vals->bmac_val[0] = wb_data[0]; 15783 vals->bmac_val[1] = wb_data[1]; 15784 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15785 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15786 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15787 } 15788 15789 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15790 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15791 vals->emac_val = REG_RD(sc, vals->emac_addr); 15792 REG_WR(sc, vals->emac_addr, 0); 15793 mac_stopped = TRUE; 15794 } else { 15795 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15796 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15797 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15798 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15799 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15800 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15801 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15802 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15803 REG_WR(sc, vals->xmac_addr, 0); 15804 mac_stopped = TRUE; 15805 } 15806 15807 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15808 if (mask & reset_reg) { 15809 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15810 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15811 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15812 vals->umac_val = REG_RD(sc, vals->umac_addr); 15813 REG_WR(sc, vals->umac_addr, 0); 15814 mac_stopped = TRUE; 15815 } 15816 } 15817 15818 if (mac_stopped) { 15819 DELAY(20000); 15820 } 15821} 15822 15823#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15824#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15825#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15826#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15827 15828static void 15829bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15830 uint8_t port, 15831 uint8_t inc) 15832{ 15833 uint16_t rcq, bd; 15834 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15835 15836 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15837 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15838 15839 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15840 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15841 15842 BLOGD(sc, DBG_LOAD, 15843 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15844 port, bd, rcq); 15845} 15846 15847static int 15848bxe_prev_unload_common(struct bxe_softc *sc) 15849{ 15850 uint32_t reset_reg, tmp_reg = 0, rc; 15851 uint8_t prev_undi = FALSE; 15852 struct bxe_mac_vals mac_vals; 15853 uint32_t timer_count = 1000; 15854 uint32_t prev_brb; 15855 15856 /* 15857 * It is possible a previous function received 'common' answer, 15858 * but hasn't loaded yet, therefore creating a scenario of 15859 * multiple functions receiving 'common' on the same path. 
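 * To keep that benign, the flow below first checks whether this path was
 * already cleaned (bxe_prev_is_path_marked) and, after a successful clean,
 * records it (bxe_prev_mark_path) so that any later 'common' callers just
 * complete the MCP handshake via bxe_prev_mcp_done().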
15860 */ 15861 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15862 15863 memset(&mac_vals, 0, sizeof(mac_vals)); 15864 15865 if (bxe_prev_is_path_marked(sc)) { 15866 return (bxe_prev_mcp_done(sc)); 15867 } 15868 15869 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15870 15871 /* Reset should be performed after BRB is emptied */ 15872 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15873 /* Close the MAC Rx to prevent BRB from filling up */ 15874 bxe_prev_unload_close_mac(sc, &mac_vals); 15875 15876 /* close LLH filters towards the BRB */ 15877 elink_set_rx_filter(&sc->link_params, 0); 15878 15879 /* 15880 * Check if the UNDI driver was previously loaded. 15881 * UNDI driver initializes CID offset for normal bell to 0x7 15882 */ 15883 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15884 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15885 if (tmp_reg == 0x7) { 15886 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15887 prev_undi = TRUE; 15888 /* clear the UNDI indication */ 15889 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15890 /* clear possible idle check errors */ 15891 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15892 } 15893 } 15894 15895 /* wait until BRB is empty */ 15896 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15897 while (timer_count) { 15898 prev_brb = tmp_reg; 15899 15900 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15901 if (!tmp_reg) { 15902 break; 15903 } 15904 15905 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15906 15907 /* reset timer as long as BRB actually gets emptied */ 15908 if (prev_brb > tmp_reg) { 15909 timer_count = 1000; 15910 } else { 15911 timer_count--; 15912 } 15913 15914 /* If UNDI resides in memory, manually increment it */ 15915 if (prev_undi) { 15916 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15917 } 15918 15919 DELAY(10); 15920 } 15921 15922 if (!timer_count) { 15923 BLOGE(sc, "Failed to empty BRB\n"); 15924 } 15925 } 15926 15927 /* No packets are in the pipeline, path is ready for reset */ 15928 bxe_reset_common(sc); 15929 15930 if (mac_vals.xmac_addr) { 15931 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15932 } 15933 if (mac_vals.umac_addr) { 15934 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15935 } 15936 if (mac_vals.emac_addr) { 15937 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15938 } 15939 if (mac_vals.bmac_addr) { 15940 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15941 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15942 } 15943 15944 rc = bxe_prev_mark_path(sc, prev_undi); 15945 if (rc) { 15946 bxe_prev_mcp_done(sc); 15947 return (rc); 15948 } 15949 15950 return (bxe_prev_mcp_done(sc)); 15951} 15952 15953static int 15954bxe_prev_unload_uncommon(struct bxe_softc *sc) 15955{ 15956 int rc; 15957 15958 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15959 15960 /* Test if previous unload process was already finished for this path */ 15961 if (bxe_prev_is_path_marked(sc)) { 15962 return (bxe_prev_mcp_done(sc)); 15963 } 15964 15965 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15966 15967 /* 15968 * If function has FLR capabilities, and existing FW version matches 15969 * the one required, then FLR will be sufficient to clean any residue 15970 * left by previous driver 15971 */ 15972 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15973 if (!rc) { 15974 /* fw version is good */ 15975 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15976 rc = bxe_do_flr(sc); 15977 } 15978 15979 if (!rc) { 15980 /* FLR was performed */ 15981 BLOGD(sc, DBG_LOAD, 
"FLR successful\n"); 15982 return (0); 15983 } 15984 15985 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15986 15987 /* Close the MCP request, return failure*/ 15988 rc = bxe_prev_mcp_done(sc); 15989 if (!rc) { 15990 rc = BXE_PREV_WAIT_NEEDED; 15991 } 15992 15993 return (rc); 15994} 15995 15996static int 15997bxe_prev_unload(struct bxe_softc *sc) 15998{ 15999 int time_counter = 10; 16000 uint32_t fw, hw_lock_reg, hw_lock_val; 16001 uint32_t rc = 0; 16002 16003 /* 16004 * Clear HW from errors which may have resulted from an interrupted 16005 * DMAE transaction. 16006 */ 16007 bxe_prev_interrupted_dmae(sc); 16008 16009 /* Release previously held locks */ 16010 hw_lock_reg = 16011 (SC_FUNC(sc) <= 5) ? 16012 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 16013 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 16014 16015 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 16016 if (hw_lock_val) { 16017 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 16018 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 16019 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 16020 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 16021 } 16022 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 16023 REG_WR(sc, hw_lock_reg, 0xffffffff); 16024 } else { 16025 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 16026 } 16027 16028 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 16029 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 16030 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 16031 } 16032 16033 do { 16034 /* Lock MCP using an unload request */ 16035 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 16036 if (!fw) { 16037 BLOGE(sc, "MCP response failure, aborting\n"); 16038 rc = -1; 16039 break; 16040 } 16041 16042 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 16043 rc = bxe_prev_unload_common(sc); 16044 break; 16045 } 16046 16047 /* non-common reply from MCP night require looping */ 16048 rc = bxe_prev_unload_uncommon(sc); 16049 if (rc != BXE_PREV_WAIT_NEEDED) { 16050 break; 16051 } 16052 16053 DELAY(20000); 16054 } while (--time_counter); 16055 16056 if (!time_counter || rc) { 16057 BLOGE(sc, "Failed to unload previous driver!\n"); 16058 rc = -1; 16059 } 16060 16061 return (rc); 16062} 16063 16064void 16065bxe_dcbx_set_state(struct bxe_softc *sc, 16066 uint8_t dcb_on, 16067 uint32_t dcbx_enabled) 16068{ 16069 if (!CHIP_IS_E1x(sc)) { 16070 sc->dcb_state = dcb_on; 16071 sc->dcbx_enabled = dcbx_enabled; 16072 } else { 16073 sc->dcb_state = FALSE; 16074 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 16075 } 16076 BLOGD(sc, DBG_LOAD, 16077 "DCB state [%s:%s]\n", 16078 dcb_on ? "ON" : "OFF", 16079 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 16080 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 16081 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
16082 "on-chip with negotiation" : "invalid"); 16083} 16084 16085/* must be called after sriov-enable */ 16086static int 16087bxe_set_qm_cid_count(struct bxe_softc *sc) 16088{ 16089 int cid_count = BXE_L2_MAX_CID(sc); 16090 16091 if (IS_SRIOV(sc)) { 16092 cid_count += BXE_VF_CIDS; 16093 } 16094 16095 if (CNIC_SUPPORT(sc)) { 16096 cid_count += CNIC_CID_MAX; 16097 } 16098 16099 return (roundup(cid_count, QM_CID_ROUND)); 16100} 16101 16102static void 16103bxe_init_multi_cos(struct bxe_softc *sc) 16104{ 16105 int pri, cos; 16106 16107 uint32_t pri_map = 0; /* XXX change to user config */ 16108 16109 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16110 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16111 if (cos < sc->max_cos) { 16112 sc->prio_to_cos[pri] = cos; 16113 } else { 16114 BLOGW(sc, "Invalid COS %d for priority %d " 16115 "(max COS is %d), setting to 0\n", 16116 cos, pri, (sc->max_cos - 1)); 16117 sc->prio_to_cos[pri] = 0; 16118 } 16119 } 16120} 16121 16122static int 16123bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16124{ 16125 struct bxe_softc *sc; 16126 int error, result; 16127 16128 result = 0; 16129 error = sysctl_handle_int(oidp, &result, 0, req); 16130 16131 if (error || !req->newptr) { 16132 return (error); 16133 } 16134 16135 if (result == 1) { 16136 uint32_t temp; 16137 sc = (struct bxe_softc *)arg1; 16138 16139 BLOGI(sc, "... dumping driver state ...\n"); 16140 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 16141 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 16142 } 16143 16144 return (error); 16145} 16146 16147static int 16148bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16149{ 16150 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16151 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16152 uint32_t *offset; 16153 uint64_t value = 0; 16154 int index = (int)arg2; 16155 16156 if (index >= BXE_NUM_ETH_STATS) { 16157 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16158 return (-1); 16159 } 16160 16161 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16162 16163 switch (bxe_eth_stats_arr[index].size) { 16164 case 4: 16165 value = (uint64_t)*offset; 16166 break; 16167 case 8: 16168 value = HILO_U64(*offset, *(offset + 1)); 16169 break; 16170 default: 16171 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16172 index, bxe_eth_stats_arr[index].size); 16173 return (-1); 16174 } 16175 16176 return (sysctl_handle_64(oidp, &value, 0, req)); 16177} 16178 16179static int 16180bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16181{ 16182 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16183 uint32_t *eth_stats; 16184 uint32_t *offset; 16185 uint64_t value = 0; 16186 uint32_t q_stat = (uint32_t)arg2; 16187 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16188 uint32_t index = (q_stat & 0xffff); 16189 16190 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16191 16192 if (index >= BXE_NUM_ETH_Q_STATS) { 16193 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16194 return (-1); 16195 } 16196 16197 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16198 16199 switch (bxe_eth_q_stats_arr[index].size) { 16200 case 4: 16201 value = (uint64_t)*offset; 16202 break; 16203 case 8: 16204 value = HILO_U64(*offset, *(offset + 1)); 16205 break; 16206 default: 16207 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16208 index, bxe_eth_q_stats_arr[index].size); 16209 return (-1); 16210 } 16211 16212 return (sysctl_handle_64(oidp, &value, 0, req)); 16213} 16214 16215static void 16216bxe_add_sysctls(struct bxe_softc *sc) 
16217{ 16218 struct sysctl_ctx_list *ctx; 16219 struct sysctl_oid_list *children; 16220 struct sysctl_oid *queue_top, *queue; 16221 struct sysctl_oid_list *queue_top_children, *queue_children; 16222 char queue_num_buf[32]; 16223 uint32_t q_stat; 16224 int i, j; 16225 16226 ctx = device_get_sysctl_ctx(sc->dev); 16227 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16228 16229 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16230 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16231 "version"); 16232 16233 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16234 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16235 "bootcode version"); 16236 16237 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16238 BCM_5710_FW_MAJOR_VERSION, 16239 BCM_5710_FW_MINOR_VERSION, 16240 BCM_5710_FW_REVISION_VERSION, 16241 BCM_5710_FW_ENGINEERING_VERSION); 16242 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16243 CTLFLAG_RD, sc->fw_ver_str, 0, 16244 "firmware version"); 16245 16246 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16247 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16248 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16249 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16250 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16251 "Unknown")); 16252 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16253 CTLFLAG_RD, sc->mf_mode_str, 0, 16254 "multifunction mode"); 16255 16256 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16257 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16258 "multifunction vnics per port"); 16259 16260 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16261 CTLFLAG_RD, sc->mac_addr_str, 0, 16262 "mac address"); 16263 16264 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16265 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16266 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16267 (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : 16268 "???GT/s"), 16269 sc->devinfo.pcie_link_width); 16270 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16271 CTLFLAG_RD, sc->pci_link_str, 0, 16272 "pci link status"); 16273 16274 sc->debug = bxe_debug; 16275 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16276 CTLFLAG_RW, &sc->debug, 16277 "debug logging mode"); 16278 16279 sc->rx_budget = bxe_rx_budget; 16280 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16281 CTLFLAG_RW, &sc->rx_budget, 0, 16282 "rx processing budget"); 16283 16284 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16285 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16286 bxe_sysctl_state, "IU", "dump driver state"); 16287 16288 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16290 bxe_eth_stats_arr[i].string, 16291 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16292 bxe_sysctl_eth_stat, "LU", 16293 bxe_eth_stats_arr[i].string); 16294 } 16295 16296 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16297 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16298 CTLFLAG_RD, NULL, "queue"); 16299 queue_top_children = SYSCTL_CHILDREN(queue_top); 16300 16301 for (i = 0; i < sc->num_queues; i++) { 16302 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16303 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16304 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16305 queue_num_buf, CTLFLAG_RD, NULL, 16306 "single queue"); 16307 queue_children = SYSCTL_CHILDREN(queue); 16308 16309 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16310 q_stat = ((i << 16) | j); 16311 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16312 bxe_eth_q_stats_arr[j].string, 16313 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16314 bxe_sysctl_eth_q_stat, "LU", 16315 bxe_eth_q_stats_arr[j].string); 16316 } 16317 } 16318} 16319 16320/* 16321 * Device attach function. 16322 * 16323 * Allocates device resources, performs secondary chip identification, and 16324 * initializes driver instance variables. This function is called from driver 16325 * load after a successful probe. 
16326 * 16327 * Returns: 16328 * 0 = Success, >0 = Failure 16329 */ 16330static int 16331bxe_attach(device_t dev) 16332{ 16333 struct bxe_softc *sc; 16334 16335 sc = device_get_softc(dev); 16336 16337 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16338 16339 sc->state = BXE_STATE_CLOSED; 16340 16341 sc->dev = dev; 16342 sc->unit = device_get_unit(dev); 16343 16344 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16345 16346 sc->pcie_bus = pci_get_bus(dev); 16347 sc->pcie_device = pci_get_slot(dev); 16348 sc->pcie_func = pci_get_function(dev); 16349 16350 /* enable bus master capability */ 16351 pci_enable_busmaster(dev); 16352 16353 /* get the BARs */ 16354 if (bxe_allocate_bars(sc) != 0) { 16355 return (ENXIO); 16356 } 16357 16358 /* initialize the mutexes */ 16359 bxe_init_mutexes(sc); 16360 16361 /* prepare the periodic callout */ 16362 callout_init(&sc->periodic_callout, 0); 16363 16364 /* prepare the chip taskqueue */ 16365 sc->chip_tq_flags = CHIP_TQ_NONE; 16366 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16367 "bxe%d_chip_tq", sc->unit); 16368 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16369 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16370 taskqueue_thread_enqueue, 16371 &sc->chip_tq); 16372 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16373 "%s", sc->chip_tq_name); 16374 16375 /* get device info and set params */ 16376 if (bxe_get_device_info(sc) != 0) { 16377 BLOGE(sc, "getting device info\n"); 16378 bxe_deallocate_bars(sc); 16379 pci_disable_busmaster(dev); 16380 return (ENXIO); 16381 } 16382 16383 /* get final misc params */ 16384 bxe_get_params(sc); 16385 16386 /* set the default MTU (changed via ifconfig) */ 16387 sc->mtu = ETHERMTU; 16388 16389 bxe_set_modes_bitmap(sc); 16390 16391 /* XXX 16392 * If in AFEX mode and the function is configured for FCoE 16393 * then bail... no L2 allowed. 
16394 */ 16395 16396 /* get phy settings from shmem and 'and' against admin settings */ 16397 bxe_get_phy_info(sc); 16398 16399 /* initialize the FreeBSD ifnet interface */ 16400 if (bxe_init_ifnet(sc) != 0) { 16401 bxe_release_mutexes(sc); 16402 bxe_deallocate_bars(sc); 16403 pci_disable_busmaster(dev); 16404 return (ENXIO); 16405 } 16406 16407 /* allocate device interrupts */ 16408 if (bxe_interrupt_alloc(sc) != 0) { 16409 if (sc->ifp != NULL) { 16410 ether_ifdetach(sc->ifp); 16411 } 16412 ifmedia_removeall(&sc->ifmedia); 16413 bxe_release_mutexes(sc); 16414 bxe_deallocate_bars(sc); 16415 pci_disable_busmaster(dev); 16416 return (ENXIO); 16417 } 16418 16419 /* allocate ilt */ 16420 if (bxe_alloc_ilt_mem(sc) != 0) { 16421 bxe_interrupt_free(sc); 16422 if (sc->ifp != NULL) { 16423 ether_ifdetach(sc->ifp); 16424 } 16425 ifmedia_removeall(&sc->ifmedia); 16426 bxe_release_mutexes(sc); 16427 bxe_deallocate_bars(sc); 16428 pci_disable_busmaster(dev); 16429 return (ENXIO); 16430 } 16431 16432 /* allocate the host hardware/software hsi structures */ 16433 if (bxe_alloc_hsi_mem(sc) != 0) { 16434 bxe_free_ilt_mem(sc); 16435 bxe_interrupt_free(sc); 16436 if (sc->ifp != NULL) { 16437 ether_ifdetach(sc->ifp); 16438 } 16439 ifmedia_removeall(&sc->ifmedia); 16440 bxe_release_mutexes(sc); 16441 bxe_deallocate_bars(sc); 16442 pci_disable_busmaster(dev); 16443 return (ENXIO); 16444 } 16445 16446 /* need to reset chip if UNDI was active */ 16447 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16448 /* init fw_seq */ 16449 sc->fw_seq = 16450 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16451 DRV_MSG_SEQ_NUMBER_MASK); 16452 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16453 bxe_prev_unload(sc); 16454 } 16455 16456#if 1 16457 /* XXX */ 16458 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16459#else 16460 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16461 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16462 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16463 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16464 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16465 bxe_dcbx_init_params(sc); 16466 } else { 16467 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16468 } 16469#endif 16470 16471 /* calculate qm_cid_count */ 16472 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16473 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16474 16475 sc->max_cos = 1; 16476 bxe_init_multi_cos(sc); 16477 16478 bxe_add_sysctls(sc); 16479 16480 return (0); 16481} 16482 16483/* 16484 * Device detach function. 16485 * 16486 * Stops the controller, resets the controller, and releases resources. 
16487 * 16488 * Returns: 16489 * 0 = Success, >0 = Failure 16490 */ 16491static int 16492bxe_detach(device_t dev) 16493{ 16494 struct bxe_softc *sc; 16495 if_t ifp; 16496 16497 sc = device_get_softc(dev); 16498 16499 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16500 16501 ifp = sc->ifp; 16502 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16503 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16504 return(EBUSY); 16505 } 16506 16507 /* stop the periodic callout */ 16508 bxe_periodic_stop(sc); 16509 16510 /* stop the chip taskqueue */ 16511 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16512 if (sc->chip_tq) { 16513 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16514 taskqueue_free(sc->chip_tq); 16515 sc->chip_tq = NULL; 16516 } 16517 16518 /* stop and reset the controller if it was open */ 16519 if (sc->state != BXE_STATE_CLOSED) { 16520 BXE_CORE_LOCK(sc); 16521 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16522 BXE_CORE_UNLOCK(sc); 16523 } 16524 16525 /* release the network interface */ 16526 if (ifp != NULL) { 16527 ether_ifdetach(ifp); 16528 } 16529 ifmedia_removeall(&sc->ifmedia); 16530 16531 /* XXX do the following based on driver state... */ 16532 16533 /* free the host hardware/software hsi structures */ 16534 bxe_free_hsi_mem(sc); 16535 16536 /* free ilt */ 16537 bxe_free_ilt_mem(sc); 16538 16539 /* release the interrupts */ 16540 bxe_interrupt_free(sc); 16541 16542 /* Release the mutexes*/ 16543 bxe_release_mutexes(sc); 16544 16545 /* Release the PCIe BAR mapped memory */ 16546 bxe_deallocate_bars(sc); 16547 16548 /* Release the FreeBSD interface. */ 16549 if (sc->ifp != NULL) { 16550 if_free(sc->ifp); 16551 } 16552 16553 pci_disable_busmaster(dev); 16554 16555 return (0); 16556} 16557 16558/* 16559 * Device shutdown function. 16560 * 16561 * Stops and resets the controller. 16562 * 16563 * Returns: 16564 * Nothing 16565 */ 16566static int 16567bxe_shutdown(device_t dev) 16568{ 16569 struct bxe_softc *sc; 16570 16571 sc = device_get_softc(dev); 16572 16573 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16574 16575 /* stop the periodic callout */ 16576 bxe_periodic_stop(sc); 16577 16578 BXE_CORE_LOCK(sc); 16579 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16580 BXE_CORE_UNLOCK(sc); 16581 16582 return (0); 16583} 16584 16585void 16586bxe_igu_ack_sb(struct bxe_softc *sc, 16587 uint8_t igu_sb_id, 16588 uint8_t segment, 16589 uint16_t index, 16590 uint8_t op, 16591 uint8_t update) 16592{ 16593 uint32_t igu_addr = sc->igu_base_addr; 16594 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16595 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16596} 16597 16598static void 16599bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16600 uint8_t func, 16601 uint8_t idu_sb_id, 16602 uint8_t is_pf) 16603{ 16604 uint32_t data, ctl, cnt = 100; 16605 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16606 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16607 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16608 uint32_t sb_bit = 1 << (idu_sb_id%32); 16609 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16610 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16611 16612 /* Not supported in BC mode */ 16613 if (CHIP_INT_MODE_IS_BC(sc)) { 16614 return; 16615 } 16616 16617 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16618 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16619 IGU_REGULAR_CLEANUP_SET | 16620 IGU_REGULAR_BCLEANUP); 16621 16622 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16623 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16624 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16625 16626 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16627 data, igu_addr_data); 16628 REG_WR(sc, igu_addr_data, data); 16629 16630 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16631 BUS_SPACE_BARRIER_WRITE); 16632 mb(); 16633 16634 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16635 ctl, igu_addr_ctl); 16636 REG_WR(sc, igu_addr_ctl, ctl); 16637 16638 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16639 BUS_SPACE_BARRIER_WRITE); 16640 mb(); 16641 16642 /* wait for clean up to finish */ 16643 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16644 DELAY(20000); 16645 } 16646 16647 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16648 BLOGD(sc, DBG_LOAD, 16649 "Unable to finish IGU cleanup: " 16650 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16651 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16652 } 16653} 16654 16655static void 16656bxe_igu_clear_sb(struct bxe_softc *sc, 16657 uint8_t idu_sb_id) 16658{ 16659 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16660} 16661 16662 16663 16664 16665 16666 16667 16668/*******************/ 16669/* ECORE CALLBACKS */ 16670/*******************/ 16671 16672static void 16673bxe_reset_common(struct bxe_softc *sc) 16674{ 16675 uint32_t val = 0x1400; 16676 16677 /* reset_common */ 16678 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16679 16680 if (CHIP_IS_E3(sc)) { 16681 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16682 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16683 } 16684 16685 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16686} 16687 16688static void 16689bxe_common_init_phy(struct bxe_softc *sc) 16690{ 16691 uint32_t shmem_base[2]; 16692 uint32_t shmem2_base[2]; 16693 16694 /* Avoid common init in case MFW supports LFA */ 16695 if (SHMEM2_RD(sc, size) > 16696 (uint32_t)offsetof(struct shmem2_region, 16697 lfa_host_addr[SC_PORT(sc)])) { 16698 return; 16699 } 16700 16701 shmem_base[0] = sc->devinfo.shmem_base; 16702 shmem2_base[0] = sc->devinfo.shmem2_base; 16703 16704 if (!CHIP_IS_E1x(sc)) { 16705 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16706 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16707 } 16708 16709 bxe_acquire_phy_lock(sc); 16710 elink_common_init_phy(sc, shmem_base, shmem2_base, 16711 sc->devinfo.chip_id, 0); 16712 bxe_release_phy_lock(sc); 16713} 16714 16715static void 16716bxe_pf_disable(struct bxe_softc *sc) 16717{ 16718 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16719 16720 val &= ~IGU_PF_CONF_FUNC_EN; 16721 16722 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16723 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16724 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16725} 16726 16727static void 16728bxe_init_pxp(struct bxe_softc *sc) 16729{ 16730 uint16_t devctl; 16731 int r_order, w_order; 16732 16733 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16734 16735 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", 
devctl); 16736 16737 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16738 16739 if (sc->mrrs == -1) { 16740 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16741 } else { 16742 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16743 r_order = sc->mrrs; 16744 } 16745 16746 ecore_init_pxp_arb(sc, r_order, w_order); 16747} 16748 16749static uint32_t 16750bxe_get_pretend_reg(struct bxe_softc *sc) 16751{ 16752 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16753 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16754 return (base + (SC_ABS_FUNC(sc)) * stride); 16755} 16756 16757/* 16758 * Called only on E1H or E2. 16759 * When pretending to be PF, the pretend value is the function number 0..7. 16760 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16761 * combination. 16762 */ 16763static int 16764bxe_pretend_func(struct bxe_softc *sc, 16765 uint16_t pretend_func_val) 16766{ 16767 uint32_t pretend_reg; 16768 16769 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16770 return (-1); 16771 } 16772 16773 /* get my own pretend register */ 16774 pretend_reg = bxe_get_pretend_reg(sc); 16775 REG_WR(sc, pretend_reg, pretend_func_val); 16776 REG_RD(sc, pretend_reg); 16777 return (0); 16778} 16779 16780static void 16781bxe_iov_init_dmae(struct bxe_softc *sc) 16782{ 16783 return; 16784#if 0 16785 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16786 16787 if (!IS_SRIOV(sc)) { 16788 return; 16789 } 16790 16791 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16792#endif 16793} 16794 16795#if 0 16796static int 16797bxe_iov_init_ilt(struct bxe_softc *sc, 16798 uint16_t line) 16799{ 16800 return (line); 16801#if 0 16802 int i; 16803 struct ecore_ilt* ilt = sc->ilt; 16804 16805 if (!IS_SRIOV(sc)) { 16806 return (line); 16807 } 16808 16809 /* set vfs ilt lines */ 16810 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16811 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16812 ilt->lines[line+i].page = hw_cxt->addr; 16813 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16814 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16815 } 16816 return (line+i); 16817#endif 16818} 16819#endif 16820 16821static void 16822bxe_iov_init_dq(struct bxe_softc *sc) 16823{ 16824 return; 16825#if 0 16826 if (!IS_SRIOV(sc)) { 16827 return; 16828 } 16829 16830 /* Set the DQ such that the CID reflects the abs_vfid */ 16831 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16832 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16833 16834 /* 16835 * Set VFs starting CID. If it is > 0, the preceding CIDs belong to 16836 * the PF L2 queues 16837 */ 16838 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16839 16840 /* The VF window size is the log2 of the max number of CIDs per VF */ 16841 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16842 16843 /* 16844 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16845 * the PF doorbell size although the 2 are independent.
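 * (Note: this block, like the other bxe_iov_* bodies above, is compiled
 * out via #if 0; the function returns before reaching it and the code is
 * kept only for reference.)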
16846 */ 16847 REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST, 16848 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); 16849 16850 /* 16851 * No security checks for now - 16852 * configure single rule (out of 16) mask = 0x1, value = 0x0, 16853 * CID range 0 - 0x1ffff 16854 */ 16855 REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1); 16856 REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0); 16857 REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 16858 REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 16859 16860 /* set the number of VF allowed doorbells to the full DQ range */ 16861 REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 16862 16863 /* set the VF doorbell threshold */ 16864 REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 16865#endif 16866} 16867 16868/* send a NIG loopback debug packet */ 16869static void 16870bxe_lb_pckt(struct bxe_softc *sc) 16871{ 16872 uint32_t wb_write[3]; 16873 16874 /* Ethernet source and destination addresses */ 16875 wb_write[0] = 0x55555555; 16876 wb_write[1] = 0x55555555; 16877 wb_write[2] = 0x20; /* SOP */ 16878 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16879 16880 /* NON-IP protocol */ 16881 wb_write[0] = 0x09000000; 16882 wb_write[1] = 0x55555555; 16883 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16884 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16885} 16886 16887/* 16888 * Some of the internal memories are not directly readable from the driver. 16889 * To test them we send debug packets. 16890 */ 16891static int 16892bxe_int_mem_test(struct bxe_softc *sc) 16893{ 16894 int factor; 16895 int count, i; 16896 uint32_t val = 0; 16897 16898 if (CHIP_REV_IS_FPGA(sc)) { 16899 factor = 120; 16900 } else if (CHIP_REV_IS_EMUL(sc)) { 16901 factor = 200; 16902 } else { 16903 factor = 1; 16904 } 16905 16906 /* disable inputs of parser neighbor blocks */ 16907 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16908 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16909 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16910 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16911 16912 /* write 0 to parser credits for CFC search request */ 16913 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16914 16915 /* send Ethernet packet */ 16916 bxe_lb_pckt(sc); 16917 16918 /* TODO do I reset NIG statistic?
*/ 16919 /* Wait until NIG register shows 1 packet of size 0x10 */ 16920 count = 1000 * factor; 16921 while (count) { 16922 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16923 val = *BXE_SP(sc, wb_data[0]); 16924 if (val == 0x10) { 16925 break; 16926 } 16927 16928 DELAY(10000); 16929 count--; 16930 } 16931 16932 if (val != 0x10) { 16933 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16934 return (-1); 16935 } 16936 16937 /* wait until PRS register shows 1 packet */ 16938 count = (1000 * factor); 16939 while (count) { 16940 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16941 if (val == 1) { 16942 break; 16943 } 16944 16945 DELAY(10000); 16946 count--; 16947 } 16948 16949 if (val != 0x1) { 16950 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16951 return (-2); 16952 } 16953 16954 /* Reset and init BRB, PRS */ 16955 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16956 DELAY(50000); 16957 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16958 DELAY(50000); 16959 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16960 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16961 16962 /* Disable inputs of parser neighbor blocks */ 16963 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16964 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16965 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16966 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16967 16968 /* Write 0 to parser credits for CFC search request */ 16969 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16970 16971 /* send 10 Ethernet packets */ 16972 for (i = 0; i < 10; i++) { 16973 bxe_lb_pckt(sc); 16974 } 16975 16976 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 16977 count = (1000 * factor); 16978 while (count) { 16979 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16980 val = *BXE_SP(sc, wb_data[0]); 16981 if (val == 0xb0) { 16982 break; 16983 } 16984 16985 DELAY(10000); 16986 count--; 16987 } 16988 16989 if (val != 0xb0) { 16990 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16991 return (-3); 16992 } 16993 16994 /* Wait until PRS register shows 2 packets */ 16995 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16996 if (val != 2) { 16997 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16998 } 16999 17000 /* Write 1 to parser credits for CFC search request */ 17001 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 17002 17003 /* Wait until PRS register shows 3 packets */ 17004 DELAY(10000 * factor); 17005 17006 /* the PRS register should now show 3 packets */ 17007 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 17008 if (val != 3) { 17009 BLOGE(sc, "PRS timeout val=0x%x\n", val); 17010 } 17011 17012 /* clear NIG EOP FIFO */ 17013 for (i = 0; i < 11; i++) { 17014 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 17015 } 17016 17017 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 17018 if (val != 1) { 17019 BLOGE(sc, "clear of NIG failed\n"); 17020 return (-4); 17021 } 17022 17023 /* Reset and init BRB, PRS, NIG */ 17024 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 17025 DELAY(50000); 17026 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 17027 DELAY(50000); 17028 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17029 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17030 if (!CNIC_SUPPORT(sc)) { 17031 /* set NIC mode */ 17032 REG_WR(sc, PRS_REG_NIC_MODE, 1); 17033 } 17034 17035 /* Enable inputs of parser neighbor blocks */ 17036 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 17037 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 17038 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 17039 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 17040 17041 return (0);
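    /*
     * (Reaching this point means both loopback runs were fully accounted
     * for by the NIG/PRS counters and the EOP FIFO drained, so the
     * internal memories are taken to be healthy.)
     */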
17042} 17043 17044static void 17045bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17046{ 17047 int is_required; 17048 uint32_t val; 17049 int port; 17050 17051 is_required = 0; 17052 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17053 SHARED_HW_CFG_FAN_FAILURE_MASK); 17054 17055 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17056 is_required = 1; 17057 } 17058 /* 17059 * The fan failure mechanism is usually related to the PHY type since 17060 * the power consumption of the board is affected by the PHY. Currently, 17061 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17062 */ 17063 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17064 for (port = PORT_0; port < PORT_MAX; port++) { 17065 is_required |= elink_fan_failure_det_req(sc, 17066 sc->devinfo.shmem_base, 17067 sc->devinfo.shmem2_base, 17068 port); 17069 } 17070 } 17071 17072 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17073 17074 if (is_required == 0) { 17075 return; 17076 } 17077 17078 /* Fan failure is indicated by SPIO 5 */ 17079 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17080 17081 /* set to active low mode */ 17082 val = REG_RD(sc, MISC_REG_SPIO_INT); 17083 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17084 REG_WR(sc, MISC_REG_SPIO_INT, val); 17085 17086 /* enable interrupt to signal the IGU */ 17087 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17088 val |= MISC_SPIO_SPIO5; 17089 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17090} 17091 17092static void 17093bxe_enable_blocks_attention(struct bxe_softc *sc) 17094{ 17095 uint32_t val; 17096 17097 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17098 if (!CHIP_IS_E1x(sc)) { 17099 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17100 } else { 17101 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17102 } 17103 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17104 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17105 /* 17106 * mask read length error interrupts in brb for parser 17107 * (parsing unit and 'checksum and crc' unit) 17108 * these errors are legal (PU reads fixed length and CAC can cause 17109 * read length error on truncated packets) 17110 */ 17111 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17112 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17113 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17114 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17115 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17116 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17117/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17118/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17119 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17120 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17121 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17122/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17123/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17124 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17125 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17126 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17127 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17128/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17129/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17130 17131 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17132 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17133 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17134 if (!CHIP_IS_E1x(sc)) { 17135 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17136 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17137 } 17138 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17139 17140 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17141 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17142 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 17143/* 
REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17144 17145 if (!CHIP_IS_E1x(sc)) { 17146 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17147 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17148 } 17149 17150 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17151 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17152/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17153 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17154} 17155 17156/** 17157 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17158 * 17159 * @sc: driver handle 17160 */ 17161static int 17162bxe_init_hw_common(struct bxe_softc *sc) 17163{ 17164 uint8_t abs_func_id; 17165 uint32_t val; 17166 17167 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17168 SC_ABS_FUNC(sc)); 17169 17170 /* 17171 * take the RESET lock to protect undi_unload flow from accessing 17172 * registers while we are resetting the chip 17173 */ 17174 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17175 17176 bxe_reset_common(sc); 17177 17178 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17179 17180 val = 0xfffc; 17181 if (CHIP_IS_E3(sc)) { 17182 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17183 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17184 } 17185 17186 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17187 17188 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17189 17190 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17191 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17192 17193 if (!CHIP_IS_E1x(sc)) { 17194 /* 17195 * In 4-port or 2-port mode we need to turn off master-enable for 17196 * everyone. After that we turn it back on for self. So, we disregard 17197 * multi-function, and always disable all functions on the given path, 17198 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17199 */ 17200 for (abs_func_id = SC_PATH(sc); 17201 abs_func_id < (E2_FUNC_MAX * 2); 17202 abs_func_id += 2) { 17203 if (abs_func_id == SC_ABS_FUNC(sc)) { 17204 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17205 continue; 17206 } 17207 17208 bxe_pretend_func(sc, abs_func_id); 17209 17210 /* clear pf enable */ 17211 bxe_pf_disable(sc); 17212 17213 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17214 } 17215 } 17216 17217 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17218 17219 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17220 17221 if (CHIP_IS_E1(sc)) { 17222 /* 17223 * enable HW interrupt from PXP on USDM overflow 17224 * bit 16 on INT_MASK_0 17225 */ 17226 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17227 } 17228 17229 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17230 bxe_init_pxp(sc); 17231 17232#ifdef __BIG_ENDIAN 17233 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17234 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17235 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17236 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17237 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17238 /* make sure this value is 0 */ 17239 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17240 17241 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17242 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17243 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17244 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17245 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17246#endif 17247 17248 ecore_ilt_init_page_size(sc, INITOP_SET); 17249 17250 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17251 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17252 } 17253 17254 /* let the HW do its magic...
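 * (the 100 msec delay below gives PXP2 time to complete configuration;
 * PXP2_REG_RQ_CFG_DONE and PXP2_REG_RD_INIT_DONE are checked right after)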
*/ 17255 DELAY(100000); 17256 17257 /* finish PXP init */ 17258 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 17259 if (val != 1) { 17260 BLOGE(sc, "PXP2 CFG failed\n"); 17261 return (-1); 17262 } 17263 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 17264 if (val != 1) { 17265 BLOGE(sc, "PXP2 RD_INIT failed\n"); 17266 return (-1); 17267 } 17268 17269 BLOGD(sc, DBG_LOAD, "after pxp init\n"); 17270 17271 /* 17272 * Timer bug workaround for E2 only. We need to set the entire ILT to have 17273 * entries with value "0" and valid bit on. This needs to be done by the 17274 * first PF that is loaded in a path (i.e. common phase) 17275 */ 17276 if (!CHIP_IS_E1x(sc)) { 17277/* 17278 * In E2 there is a bug in the timers block that can cause function 6 / 7 17279 * (i.e. vnic3) to start even if it is marked as "scan-off". 17280 * This occurs when a different function (func2,3) is being marked 17281 * as "scan-off". Real-life scenario for example: if a driver is being 17282 * load-unloaded while func6,7 are down. This will cause the timer to access 17283 * the ilt, translate to a logical address and send a request to read/write. 17284 * Since the ilt for the function that is down is not valid, this will cause 17285 * a translation error which is unrecoverable. 17286 * The Workaround is intended to make sure that when this happens nothing 17287 * fatal will occur. The workaround: 17288 * 1. First PF driver which loads on a path will: 17289 * a. After taking the chip out of reset, by using pretend, 17290 * it will write "0" to the following registers of 17291 * the other vnics. 17292 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 17293 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 17294 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 17295 * And for itself it will write '1' to 17296 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 17297 * dmae-operations (writing to pram for example.) 17298 * note: can be done for only function 6,7 but cleaner this 17299 * way. 17300 * b. Write zero+valid to the entire ILT. 17301 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 17302 * VNIC3 (of that port). The range allocated will be the 17303 * entire ILT. This is needed to prevent ILT range error. 17304 * 2. Any PF driver load flow: 17305 * a. ILT update with the physical addresses of the allocated 17306 * logical pages. 17307 * b. Wait 20msec. - note that this timeout is needed to make 17308 * sure there are no requests in one of the PXP internal 17309 * queues with "old" ILT addresses. 17310 * c. PF enable in the PGLC. 17311 * d. Clear the was_error of the PF in the PGLC. (could have 17312 * occurred while driver was down) 17313 * e. PF enable in the CFC (WEAK + STRONG) 17314 * f. Timers scan enable 17315 * 3. PF driver unload flow: 17316 * a. Clear the Timers scan_en. 17317 * b. Polling for scan_on=0 for that PF. 17318 * c. Clear the PF enable bit in the PXP. 17319 * d. Clear the PF enable in the CFC (WEAK + STRONG) 17320 * e. Write zero+valid to all ILT entries (The valid bit must 17321 * stay set) 17322 * f. If this is VNIC 3 of a port then also init 17323 * first_timers_ilt_entry to zero and last_timers_ilt_entry 17324 * to the last entry in the ILT. 17325 * 17326 * Notes: 17327 * Currently the PF error in the PGLC is non-recoverable. 17328 * In the future there will be a recovery routine for this error. 17329 * Currently attention is masked. 17330 * Having an MCP lock on the load/unload process does not guarantee that 17331 * there is no Timer disable during Func6/7 enable.
This is because the 17332 * Timers scan is currently being cleared by the MCP on FLR. 17333 * Step 2.d can be done only for PF6/7 and the driver can also check if 17334 * there is an error before clearing it. But the flow above is simpler and 17335 * more general. 17336 * All ILT entries are written by zero+valid and not just PF6/7 17337 * ILT entries since in the future the ILT entries allocation for 17338 * PF-s might be dynamic. 17339 */ 17340 struct ilt_client_info ilt_cli; 17341 struct ecore_ilt ilt; 17342 17343 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17344 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17345 17346 /* initialize dummy TM client */ 17347 ilt_cli.start = 0; 17348 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17349 ilt_cli.client_num = ILT_CLIENT_TM; 17350 17351 /* 17352 * Step 1: set zeroes to all ilt page entries with valid bit on 17353 * Step 2: set the timers first/last ilt entry to point 17354 * to the entire range to prevent ILT range error for 3rd/4th 17355 * vnic (this code assumes existence of the vnic) 17356 * 17357 * both steps performed by call to ecore_ilt_client_init_op() 17358 * with dummy TM client 17359 * 17360 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17361 * and its sibling are split registers 17362 */ 17363 17364 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17365 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17366 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17367 17368 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17369 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17370 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17371 } 17372 17373 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17374 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17375 17376 if (!CHIP_IS_E1x(sc)) { 17377 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17378 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17379 17380 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17381 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17382 17383 /* let the HW do its magic...
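 * (this wait is a polling loop: up to 'factor' iterations of a 200 msec
 * delay, each re-reading ATC_REG_ATC_INIT_DONE)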
*/ 17384 do { 17385 DELAY(200000); 17386 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17387 } while (factor-- && (val != 1)); 17388 17389 if (val != 1) { 17390 BLOGE(sc, "ATC_INIT failed\n"); 17391 return (-1); 17392 } 17393 } 17394 17395 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17396 17397 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17398 17399 bxe_iov_init_dmae(sc); 17400 17401 /* clean the DMAE memory */ 17402 sc->dmae_ready = 1; 17403 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17404 17405 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17406 17407 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17408 17409 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17410 17411 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17412 17413 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17414 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17415 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17416 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17417 17418 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17419 17420 /* QM queues pointers table */ 17421 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17422 17423 /* soft reset pulse */ 17424 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17425 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17426 17427 if (CNIC_SUPPORT(sc)) 17428 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17429 17430 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17431 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17432 if (!CHIP_REV_IS_SLOW(sc)) { 17433 /* enable hw interrupt from doorbell Q */ 17434 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17435 } 17436 17437 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17438 17439 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17440 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17441 17442 if (!CHIP_IS_E1(sc)) { 17443 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17444 } 17445 17446 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17447 if (IS_MF_AFEX(sc)) { 17448 /* 17449 * configure that AFEX and VLAN headers must be 17450 * received in AFEX mode 17451 */ 17452 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17453 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17454 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17455 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17456 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17457 } else { 17458 /* 17459 * Bit-map indicating which L2 hdrs may appear 17460 * after the basic Ethernet header 17461 */ 17462 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17463 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17464 } 17465 } 17466 17467 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17468 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17469 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17470 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17471 17472 if (!CHIP_IS_E1x(sc)) { 17473 /* reset VFC memories */ 17474 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17475 VFC_MEMORIES_RST_REG_CAM_RST | 17476 VFC_MEMORIES_RST_REG_RAM_RST); 17477 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17478 VFC_MEMORIES_RST_REG_CAM_RST | 17479 VFC_MEMORIES_RST_REG_RAM_RST); 17480 17481 DELAY(20000); 17482 } 17483 17484 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17485 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17486 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17487 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17488 17489 /* sync semi rtc */ 17490 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17491 0x80000000); 17492 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17493 0x80000000); 17494 17495 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17496 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17497 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17498 17499 if (!CHIP_IS_E1x(sc)) { 17500 if (IS_MF_AFEX(sc)) { 17501 /* 17502 * configure that AFEX and VLAN headers must be 17503 * sent in AFEX mode 17504 */ 17505 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17506 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17507 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17508 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17509 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17510 } else { 17511 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17512 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17513 } 17514 } 17515 17516 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17517 17518 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17519 17520 if (CNIC_SUPPORT(sc)) { 17521 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17522 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17523 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17524 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17525 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17526 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17527 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17528 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17529 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17530 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17531 } 17532 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17533 17534 if (sizeof(union cdu_context) != 1024) { 17535 /* we currently assume that a context is 1024 bytes */ 17536 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17537 (long)sizeof(union cdu_context)); 17538 } 17539 17540 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17541 val = (4 << 24) + (0 << 12) + 1024; 17542 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17543 17544 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17545 17546 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17547 /* enable context validation interrupt from CFC */ 17548 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17549 17550 /* set the thresholds to prevent CFC/CDU race */ 17551 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17552 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17553 17554 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17555 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17556 } 17557 17558 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17559 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17560 17561 /* Reset PCIE errors for debug */ 17562 REG_WR(sc, 0x2814, 0xffffffff); 17563 REG_WR(sc, 0x3820, 0xffffffff); 17564 17565 if (!CHIP_IS_E1x(sc)) { 
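        /*
         * (The writes below appear to clear 'unsupported request' error
         * indications in the PCIe transaction layer for all functions,
         * continuing the PCIe error reset above; noted here for clarity.)
         */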
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    /*
     * Timers bug workaround: the common phase disables the pf_master
     * bit in PGLUE, so we need to enable it here before any DMAE
     * accesses are attempted. Therefore the enable-master is added
     * manually to the port phase (it also happens in the function
     * phase).
     */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    }

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);

    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    }

    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);

    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        if (IS_MF(sc)) {
            low = (BXE_ONE_PORT(sc) ? 160 : 246);
        } else if (sc->mtu > 4096) {
            if (BXE_ONE_PORT(sc)) {
                low = 160;
            } else {
                val = sc->mtu;
                /* (24*1024 + val*4)/256 -- see the sketch following this function */
                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
            }
        } else {
            low = (BXE_ONE_PORT(sc) ? 80 : 160);
        }
        high = (low + 56); /* 14*1024/256 */
        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    }

    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
        } else {
            /*
             * An ovlan header exists only in multi-function +
             * switch-dependent mode; in switch-independent mode
             * there are no ovlan headers.
             */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);

    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);

    ecore_init_block(sc, BLOCK_PBF, init_phase);

    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE mtu 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        DELAY(50);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    }

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    }

    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    }
    ecore_init_block(sc, BLOCK_HC, init_phase);

    ecore_init_block(sc, BLOCK_IGU, init_phase);

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /*
     * init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked, only bits 0-2 are in use
     * - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
     *   bits 4-7 are used for "per vn group attention"
     */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

    ecore_init_block(sc, BLOCK_NIG, init_phase);

    if (!CHIP_IS_E1x(sc)) {
        /*
         * Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
         */
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
        } else {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        }

        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
        }
    }
    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    }

    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));

        if (!CHIP_IS_E1x(sc)) {
            val = 0;
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
                val = 1;
                break;
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
                val = 2;
                break;
            }

            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        }
        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    }

    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        val = REG_RD(sc, reg_addr);
        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_addr, val);
    }

    return (0);
}
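/*
 * Worked example of the BRB pause-threshold math in bxe_init_hw_port()
 * above (E1/E1H, two-port, non-MF, MTU > 4096). Thresholds are in
 * 256-byte BRB blocks: low = (24*1024 + mtu*4)/256 rounded up, which
 * is 96 + mtu/64 (+1 if mtu is not a multiple of 64), and
 * high = low + 56 (i.e. 14*1024/256). For mtu = 9000:
 * low = 96 + 140 + 1 = 237 and high = 293. The helper below just
 * isolates that computation; its name is illustrative, not part of
 * the driver.
 */
#if 0
static inline uint32_t
bxe_brb_pause_low_sketch(uint32_t mtu)
{
    return (96 + (mtu / 64) + ((mtu % 64) ? 1 : 0));
}
#endif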
static uint32_t
bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
                       uint32_t reg,
                       uint32_t expected,
                       uint32_t poll_count)
{
    uint32_t cur_cnt = poll_count;
    uint32_t val;

    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (val);
}

static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
                              uint32_t reg,
                              char *msg,
                              uint32_t poll_cnt)
{
    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);

    if (val != 0) {
        BLOGE(sc, "%s usage count=%d\n", msg, val);
        return (1);
    }

    return (0);
}

/* Common routines with VF FLR cleanup */
static uint32_t
bxe_flr_clnup_poll_count(struct bxe_softc *sc)
{
    /* adjust polling timeout */
    if (CHIP_REV_IS_EMUL(sc)) {
        return (FLR_POLL_CNT * 2000);
    }

    if (CHIP_REV_IS_FPGA(sc)) {
        return (FLR_POLL_CNT * 120);
    }

    return (FLR_POLL_CNT);
}
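/*
 * For reference, the resulting poll budgets (derived from the
 * FLR_WAIT_USEC / FLR_WAIT_INTERVAL / FLR_POLL_CNT definitions near
 * the top of this file): ASIC = 200 polls x 50 usecs = ~10 msecs;
 * FPGA = 200 * 120 = 24,000 polls = ~1.2 secs; emulation =
 * 200 * 2000 = 400,000 polls = ~20 secs.
 */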
static int
bxe_poll_hw_usage_counters(struct bxe_softc *sc,
                           uint32_t poll_cnt)
{
    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
                                      "CFC PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      DORQ_REG_PF_USAGE_CNT,
                                      "DQ PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
                                      "QM PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
                                      "Timers VNIC usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
                                      "Timers NUM_SCANS usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DMAE PF usage counter to zero */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      "DMAE command register timed out",
                                      poll_cnt)) {
        return (1);
    }

    return (0);
}

#define OP_GEN_PARAM(param)                                             \
    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type)                                               \
    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index)                                          \
    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

static int
bxe_send_final_clnup(struct bxe_softc *sc,
                     uint8_t clnup_func,
                     uint32_t poll_cnt)
{
    uint32_t op_gen_command = 0;
    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
    int ret = 0;

    if (REG_RD(sc, comp_addr)) {
        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
        return (1);
    }

    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);

    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
        BLOGE(sc, "FW final cleanup did not succeed\n");
        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
              (REG_RD(sc, comp_addr)));
        bxe_panic(sc, ("FLR cleanup failed\n"));
        return (1);
    }

    /* Zero completion for next FLR */
    REG_WR(sc, comp_addr, 0);

    return (ret);
}
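/*
 * The final-cleanup command word built in bxe_send_final_clnup() above
 * is just the OP_GEN_* helper macros OR-ed together: each macro shifts
 * its argument into the corresponding SDM_OP_GEN_* field and masks it.
 * The composition is isolated below for clarity (sketch only, not part
 * of the driver):
 */
#if 0
static uint32_t
bxe_op_gen_sketch(uint8_t clnup_func)
{
    uint32_t cmd = 0;

    cmd |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);    /* completion param */
    cmd |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); /* completion type */
    cmd |= OP_GEN_AGG_VECT(clnup_func);                         /* aggregated vector index */
    cmd |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;            /* mark the index valid */

    return (cmd);
}
#endif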
static void
bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
                       struct pbf_pN_buf_regs *regs,
                       uint32_t poll_count)
{
    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
    uint32_t cur_cnt = poll_count;

    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
    crd = crd_start = REG_RD(sc, regs->crd);
    init_crd = REG_RD(sc, regs->init_crd);

    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
    BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

    while ((crd != init_crd) &&
           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
            (init_crd - crd_start))) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            crd = REG_RD(sc, regs->crd);
            crd_freed = REG_RD(sc, regs->crd_freed);
        } else {
            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void
bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
                       struct pbf_pN_cmd_regs *regs,
                       uint32_t poll_count)
{
    uint32_t occup, to_free, freed, freed_start;
    uint32_t cur_cnt = poll_count;

    occup = to_free = REG_RD(sc, regs->lines_occup);
    freed = freed_start = REG_RD(sc, regs->lines_freed);

    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

    while (occup &&
           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            occup = REG_RD(sc, regs->lines_occup);
            freed = REG_RD(sc, regs->lines_freed);
        } else {
            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
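/*
 * Both flush loops above track progress with
 * (uint32_t)((int32_t)now - (int32_t)start), so the comparison stays
 * correct modulo 2^32 even if the free-running hardware counter wraps
 * between the two samples. Minimal sketch of the idiom (illustrative
 * only, not part of the driver):
 */
#if 0
static inline uint32_t
bxe_counter_delta_sketch(uint32_t start, uint32_t now)
{
    /* distance travelled since 'start', wrap-safe */
    return ((uint32_t)((int32_t)now - (int32_t)start));
}
#endif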
static void
bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
{
    struct pbf_pN_cmd_regs cmd_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q0 :
            PBF_REG_P0_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
            PBF_REG_P0_TQ_LINES_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q1 :
            PBF_REG_P1_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
            PBF_REG_P1_TQ_LINES_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_LB_Q :
            PBF_REG_P4_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
            PBF_REG_P4_TQ_LINES_FREED_CNT}
    };

    struct pbf_pN_buf_regs buf_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q0 :
            PBF_REG_P0_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q0 :
            PBF_REG_P0_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q1 :
            PBF_REG_P1_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q1 :
            PBF_REG_P1_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_LB_Q :
            PBF_REG_P4_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_LB_Q :
            PBF_REG_P4_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
    };

    int i;

    /* Verify the command queues are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
    }

    /* Verify the transmission buffers are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
    }
}

static void
bxe_hw_enable_status(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, PBF_REG_DISABLE_PF);
    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
}
static int
bxe_pf_flr_clnup(struct bxe_softc *sc)
{
    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);

    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));

    /* Re-enable PF target read access */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

    /* Poll HW usage counters */
    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
        return (-1);
    }

    /* Zero the igu 'trailing edge' and 'leading edge' */

    /* Send the FW cleanup command */
    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
        return (-1);
    }

    /* ATC cleanup */

    /* Verify TX hw is flushed */
    bxe_tx_hw_flushed(sc, poll_cnt);

    /* Wait 100ms (not adjusted according to platform) */
    DELAY(100000);

    /* Verify no pending pci transactions */
    if (bxe_is_pcie_pending(sc)) {
        BLOGE(sc, "PCIE Transactions still pending\n");
    }

    /* Debug */
    bxe_hw_enable_status(sc);

    /*
     * Master enable - needed because WB DMAE writes are performed
     * before this register is re-initialized as part of the regular
     * function init.
     */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

    return (0);
}

#if 0
static void
bxe_init_searcher(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM);
    /* T1 hash bits value determines the T1 number of entries */
    REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}
#endif
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18203 18204 /* Re-enable PF target read access */ 18205 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18206 18207 /* Poll HW usage counters */ 18208 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18209 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18210 return (-1); 18211 } 18212 18213 /* Zero the igu 'trailing edge' and 'leading edge' */ 18214 18215 /* Send the FW cleanup command */ 18216 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18217 return (-1); 18218 } 18219 18220 /* ATC cleanup */ 18221 18222 /* Verify TX hw is flushed */ 18223 bxe_tx_hw_flushed(sc, poll_cnt); 18224 18225 /* Wait 100ms (not adjusted according to platform) */ 18226 DELAY(100000); 18227 18228 /* Verify no pending pci transactions */ 18229 if (bxe_is_pcie_pending(sc)) { 18230 BLOGE(sc, "PCIE Transactions still pending\n"); 18231 } 18232 18233 /* Debug */ 18234 bxe_hw_enable_status(sc); 18235 18236 /* 18237 * Master enable - Due to WB DMAE writes performed before this 18238 * register is re-initialized as part of the regular function init 18239 */ 18240 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18241 18242 return (0); 18243} 18244 18245#if 0 18246static void 18247bxe_init_searcher(struct bxe_softc *sc) 18248{ 18249 int port = SC_PORT(sc); 18250 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18251 /* T1 hash bits value determines the T1 number of entries */ 18252 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18253} 18254#endif 18255 18256static int 18257bxe_init_hw_func(struct bxe_softc *sc) 18258{ 18259 int port = SC_PORT(sc); 18260 int func = SC_FUNC(sc); 18261 int init_phase = PHASE_PF0 + func; 18262 struct ecore_ilt *ilt = sc->ilt; 18263 uint16_t cdu_ilt_start; 18264 uint32_t addr, val; 18265 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18266 int i, main_mem_width, rc; 18267 18268 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18269 18270 /* FLR cleanup */ 18271 if (!CHIP_IS_E1x(sc)) { 18272 rc = bxe_pf_flr_clnup(sc); 18273 if (rc) { 18274 BLOGE(sc, "FLR cleanup failed!\n"); 18275 // XXX bxe_fw_dump(sc); 18276 // XXX bxe_idle_chk(sc); 18277 return (rc); 18278 } 18279 } 18280 18281 /* set MSI reconfigure capability */ 18282 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18283 addr = (port ? 
    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

        /*
         * Turn on a single ISR mode in IGU if the driver is going to
         * use INT#x or MSI.
         */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
        }

        /*
         * Timers workaround bug: function init part.
         * Need to wait 20 msec after initializing ILT to make sure
         * there are no requests left in one of the PXP internal
         * queues with "old" ILT addresses.
         */
        DELAY(20000);

        /*
         * Master enable - needed because WB DMAE writes are performed
         * before this register is re-initialized as part of the
         * regular function init.
         */
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    }

    sc->dmae_ready = 1;

    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);

    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    }
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    bxe_iov_init_dq(sc);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);

    ecore_init_block(sc, BLOCK_CDU, init_phase);

    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

    if (IS_MF(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    }

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        ecore_init_block(sc, BLOCK_HC, init_phase);

    } else {
        int num_segs, sb_idx, prod_offset;

        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }

        ecore_init_block(sc, BLOCK_IGU, init_phase);

        if (!CHIP_IS_E1x(sc)) {
            int dsb_idx = 0;
            /*
             * Producer memory:
             * E2 mode: address 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod; 141 - PF1 attn prod;
             * 142 - PF2 attn prod; 143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - in backward compatible mode:
             * for non-default SBs, each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments (the order inside each
             * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods.
             * (See also the address sketch following this function.)
             */
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) *
                    num_segs;

                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                        (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                }
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            }

            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
            else
                dsb_idx = SC_VN(sc);

            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);

            /*
             * igu prods come in chunks of E1HVN_MAX (4) -
             * it does not matter what the current chip mode is
             */
            for (i = 0; i < (num_segs * E1HVN_MAX);
                 i += E1HVN_MAX) {
                addr = IGU_REG_PROD_CONS_MEMORY +
                    (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            }
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            } else {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            }
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);

            /*
             * !!! these should become driver const once
             * rf-tool supports split-68 const
             */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
        }
    }

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);

    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
        main_mem_base = HC_REG_MAIN_MEMORY +
            SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;

        val = REG_RD(sc, main_mem_prty_clr);
        if (val) {
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
                  val);
        }

        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        }
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
    }

#if 1
    /* Enable STORMs SP logging */
    REG_WR8(sc, BAR_USTRORM_INTMEM +
            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
#endif

    elink_phy_probe(&sc->link_params);

    return (0);
}
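/*
 * Sketch of the producer-memory addressing used by the IGU loops in
 * bxe_init_hw_func() above: every status block owns 'num_segs'
 * consecutive 4-byte producer slots starting at its prod_offset. The
 * helper name below is illustrative, not part of the driver.
 */
#if 0
static inline uint32_t
bxe_igu_prod_addr_sketch(struct bxe_softc *sc,
                         int sb_idx,
                         int seg,
                         int num_segs)
{
    uint32_t prod_offset = (sc->igu_base_sb + sb_idx) * num_segs;

    return (IGU_REG_PROD_CONS_MEMORY + (prod_offset + seg) * 4);
}
#endif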
static void
bxe_link_reset(struct bxe_softc *sc)
{
    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
    } else {
        if (!CHIP_REV_IS_SLOW(sc)) {
            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
        }
    }
}

static void
bxe_reset_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    /* reset physical Link */
    bxe_link_reset(sc);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    DELAY(100000);

    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
    if (val) {
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    }

    /* TODO: Close Doorbell port? */
}

static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t index,
           bus_addr_t addr)
{
    int reg;
    uint32_t wb_write[2];

    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
    } else {
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    }

    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
}

static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t func)
{
    uint32_t i, base = FUNC_ILT_BASE(func);
    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
    }
}
static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

#if 0
    if (CNIC_LOADED(sc)) {
        /* CNIC SB */
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
                (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
    }
#endif

    /* SP SB */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 seconds (200 polls of
         * 10 msecs each) for the timers scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ILT range for these timers.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() was called before reset_func() */
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    sc->dmae_ready = 0;
}
static int
bxe_gunzip_init(struct bxe_softc *sc)
{
    return (0);
}

static void
bxe_gunzip_end(struct bxe_softc *sc)
{
    return;
}

static int
bxe_init_firmware(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        ecore_init_e1_firmware(sc);
        sc->iro_array = e1_iro_arr;
    } else if (CHIP_IS_E1H(sc)) {
        ecore_init_e1h_firmware(sc);
        sc->iro_array = e1h_iro_arr;
    } else if (!CHIP_IS_E1x(sc)) {
        ecore_init_e2_firmware(sc);
        sc->iro_array = e2_iro_arr;
    } else {
        BLOGE(sc, "Unsupported chip revision\n");
        return (-1);
    }

    return (0);
}

static void
bxe_release_firmware(struct bxe_softc *sc)
{
    /* Do nothing */
    return;
}

static int
ecore_gunzip(struct bxe_softc *sc,
             const uint8_t *zbuf,
             int len)
{
    /* XXX : Implement... */
    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
    return (FALSE);
}

static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t addr,
                 uint32_t val)
{
    bxe_reg_wr_ind(sc, addr, val);
}

static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t phys_addr,
                          uint32_t addr,
                          uint32_t len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t addr,
                          size_t size,
                          uint32_t *data)
{
    uint8_t i;
    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}
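/*
 * Hypothetical usage of ecore_storm_memset_struct() above: the struct
 * is pushed to storm memory one dword at a time, so callers are
 * expected to pass a dword-aligned buffer whose size is a multiple of
 * 4 (note that the uint8_t loop index also caps 'size' at ~1 KB).
 * Sketch only; 'some_storm_addr' is a placeholder, not a real offset.
 */
#if 0
static void
bxe_storm_memset_example(struct bxe_softc *sc, uint32_t some_storm_addr)
{
    uint32_t data[4] = { 0, 0, 0, 0 }; /* 16 bytes -> four dword writes */

    ecore_storm_memset_struct(sc, some_storm_addr, sizeof(data), data);
}
#endif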