/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.78"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 VF 10GbE"
    },
#endif
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
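
/*
 * For reference (added note): since these are CTLFLAG_*TUN tunables, they
 * are normally set from the loader before the module initializes. A
 * hypothetical /boot/loader.conf sketch (values illustrative, not defaults):
 *
 *   hw.bxe.interrupt_mode=2   # prefer MSI-X
 *   hw.bxe.queue_count=8      # fixed number of queues
 *   hw.bxe.debug=0x1          # enable a debug logging class
 *
 * Read-only tunables (CTLFLAG_RDTUN) can then be inspected at runtime with
 * sysctl(8), e.g. "sysctl hw.bxe.queue_count".
 */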

#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
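
/*
 * Added commentary on the tables below: each entry's offset is in units of
 * 32-bit words, while "size" is in bytes. A size of 8 marks a 64-bit
 * counter kept as a hi/lo pair of 32-bit words, anchored by its "_hi"
 * field. As a worked example under that assumption: if
 * total_bytes_received_hi sat at byte offset 0x10 in struct bxe_eth_stats,
 * then STATS_OFFSET32(total_bytes_received_hi) == 0x10 / 4 == 4, and the
 * counter would occupy 32-bit words 4 (hi) and 5 (lo).
 */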
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
        8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
        8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
        8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
        8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
        8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
        4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
        4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
        8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
        8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
        8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
        8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
        4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
        8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
        8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
        8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
        8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
        8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
        8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
        8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
        8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
        8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
        8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
        8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
        8, STATS_FLAGS_FUNC, "tpa_bytes"},
#if 0
    { STATS_OFFSET32(recoverable_error),
        4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
        4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi),
        4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
        4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
        4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
        4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
        4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
        4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
        4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
"rx_ofld_frames_csum_ip"}, 492 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 493 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, 494 { STATS_OFFSET32(rx_budget_reached), 495 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, 496 { STATS_OFFSET32(tx_pkts), 497 4, STATS_FLAGS_FUNC, "tx_pkts"}, 498 { STATS_OFFSET32(tx_soft_errors), 499 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, 500 { STATS_OFFSET32(tx_ofld_frames_csum_ip), 501 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, 502 { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 503 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, 504 { STATS_OFFSET32(tx_ofld_frames_csum_udp), 505 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, 506 { STATS_OFFSET32(tx_ofld_frames_lso), 507 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, 508 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 509 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, 510 { STATS_OFFSET32(tx_encap_failures), 511 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, 512 { STATS_OFFSET32(tx_hw_queue_full), 513 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, 514 { STATS_OFFSET32(tx_hw_max_queue_depth), 515 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, 516 { STATS_OFFSET32(tx_dma_mapping_failure), 517 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, 518 { STATS_OFFSET32(tx_max_drbr_queue_depth), 519 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, 520 { STATS_OFFSET32(tx_window_violation_std), 521 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, 522 { STATS_OFFSET32(tx_window_violation_tso), 523 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, 524#if 0 525 { STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 526 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"}, 527 { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 528 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"}, 529#endif 530 { STATS_OFFSET32(tx_chain_lost_mbuf), 531 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, 532 { STATS_OFFSET32(tx_frames_deferred), 533 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, 534 { STATS_OFFSET32(tx_queue_xoff), 535 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, 536 { STATS_OFFSET32(mbuf_defrag_attempts), 537 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, 538 { STATS_OFFSET32(mbuf_defrag_failures), 539 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, 540 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 541 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, 542 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 543 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, 544 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 545 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, 546 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 547 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, 548 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 549 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, 550 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 551 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, 552 { STATS_OFFSET32(mbuf_alloc_tx), 553 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, 554 { STATS_OFFSET32(mbuf_alloc_rx), 555 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, 556 { STATS_OFFSET32(mbuf_alloc_sge), 557 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, 558 { STATS_OFFSET32(mbuf_alloc_tpa), 559 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"} 560}; 561 562static const struct { 563 uint32_t offset; 564 uint32_t size; 565 char string[STAT_NAME_LEN]; 566} bxe_eth_q_stats_arr[] = { 567 { Q_STATS_OFFSET32(total_bytes_received_hi), 568 8, "rx_bytes" }, 569 { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 570 8, "rx_ucast_packets" }, 571 { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 572 8, 
"rx_mcast_packets" }, 573 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 574 8, "rx_bcast_packets" }, 575 { Q_STATS_OFFSET32(no_buff_discard_hi), 576 8, "rx_discards" }, 577 { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 578 8, "tx_bytes" }, 579 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 580 8, "tx_ucast_packets" }, 581 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 582 8, "tx_mcast_packets" }, 583 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 584 8, "tx_bcast_packets" }, 585 { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 586 8, "tpa_aggregations" }, 587 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 588 8, "tpa_aggregated_frames"}, 589 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 590 8, "tpa_bytes"}, 591 { Q_STATS_OFFSET32(rx_calls), 592 4, "rx_calls"}, 593 { Q_STATS_OFFSET32(rx_pkts), 594 4, "rx_pkts"}, 595 { Q_STATS_OFFSET32(rx_tpa_pkts), 596 4, "rx_tpa_pkts"}, 597 { Q_STATS_OFFSET32(rx_soft_errors), 598 4, "rx_soft_errors"}, 599 { Q_STATS_OFFSET32(rx_hw_csum_errors), 600 4, "rx_hw_csum_errors"}, 601 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 602 4, "rx_ofld_frames_csum_ip"}, 603 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 604 4, "rx_ofld_frames_csum_tcp_udp"}, 605 { Q_STATS_OFFSET32(rx_budget_reached), 606 4, "rx_budget_reached"}, 607 { Q_STATS_OFFSET32(tx_pkts), 608 4, "tx_pkts"}, 609 { Q_STATS_OFFSET32(tx_soft_errors), 610 4, "tx_soft_errors"}, 611 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 612 4, "tx_ofld_frames_csum_ip"}, 613 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 614 4, "tx_ofld_frames_csum_tcp"}, 615 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 616 4, "tx_ofld_frames_csum_udp"}, 617 { Q_STATS_OFFSET32(tx_ofld_frames_lso), 618 4, "tx_ofld_frames_lso"}, 619 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 620 4, "tx_ofld_frames_lso_hdr_splits"}, 621 { Q_STATS_OFFSET32(tx_encap_failures), 622 4, "tx_encap_failures"}, 623 { Q_STATS_OFFSET32(tx_hw_queue_full), 624 4, "tx_hw_queue_full"}, 625 { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 626 4, "tx_hw_max_queue_depth"}, 627 { Q_STATS_OFFSET32(tx_dma_mapping_failure), 628 4, "tx_dma_mapping_failure"}, 629 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 630 4, "tx_max_drbr_queue_depth"}, 631 { Q_STATS_OFFSET32(tx_window_violation_std), 632 4, "tx_window_violation_std"}, 633 { Q_STATS_OFFSET32(tx_window_violation_tso), 634 4, "tx_window_violation_tso"}, 635#if 0 636 { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 637 4, "tx_unsupported_tso_request_ipv6"}, 638 { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 639 4, "tx_unsupported_tso_request_not_tcp"}, 640#endif 641 { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 642 4, "tx_chain_lost_mbuf"}, 643 { Q_STATS_OFFSET32(tx_frames_deferred), 644 4, "tx_frames_deferred"}, 645 { Q_STATS_OFFSET32(tx_queue_xoff), 646 4, "tx_queue_xoff"}, 647 { Q_STATS_OFFSET32(mbuf_defrag_attempts), 648 4, "mbuf_defrag_attempts"}, 649 { Q_STATS_OFFSET32(mbuf_defrag_failures), 650 4, "mbuf_defrag_failures"}, 651 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 652 4, "mbuf_rx_bd_alloc_failed"}, 653 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 654 4, "mbuf_rx_bd_mapping_failed"}, 655 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 656 4, "mbuf_rx_tpa_alloc_failed"}, 657 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 658 4, "mbuf_rx_tpa_mapping_failed"}, 659 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 660 4, "mbuf_rx_sge_alloc_failed"}, 661 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 662 4, "mbuf_rx_sge_mapping_failed"}, 663 { 
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
        4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
        4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
        4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
        4, "mbuf_alloc_tpa"}
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)


static void bxe_cmng_fns_init(struct bxe_softc *sc,
                              uint8_t read_cfg,
                              uint8_t cmng_type);
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int  bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                 uint16_t prev_index,
                                 uint16_t index);
static int  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                  int queue);
static int  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                  uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf *m,
                                     uint8_t contents);
static int  bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int  bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int  bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int  bxe_init_locked(struct bxe_softc *sc);
static int  bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
                                     uint8_t keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_rx_mode_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);


/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t  msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}
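
/*
 * Usage sketch (added, illustrative only): the length must be a multiple
 * of 8 bytes or the seed is returned unchanged. A typical call over an
 * 8-byte buffer with the conventional all-ones seed:
 *
 *   uint8_t  buf[8] = { 0 };
 *   uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 *
 * The 'complement' flag selects whether the final result is bit-inverted
 * after the mirror and byte-swap steps described above.
 */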

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
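
/*
 * Added note: these helpers provide Linux-style atomic bit operations on
 * top of FreeBSD's atomic(9) primitives. A sketch of the intended usage
 * pattern (flag names illustrative):
 *
 *   if (bxe_test_and_set_bit(SOME_FLAG_BIT, &sc->some_flags)) {
 *       // bit was already set; another context owns the work
 *   }
 *
 * bxe_test_and_set_bit/bxe_test_and_clear_bit return the *previous* state
 * of the bit (nonzero if it was set), mirroring their Linux counterparts.
 */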

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load), so the result is recorded in the bxe_dma structure
 * instead: paddr and nseg are zeroed to flag any failure to the caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}
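
/*
 * Usage sketch (added, illustrative): callers pair bxe_dma_alloc() with
 * bxe_dma_free() below, consuming the vaddr/paddr captured in the bxe_dma
 * structure:
 *
 *   struct bxe_dma dma = { 0 };
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
 *       // handle failure; dma is re-zeroed on every error path
 *   }
 *   ...
 *   bxe_dma_free(sc, &dma); // safe even if size is 0 (no-op plus re-zero)
 */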
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}

/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}
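
/*
 * Added note: the indirect access works by programming a GRC address
 * window in PCI config space and then moving the data through the
 * companion data register. Conceptually:
 *
 *   pci cfg PCICFG_GRC_ADDRESS <- target GRC register address
 *   pci cfg PCICFG_GRC_DATA    <- / -> 32-bit value
 *   pci cfg PCICFG_GRC_ADDRESS <- 0 (close the window)
 *
 * The window is cleared after each access so a stale address is never
 * left armed.
 */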
#if 0
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }

}
#endif

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}
/*
 * The per-PF misc lock must be acquired before the per-port MCP lock.
 * Otherwise, had we done things the other way around, if two PFs from the
 * same port were to attempt to access nvram at the same time, we could run
 * into a scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its
 * own access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
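
/*
 * Worked example (added): BYTE_OFFSET() turns a byte address into a bit
 * shift within its aligned dword. For offset 0x102: the aligned dword is
 * 0x100 (offset & ~0x03), and BYTE_OFFSET(0x102) == 8 * (0x102 & 0x03)
 * == 16, so bxe_nvram_write1() below splices the single byte into bits
 * 23:16 of that dword before writing it back.
 */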

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t         offset,
                uint8_t          *data_buf,
                int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc    *sc,
              struct dmae_command *dmae,
              int                 idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
                        struct dmae_command *dmae,
                        uint8_t             src_type,
                        uint8_t             dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
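
/*
 * Usage sketch (added, illustrative): a caller builds a command with
 * bxe_prep_dmae_with_comp(), fills in the source/destination/length, and
 * then issues it synchronously:
 *
 *   struct dmae_command dmae;
 *   bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *   // ... set dmae.src_addr_lo/hi, dmae.dst_addr_lo/hi, dmae.len ...
 *   rc = bxe_issue_dmae_with_comp(sc, &dmae);
 *
 * bxe_read_dmae() and bxe_write_dmae() below are exactly this pattern.
 */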

void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t         src_addr,
              uint32_t         len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t       dma_addr,
               uint32_t         dst_addr,
               uint32_t         len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t       phys_addr,
                        uint32_t         addr,
                        uint32_t         len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
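
/*
 * Worked example (added): in bxe_write_dmae_phys_len() the 'len' argument
 * is in 32-bit words while 'offset' tracks bytes, which is why offset
 * advances by (dmae_wr_max * 4). If DMAE_LEN32_WR_MAX were 0x400 words
 * and len were 0x500 words, the first pass would write 0x400 words and
 * advance both addresses by 0x1000 bytes, and the final call would write
 * the remaining 0x100 words.
 */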

void
bxe_set_ctx_validation(struct bxe_softc   *sc,
                       struct eth_context *cxt,
                       uint32_t           cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t          port,
                            uint8_t          fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t          port,
                            uint16_t         fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t          fw_sb_id,
                             uint8_t          sb_index,
                             uint8_t          disable,
                             uint16_t         usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t         usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t         reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t         reg_addr,
                   uint32_t         val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         *wb_write,
                      uint16_t         len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *wb_write,
                     uint16_t         len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}

void
elink_cb_event_log(struct bxe_softc     *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
1929{ 1930 /* XXX */ 1931#if 0 1932 //va_list ap; 1933 va_start(ap, elink_log_id); 1934 _XXX_(sc, lm_log_id, ap); 1935 va_end(ap); 1936#endif 1937 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1938} 1939 1940static int 1941bxe_set_spio(struct bxe_softc *sc, 1942 int spio, 1943 uint32_t mode) 1944{ 1945 uint32_t spio_reg; 1946 1947 /* Only 2 SPIOs are configurable */ 1948 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1949 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1950 return (-1); 1951 } 1952 1953 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1954 1955 /* read SPIO and mask except the float bits */ 1956 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1957 1958 switch (mode) { 1959 case MISC_SPIO_OUTPUT_LOW: 1960 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1961 /* clear FLOAT and set CLR */ 1962 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1963 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1964 break; 1965 1966 case MISC_SPIO_OUTPUT_HIGH: 1967 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1968 /* clear FLOAT and set SET */ 1969 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1970 spio_reg |= (spio << MISC_SPIO_SET_POS); 1971 break; 1972 1973 case MISC_SPIO_INPUT_HI_Z: 1974 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1975 /* set FLOAT */ 1976 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1977 break; 1978 1979 default: 1980 break; 1981 } 1982 1983 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1984 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1985 1986 return (0); 1987} 1988 1989static int 1990bxe_gpio_read(struct bxe_softc *sc, 1991 int gpio_num, 1992 uint8_t port) 1993{ 1994 /* The GPIO should be swapped if swap register is set and active */ 1995 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 1996 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 1997 int gpio_shift = (gpio_num + 1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1999 uint32_t gpio_mask = (1 << gpio_shift); 2000 uint32_t gpio_reg; 2001 2002 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2003 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2004 return (-1); 2005 } 2006 2007 /* read GPIO value */ 2008 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2009 2010 /* get the requested pin value */ 2011 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2012} 2013 2014static int 2015bxe_gpio_write(struct bxe_softc *sc, 2016 int gpio_num, 2017 uint32_t mode, 2018 uint8_t port) 2019{ 2020 /* The GPIO should be swapped if swap register is set and active */ 2021 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2022 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2023 int gpio_shift = (gpio_num + 2024 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2025 uint32_t gpio_mask = (1 << gpio_shift); 2026 uint32_t gpio_reg; 2027 2028 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2029 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2030 return (-1); 2031 } 2032 2033 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2034 2035 /* read GPIO and mask except the float bits */ 2036 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2037 2038 switch (mode) { 2039 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2040 BLOGD(sc, DBG_PHY, 2041 "Set GPIO %d (shift %d) -> output low\n", 2042 gpio_num, gpio_shift); 2043 /* clear FLOAT and set CLR */ 2044 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2045 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2046 break; 2047 2048 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2049 BLOGD(sc, DBG_PHY, 2050 "Set GPIO %d (shift %d) -> output high\n", 2051 gpio_num, gpio_shift); 2052 /* clear FLOAT and set SET */ 2053 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2054 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2055 break; 2056 2057 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2058 BLOGD(sc, DBG_PHY, 2059 "Set GPIO %d (shift %d) -> input\n", 2060 gpio_num, gpio_shift); 2061 /* set FLOAT */ 2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2063 break; 2064 2065 default: 2066 break; 2067 } 2068 2069 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2070 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2071 2072 return (0); 2073} 2074 2075static int 2076bxe_gpio_mult_write(struct bxe_softc *sc, 2077 uint8_t pins, 2078 uint32_t mode) 2079{ 2080 uint32_t gpio_reg; 2081 2082 /* any port swapping should be handled by caller */ 2083 2084 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2085 2086 /* read GPIO and mask except the float bits */ 2087 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2088 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2089 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2090 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2091 2092 switch (mode) { 2093 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2094 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2095 /* set CLR */ 2096 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2097 break; 2098 2099 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2100 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2101 /* set SET */ 2102 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2103 break; 2104 2105 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2106 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2107 /* set FLOAT */ 2108 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2109 break; 2110 2111 default: 2112 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2113 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2114 return (-1); 2115 } 2116 2117 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2118 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2119 2120 return (0); 2121} 2122 2123static int 2124bxe_gpio_int_write(struct bxe_softc *sc, 2125 int gpio_num, 2126 uint32_t mode, 2127 uint8_t port) 2128{ 2129 /* The GPIO should be swapped if swap register is set and active */ 2130 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2131 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2132 int gpio_shift = (gpio_num + 2133 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2134 uint32_t gpio_mask = (1 << gpio_shift); 2135 uint32_t gpio_reg; 2136 2137 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2138 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2139 return (-1); 2140 } 2141 2142 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2143 2144 /* read GPIO int */ 2145 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2146 2147 switch (mode) { 2148 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2149 BLOGD(sc, DBG_PHY, 2150 "Clear GPIO INT %d (shift %d) -> output low\n", 2151 gpio_num, gpio_shift); 2152 /* clear SET and set CLR */ 2153 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2154 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2155 break; 2156 2157 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2158 BLOGD(sc, DBG_PHY, 2159 "Set GPIO INT %d (shift %d) -> output high\n", 2160 gpio_num, gpio_shift); 2161 /* clear CLR and set SET */ 2162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2164 break; 2165 2166 default: 2167 break; 2168 } 2169 2170 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2171 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2172 2173 return (0); 2174} 2175 2176uint32_t 2177elink_cb_gpio_read(struct bxe_softc *sc, 2178 uint16_t gpio_num, 2179 uint8_t port) 2180{ 2181 return (bxe_gpio_read(sc, gpio_num, port)); 2182} 2183 2184uint8_t 2185elink_cb_gpio_write(struct bxe_softc *sc, 2186 uint16_t gpio_num, 2187 uint8_t mode, /* 0=low 1=high */ 2188 uint8_t port) 2189{ 2190 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2191} 2192 2193uint8_t 2194elink_cb_gpio_mult_write(struct bxe_softc *sc, 2195 uint8_t pins, 2196 uint8_t mode) /* 0=low 1=high */ 2197{ 2198 return (bxe_gpio_mult_write(sc, pins, mode)); 2199} 2200 2201uint8_t 2202elink_cb_gpio_int_write(struct bxe_softc *sc, 2203 uint16_t gpio_num, 2204 uint8_t mode, /* 0=low 1=high */ 2205 uint8_t port) 2206{ 2207 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2208} 2209 2210void 2211elink_cb_notify_link_changed(struct bxe_softc *sc) 2212{ 2213 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2214 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2215} 2216 2217/* send the MCP a request, block until there is a reply */ 2218uint32_t 2219elink_cb_fw_command(struct bxe_softc *sc, 2220 uint32_t command, 2221 uint32_t param) 2222{ 2223 int mb_idx = SC_FW_MB_IDX(sc); 2224 uint32_t seq; 2225 uint32_t rc = 0; 2226 uint32_t cnt = 1; 2227 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2228 2229 BXE_FWMB_LOCK(sc); 2230 2231 seq = ++sc->fw_seq; 2232 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2233 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2234 2235 BLOGD(sc, DBG_PHY, 2236 "wrote command 0x%08x to FW MB param 0x%08x\n", 2237 (command | seq), param); 2238 2239 /* Let the FW do its magic. Give it up to 5 seconds... */ 2240 do { 2241 DELAY(delay * 1000); 2242 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2243 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2244 2245 BLOGD(sc, DBG_PHY, 2246 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2247 cnt * delay, rc, seq); 2248 2249 /* is this a reply to our command? */ 2250 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2251 rc &= FW_MSG_CODE_MASK; 2252 } else { 2253 /* Ruh-roh!
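The MCP never echoed our sequence number back within the timeout, so treat the mailbox as unresponsive.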
*/ 2254 BLOGE(sc, "FW failed to respond!\n"); 2255 // XXX bxe_fw_dump(sc); 2256 rc = 0; 2257 } 2258 2259 BXE_FWMB_UNLOCK(sc); 2260 return (rc); 2261} 2262 2263static uint32_t 2264bxe_fw_command(struct bxe_softc *sc, 2265 uint32_t command, 2266 uint32_t param) 2267{ 2268 return (elink_cb_fw_command(sc, command, param)); 2269} 2270 2271static void 2272__storm_memset_dma_mapping(struct bxe_softc *sc, 2273 uint32_t addr, 2274 bus_addr_t mapping) 2275{ 2276 REG_WR(sc, addr, U64_LO(mapping)); 2277 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2278} 2279 2280static void 2281storm_memset_spq_addr(struct bxe_softc *sc, 2282 bus_addr_t mapping, 2283 uint16_t abs_fid) 2284{ 2285 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2286 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2287 __storm_memset_dma_mapping(sc, addr, mapping); 2288} 2289 2290static void 2291storm_memset_vf_to_pf(struct bxe_softc *sc, 2292 uint16_t abs_fid, 2293 uint16_t pf_id) 2294{ 2295 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2296 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2297 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2298 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2299} 2300 2301static void 2302storm_memset_func_en(struct bxe_softc *sc, 2303 uint16_t abs_fid, 2304 uint8_t enable) 2305{ 2306 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2307 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2308 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2309 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2310} 2311 2312static void 2313storm_memset_eq_data(struct bxe_softc *sc, 2314 struct event_ring_data *eq_data, 2315 uint16_t pfid) 2316{ 2317 uint32_t addr; 2318 size_t size; 2319 2320 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2321 size = sizeof(struct event_ring_data); 2322 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2323} 2324 2325static void 2326storm_memset_eq_prod(struct bxe_softc *sc, 2327 uint16_t eq_prod, 2328 uint16_t pfid) 2329{ 2330 uint32_t addr = (BAR_CSTRORM_INTMEM + 2331 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2332 REG_WR16(sc, addr, eq_prod); 2333} 2334 2335/* 2336 * Post a slowpath command. 2337 * 2338 * A slowpath command is used to propagate a configuration change through 2339 * the controller in a controlled manner, allowing each STORM processor and 2340 * other H/W blocks to phase in the change. The commands sent on the 2341 * slowpath are referred to as ramrods. Depending on the ramrod used the 2342 * completion of the ramrod will occur in different ways. Here's a 2343 * breakdown of ramrods and how they complete: 2344 * 2345 * RAMROD_CMD_ID_ETH_PORT_SETUP 2346 * Used to setup the leading connection on a port. Completes on the 2347 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2348 * 2349 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2350 * Used to setup an additional connection on a port. Completes on the 2351 * RCQ of the multi-queue/RSS connection being initialized. 2352 * 2353 * RAMROD_CMD_ID_ETH_STAT_QUERY 2354 * Used to force the storm processors to update the statistics database 2355 * in host memory. This ramrod is sent on the leading connection CID and 2356 * completes as an index increment of the CSTORM on the default status 2357 * block.
2358 * 2359 * RAMROD_CMD_ID_ETH_UPDATE 2360 * Used to update the state of the leading connection, usually to update 2361 * the RSS indirection table. Completes on the RCQ of the leading 2362 * connection. (Not currently used under FreeBSD until OS support becomes 2363 * available.) 2364 * 2365 * RAMROD_CMD_ID_ETH_HALT 2366 * Used when tearing down a connection prior to driver unload. Completes 2367 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2368 * use this on the leading connection. 2369 * 2370 * RAMROD_CMD_ID_ETH_SET_MAC 2371 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2372 * the RCQ of the leading connection. 2373 * 2374 * RAMROD_CMD_ID_ETH_CFC_DEL 2375 * Used when tearing down a connection prior to driver unload. Completes 2376 * on the RCQ of the leading connection (since the current connection 2377 * has been completely removed from controller memory). 2378 * 2379 * RAMROD_CMD_ID_ETH_PORT_DEL 2380 * Used to tear down the leading connection prior to driver unload, 2381 * typically fp[0]. Completes as an index increment of the CSTORM on the 2382 * default status block. 2383 * 2384 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2385 * Used for connection offload. Completes on the RCQ of the multi-queue 2386 * RSS connection that is being offloaded. (Not currently used under 2387 * FreeBSD.) 2388 * 2389 * There can only be one command pending per function. 2390 * 2391 * Returns: 2392 * 0 = Success, !0 = Failure. 2393 */ 2394 2395/* must be called under the spq lock */ 2396static inline 2397struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2398{ 2399 struct eth_spe *next_spe = sc->spq_prod_bd; 2400 2401 if (sc->spq_prod_bd == sc->spq_last_bd) { 2402 /* wrap back to the first eth_spq */ 2403 sc->spq_prod_bd = sc->spq; 2404 sc->spq_prod_idx = 0; 2405 } else { 2406 sc->spq_prod_bd++; 2407 sc->spq_prod_idx++; 2408 } 2409 2410 return (next_spe); 2411} 2412 2413/* must be called under the spq lock */ 2414static inline 2415void bxe_sp_prod_update(struct bxe_softc *sc) 2416{ 2417 int func = SC_FUNC(sc); 2418 2419 /* 2420 * Make sure that BD data is updated before writing the producer. 2421 * BD data is written to the memory, the producer is read from the 2422 * memory, thus we need a full memory barrier to ensure the ordering. 2423 */ 2424 mb(); 2425 2426 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2427 sc->spq_prod_idx); 2428 2429 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2430 BUS_SPACE_BARRIER_WRITE); 2431} 2432 2433/** 2434 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2435 * 2436 * @cmd: command to check 2437 * @cmd_type: command type 2438 */ 2439static inline 2440int bxe_is_contextless_ramrod(int cmd, 2441 int cmd_type) 2442{ 2443 if ((cmd_type == NONE_CONNECTION_TYPE) || 2444 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2445 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2446 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2447 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2448 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2449 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2450 return (TRUE); 2451 } else { 2452 return (FALSE); 2453 } 2454} 2455 2456/** 2457 * bxe_sp_post - place a single command on an SP ring 2458 * 2459 * @sc: driver handle 2460 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2461 * @cid: SW CID the command is related to 2462 * @data_hi: command private data address (high 32 bits) 2463 * @data_lo: command private data address (low 32 bits) 2464 * @cmd_type: command type (e.g. NONE, ETH) 2465 * 2466 * SP data is handled as if it's always an address pair, thus data fields are 2467 * not swapped to little endian in upper functions. Instead this function swaps 2468 * data as if it's two uint32 fields. 2469 */ 2470int 2471bxe_sp_post(struct bxe_softc *sc, 2472 int command, 2473 int cid, 2474 uint32_t data_hi, 2475 uint32_t data_lo, 2476 int cmd_type) 2477{ 2478 struct eth_spe *spe; 2479 uint16_t type; 2480 int common; 2481 2482 common = bxe_is_contextless_ramrod(command, cmd_type); 2483 2484 BXE_SP_LOCK(sc); 2485 2486 if (common) { 2487 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2488 BLOGE(sc, "EQ ring is full!\n"); 2489 BXE_SP_UNLOCK(sc); 2490 return (-1); 2491 } 2492 } else { 2493 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2494 BLOGE(sc, "SPQ ring is full!\n"); 2495 BXE_SP_UNLOCK(sc); 2496 return (-1); 2497 } 2498 } 2499 2500 spe = bxe_sp_get_next(sc); 2501 2502 /* CID needs port number to be encoded in it */ 2503 spe->hdr.conn_and_cmd_data = 2504 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2505 2506 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 2507 2508 /* TBD: Check if it works for VFs */ 2509 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 2510 SPE_HDR_FUNCTION_ID); 2511 2512 spe->hdr.type = htole16(type); 2513 2514 spe->data.update_data_addr.hi = htole32(data_hi); 2515 spe->data.update_data_addr.lo = htole32(data_lo); 2516 2517 /* 2518 * It's ok if the actual decrement is issued towards the memory 2519 * somewhere between the lock and unlock. Thus no more explicit 2520 * memory barrier is needed. 2521 */ 2522 if (common) { 2523 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2524 } else { 2525 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2526 } 2527 2528 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2529 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2530 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2531 BLOGD(sc, DBG_SP, 2532 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2533 sc->spq_prod_idx, 2534 (uint32_t)U64_HI(sc->spq_dma.paddr), 2535 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2536 command, 2537 common, 2538 HW_CID(sc, cid), 2539 data_hi, 2540 data_lo, 2541 type, 2542 atomic_load_acq_long(&sc->cq_spq_left), 2543 atomic_load_acq_long(&sc->eq_spq_left)); 2544 2545 bxe_sp_prod_update(sc); 2546 2547 BXE_SP_UNLOCK(sc); 2548 return (0); 2549} 2550 2551/** 2552 * bxe_debug_print_ind_table - prints the indirection table configuration.
2553 * 2554 * @sc: driver handle 2555 * @p: pointer to rss configuration 2556 */ 2557#if 0 2558static void 2559bxe_debug_print_ind_table(struct bxe_softc *sc, 2560 struct ecore_config_rss_params *p) 2561{ 2562 int i; 2563 2564 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); 2565 BLOGD(sc, DBG_LOAD, " 0x0000: "); 2566 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2567 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); 2568 2569 /* Print 4 bytes in a line */ 2570 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 2571 (((i + 1) & 0x3) == 0)) { 2572 BLOGD(sc, DBG_LOAD, "\n"); 2573 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); 2574 } 2575 } 2576 2577 BLOGD(sc, DBG_LOAD, "\n"); 2578} 2579#endif 2580 2581/* 2582 * FreeBSD Device probe function. 2583 * 2584 * Compares the device found to the driver's list of supported devices and 2585 * reports back to the BSD loader whether this is the right driver for the device. 2586 * This is the driver entry function called from the "kldload" command. 2587 * 2588 * Returns: 2589 * BUS_PROBE_DEFAULT on success, positive value on failure. 2590 */ 2591static int 2592bxe_probe(device_t dev) 2593{ 2594 struct bxe_softc *sc; 2595 struct bxe_device_type *t; 2596 char *descbuf; 2597 uint16_t did, sdid, svid, vid; 2598 2599 /* Find our device structure */ 2600 sc = device_get_softc(dev); 2601 sc->dev = dev; 2602 t = bxe_devs; 2603 2604 /* Get the data for the device to be probed. */ 2605 vid = pci_get_vendor(dev); 2606 did = pci_get_device(dev); 2607 svid = pci_get_subvendor(dev); 2608 sdid = pci_get_subdevice(dev); 2609 2610 BLOGD(sc, DBG_LOAD, 2611 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2612 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2613 2614 /* Look through the list of known devices for a match. */ 2615 while (t->bxe_name != NULL) { 2616 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2617 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2618 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2619 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2620 if (descbuf == NULL) 2621 return (ENOMEM); 2622 2623 /* Print out the device identity.
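As an illustration (hypothetical values), a B1-revision part would yield a string of the form "<device name> (B1) BXE v:<driver version>".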
*/ 2624 snprintf(descbuf, BXE_DEVDESC_MAX, 2625 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2626 (((pci_read_config(dev, PCIR_REVID, 4) & 2627 0xf0) >> 4) + 'A'), 2628 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2629 BXE_DRIVER_VERSION); 2630 2631 device_set_desc_copy(dev, descbuf); 2632 free(descbuf, M_TEMP); 2633 return (BUS_PROBE_DEFAULT); 2634 } 2635 t++; 2636 } 2637 2638 return (ENXIO); 2639} 2640 2641static void 2642bxe_init_mutexes(struct bxe_softc *sc) 2643{ 2644#ifdef BXE_CORE_LOCK_SX 2645 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2646 "bxe%d_core_lock", sc->unit); 2647 sx_init(&sc->core_sx, sc->core_sx_name); 2648#else 2649 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2650 "bxe%d_core_lock", sc->unit); 2651 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2652#endif 2653 2654 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2655 "bxe%d_sp_lock", sc->unit); 2656 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2657 2658 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2659 "bxe%d_dmae_lock", sc->unit); 2660 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2661 2662 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2663 "bxe%d_phy_lock", sc->unit); 2664 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2665 2666 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2667 "bxe%d_fwmb_lock", sc->unit); 2668 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2669 2670 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2671 "bxe%d_print_lock", sc->unit); 2672 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2673 2674 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2675 "bxe%d_stats_lock", sc->unit); 2676 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2677 2678 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2679 "bxe%d_mcast_lock", sc->unit); 2680 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2681} 2682 2683static void 2684bxe_release_mutexes(struct bxe_softc *sc) 2685{ 2686#ifdef BXE_CORE_LOCK_SX 2687 sx_destroy(&sc->core_sx); 2688#else 2689 if (mtx_initialized(&sc->core_mtx)) { 2690 mtx_destroy(&sc->core_mtx); 2691 } 2692#endif 2693 2694 if (mtx_initialized(&sc->sp_mtx)) { 2695 mtx_destroy(&sc->sp_mtx); 2696 } 2697 2698 if (mtx_initialized(&sc->dmae_mtx)) { 2699 mtx_destroy(&sc->dmae_mtx); 2700 } 2701 2702 if (mtx_initialized(&sc->port.phy_mtx)) { 2703 mtx_destroy(&sc->port.phy_mtx); 2704 } 2705 2706 if (mtx_initialized(&sc->fwmb_mtx)) { 2707 mtx_destroy(&sc->fwmb_mtx); 2708 } 2709 2710 if (mtx_initialized(&sc->print_mtx)) { 2711 mtx_destroy(&sc->print_mtx); 2712 } 2713 2714 if (mtx_initialized(&sc->stats_mtx)) { 2715 mtx_destroy(&sc->stats_mtx); 2716 } 2717 2718 if (mtx_initialized(&sc->mcast_mtx)) { 2719 mtx_destroy(&sc->mcast_mtx); 2720 } 2721} 2722 2723static void 2724bxe_tx_disable(struct bxe_softc* sc) 2725{ 2726 struct ifnet *ifp = sc->ifnet; 2727 2728 /* tell the stack the driver is stopped and TX queue is full */ 2729 if (ifp != NULL) { 2730 ifp->if_drv_flags = 0; 2731 } 2732} 2733 2734static void 2735bxe_drv_pulse(struct bxe_softc *sc) 2736{ 2737 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2738 sc->fw_drv_pulse_wr_seq); 2739} 2740 2741static inline uint16_t 2742bxe_tx_avail(struct bxe_softc *sc, 2743 struct bxe_fastpath *fp) 2744{ 2745 int16_t used; 2746 uint16_t prod; 2747 uint16_t cons; 2748 2749 prod = fp->tx_bd_prod; 2750 cons = fp->tx_bd_cons; 2751 2752 used = SUB_S16(prod, cons); 2753 2754#if 0 2755 
KASSERT((used >= 0), ("used tx bds < 0")); 2756 KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size")); 2757 KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL), 2758 ("invalid number of tx bds used")); 2759#endif 2760 2761 return ((int16_t)(sc->tx_ring_size) - used); 2762} 2763 2764static inline int 2765bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2766{ 2767 uint16_t hw_cons; 2768 2769 mb(); /* status block fields can change */ 2770 hw_cons = le16toh(*fp->tx_cons_sb); 2771 return (hw_cons != fp->tx_pkt_cons); 2772} 2773 2774static inline uint8_t 2775bxe_has_tx_work(struct bxe_fastpath *fp) 2776{ 2777 /* expand this for multi-cos if ever supported */ 2778 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2779} 2780 2781static inline int 2782bxe_has_rx_work(struct bxe_fastpath *fp) 2783{ 2784 uint16_t rx_cq_cons_sb; 2785 2786 mb(); /* status block fields can change */ 2787 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2788 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2789 rx_cq_cons_sb++; 2790 return (fp->rx_cq_cons != rx_cq_cons_sb); 2791} 2792 2793static void 2794bxe_sp_event(struct bxe_softc *sc, 2795 struct bxe_fastpath *fp, 2796 union eth_rx_cqe *rr_cqe) 2797{ 2798 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2799 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2800 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2801 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2802 2803 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2804 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2805 2806#if 0 2807 /* 2808 * If cid is within VF range, replace the slowpath object with the 2809 * one corresponding to this VF 2810 */ 2811 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { 2812 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); 2813 } 2814#endif 2815 2816 switch (command) { 2817 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2818 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2819 drv_cmd = ECORE_Q_CMD_UPDATE; 2820 break; 2821 2822 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2823 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2824 drv_cmd = ECORE_Q_CMD_SETUP; 2825 break; 2826 2827 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2828 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2829 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2830 break; 2831 2832 case (RAMROD_CMD_ID_ETH_HALT): 2833 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2834 drv_cmd = ECORE_Q_CMD_HALT; 2835 break; 2836 2837 case (RAMROD_CMD_ID_ETH_TERMINATE): 2838 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); 2839 drv_cmd = ECORE_Q_CMD_TERMINATE; 2840 break; 2841 2842 case (RAMROD_CMD_ID_ETH_EMPTY): 2843 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2844 drv_cmd = ECORE_Q_CMD_EMPTY; 2845 break; 2846 2847 default: 2848 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2849 command, fp->index); 2850 return; 2851 } 2852 2853 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2854 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2855 /* 2856 * q_obj->complete_cmd() failure means that this was 2857 * an unexpected completion. 2858 * 2859 * In this case we don't want to increase the sc->spq_left 2860 * because apparently we haven't sent this command in the first 2861 * place.
2862 */ 2863 // bxe_panic(sc, ("Unexpected SP completion\n")); 2864 return; 2865 } 2866 2867#if 0 2868 /* SRIOV: reschedule any 'in_progress' operations */ 2869 bxe_iov_sp_event(sc, cid, TRUE); 2870#endif 2871 2872 atomic_add_acq_long(&sc->cq_spq_left, 1); 2873 2874 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2875 atomic_load_acq_long(&sc->cq_spq_left)); 2876 2877#if 0 2878 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2879 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2880 /* 2881 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2882 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2883 * prevent case that both bits are cleared. At the end of load/unload 2884 * driver checks that sp_state is cleared and this order prevents 2885 * races. 2886 */ 2887 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2888 wmb(); 2889 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2890 2891 /* schedule the sp task as MCP ack is required */ 2892 bxe_schedule_sp_task(sc); 2893 } 2894#endif 2895} 2896 2897/* 2898 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2899 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2900 * the current aggregation queue as in-progress. 2901 */ 2902static void 2903bxe_tpa_start(struct bxe_softc *sc, 2904 struct bxe_fastpath *fp, 2905 uint16_t queue, 2906 uint16_t cons, 2907 uint16_t prod, 2908 struct eth_fast_path_rx_cqe *cqe) 2909{ 2910 struct bxe_sw_rx_bd tmp_bd; 2911 struct bxe_sw_rx_bd *rx_buf; 2912 struct eth_rx_bd *rx_bd; 2913 int max_agg_queues; 2914 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2915 uint16_t index; 2916 2917 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2918 "cons=%d prod=%d\n", 2919 fp->index, queue, cons, prod); 2920 2921 max_agg_queues = MAX_AGG_QS(sc); 2922 2923 KASSERT((queue < max_agg_queues), 2924 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2925 fp->index, queue, max_agg_queues)); 2926 2927 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2928 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2929 fp->index, queue)); 2930 2931 /* copy the existing mbuf and mapping from the TPA pool */ 2932 tmp_bd = tpa_info->bd; 2933 2934 if (tmp_bd.m == NULL) { 2935 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2936 fp->index, queue); 2937 /* XXX Error handling? */ 2938 return; 2939 } 2940 2941 /* change the TPA queue to the start state */ 2942 tpa_info->state = BXE_TPA_STATE_START; 2943 tpa_info->placement_offset = cqe->placement_offset; 2944 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2945 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2946 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2947 2948 fp->rx_tpa_queue_used |= (1 << queue); 2949 2950 /* 2951 * If all the buffer descriptors are filled with mbufs then fill in 2952 * the current consumer index with a new BD. Else if a maximum Rx 2953 * buffer limit is imposed then fill in the next producer index. 2954 */ 2955 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2956 prod : cons; 2957 2958 /* move the received mbuf and mapping to TPA pool */ 2959 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2960 2961 /* release any existing RX BD mbuf mappings */ 2962 if (cons != index) { 2963 rx_buf = &fp->rx_mbuf_chain[cons]; 2964 2965 if (rx_buf->m_map != NULL) { 2966 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2967 BUS_DMASYNC_POSTREAD); 2968 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2969 } 2970 2971 /* 2972 * We get here when the maximum number of rx buffers is less than 2973 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2974 * it out here without concern of a memory leak. 2975 */ 2976 fp->rx_mbuf_chain[cons].m = NULL; 2977 } 2978 2979 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2980 fp->rx_mbuf_chain[index] = tmp_bd; 2981 2982 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2983 rx_bd = &fp->rx_chain[index]; 2984 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2985 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2986} 2987 2988/* 2989 * When a TPA aggregation is completed, loop through the individual mbufs 2990 * of the aggregation, combining them into a single mbuf which will be sent 2991 * up the stack. Refill all freed SGEs with mbufs as we go along. 2992 */ 2993static int 2994bxe_fill_frag_mbuf(struct bxe_softc *sc, 2995 struct bxe_fastpath *fp, 2996 struct bxe_sw_tpa_info *tpa_info, 2997 uint16_t queue, 2998 uint16_t pages, 2999 struct mbuf *m, 3000 struct eth_end_agg_rx_cqe *cqe, 3001 uint16_t cqe_idx) 3002{ 3003 struct mbuf *m_frag; 3004 uint32_t frag_len, frag_size, i; 3005 uint16_t sge_idx; 3006 int rc = 0; 3007 int j; 3008 3009 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3010 3011 BLOGD(sc, DBG_LRO, 3012 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3013 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3014 3015 /* make sure the aggregated frame is not too big to handle */ 3016 if (pages > 8 * PAGES_PER_SGE) { 3017 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3018 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3019 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3020 tpa_info->len_on_bd, frag_size); 3021 bxe_panic(sc, ("sge page count error\n")); 3022 return (EINVAL); 3023 } 3024 3025 /* 3026 * Scan through the scatter gather list pulling individual mbufs into a 3027 * single mbuf for the host stack. 3028 */ 3029 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3030 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3031 3032 /* 3033 * Firmware gives the indices of the SGE as if the ring is an array 3034 * (meaning that the "next" element will consume 2 indices). 3035 */ 3036 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3037 3038 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3039 "sge_idx=%d frag_size=%d frag_len=%d\n", 3040 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3041 3042 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3043 3044 /* allocate a new mbuf for the SGE */ 3045 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3046 if (rc) { 3047 /* Leave all remaining SGEs in the ring! 
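They still have mbufs attached and can be reused by a later aggregation; the caller drops the partially assembled frame and counts a soft error.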
*/ 3048 return (rc); 3049 } 3050 3051 /* update the fragment length */ 3052 m_frag->m_len = frag_len; 3053 3054 /* concatenate the fragment to the head mbuf */ 3055 m_cat(m, m_frag); 3056 fp->eth_q_stats.mbuf_alloc_sge--; 3057 3058 /* update the TPA mbuf size and remaining fragment size */ 3059 m->m_pkthdr.len += frag_len; 3060 frag_size -= frag_len; 3061 } 3062 3063 BLOGD(sc, DBG_LRO, 3064 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 3065 fp->index, queue, frag_size); 3066 3067 return (rc); 3068} 3069 3070static inline void 3071bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 3072{ 3073 int i, j; 3074 3075 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 3076 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 3077 3078 for (j = 0; j < 2; j++) { 3079 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 3080 idx--; 3081 } 3082 } 3083} 3084 3085static inline void 3086bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 3087{ 3088 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 3089 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 3090 3091 /* 3092 * Clear the last two indices in each page. These are the indices that 3093 * correspond to the "next" element, hence will never be indicated and 3094 * should be removed from the calculations. 3095 */ 3096 bxe_clear_sge_mask_next_elems(fp); 3097} 3098 3099static inline void 3100bxe_update_last_max_sge(struct bxe_fastpath *fp, 3101 uint16_t idx) 3102{ 3103 uint16_t last_max = fp->last_max_sge; 3104 3105 if (SUB_S16(idx, last_max) > 0) { 3106 fp->last_max_sge = idx; 3107 } 3108} 3109 3110static inline void 3111bxe_update_sge_prod(struct bxe_softc *sc, 3112 struct bxe_fastpath *fp, 3113 uint16_t sge_len, 3114 struct eth_end_agg_rx_cqe *cqe) 3115{ 3116 uint16_t last_max, last_elem, first_elem; 3117 uint16_t delta = 0; 3118 uint16_t i; 3119 3120 if (!sge_len) { 3121 return; 3122 } 3123 3124 /* first mark all used pages */ 3125 for (i = 0; i < sge_len; i++) { 3126 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 3127 RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[i]))); 3128 } 3129 3130 BLOGD(sc, DBG_LRO, 3131 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 3132 fp->index, sge_len - 1, 3133 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1])); 3134 3135 /* assume that the last SGE index is the biggest */ 3136 bxe_update_last_max_sge(fp, 3137 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1])); 3138 3139 last_max = RX_SGE(fp->last_max_sge); 3140 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 3141 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 3142 3143 /* if ring is not full */ 3144 if (last_elem + 1 != first_elem) { 3145 last_elem++; 3146 } 3147 3148 /* now update the prod */ 3149 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 3150 if (__predict_true(fp->sge_mask[i])) { 3151 break; 3152 } 3153 3154 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3155 delta += BIT_VEC64_ELEM_SZ; 3156 } 3157 3158 if (delta > 0) { 3159 fp->rx_sge_prod += delta; 3160 /* clear page-end entries */ 3161 bxe_clear_sge_mask_next_elems(fp); 3162 } 3163 3164 BLOGD(sc, DBG_LRO, 3165 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3166 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3167} 3168 3169/* 3170 * The aggregation on the current TPA queue has completed. Pull the individual 3171 * mbuf fragments together into a single mbuf, perform all necessary checksum 3172 * calculations, and send the resulting mbuf to the stack.
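 * A replacement mbuf for the TPA pool is allocated before the existing one is touched; if that allocation fails the frame is dropped and counted in rx_soft_errors.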
3173 */ 3174static void 3175bxe_tpa_stop(struct bxe_softc *sc, 3176 struct bxe_fastpath *fp, 3177 struct bxe_sw_tpa_info *tpa_info, 3178 uint16_t queue, 3179 uint16_t pages, 3180 struct eth_end_agg_rx_cqe *cqe, 3181 uint16_t cqe_idx) 3182{ 3183 struct ifnet *ifp = sc->ifnet; 3184 struct mbuf *m; 3185 int rc = 0; 3186 3187 BLOGD(sc, DBG_LRO, 3188 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3189 fp->index, queue, tpa_info->placement_offset, 3190 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3191 3192 m = tpa_info->bd.m; 3193 3194 /* allocate a replacement before modifying existing mbuf */ 3195 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3196 if (rc) { 3197 /* drop the frame and log an error */ 3198 fp->eth_q_stats.rx_soft_errors++; 3199 goto bxe_tpa_stop_exit; 3200 } 3201 3202 /* we have a replacement, fixup the current mbuf */ 3203 m_adj(m, tpa_info->placement_offset); 3204 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3205 3206 /* mark the checksums valid (taken care of by the firmware) */ 3207 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3208 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3209 m->m_pkthdr.csum_data = 0xffff; 3210 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3211 CSUM_IP_VALID | 3212 CSUM_DATA_VALID | 3213 CSUM_PSEUDO_HDR); 3214 3215 /* aggregate all of the SGEs into a single mbuf */ 3216 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3217 if (rc) { 3218 /* drop the packet and log an error */ 3219 fp->eth_q_stats.rx_soft_errors++; 3220 m_freem(m); 3221 } else { 3222 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) { 3223 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3224 m->m_flags |= M_VLANTAG; 3225 } 3226 3227 /* assign the packet to this interface */ 3228 m->m_pkthdr.rcvif = ifp; 3229 3230#if __FreeBSD_version >= 800000 3231 /* specify what RSS queue was used for this flow */ 3232 m->m_pkthdr.flowid = fp->index; 3233 m->m_flags |= M_FLOWID; 3234#endif 3235 3236 ifp->if_ipackets++; 3237 fp->eth_q_stats.rx_tpa_pkts++; 3238 3239 /* pass the frame to the stack */ 3240 (*ifp->if_input)(ifp, m); 3241 } 3242 3243 /* we passed an mbuf up the stack or dropped the frame */ 3244 fp->eth_q_stats.mbuf_alloc_tpa--; 3245 3246bxe_tpa_stop_exit: 3247 3248 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3249 fp->rx_tpa_queue_used &= ~(1 << queue); 3250} 3251 3252static uint8_t 3253bxe_rxeof(struct bxe_softc *sc, 3254 struct bxe_fastpath *fp) 3255{ 3256 struct ifnet *ifp = sc->ifnet; 3257 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3258 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3259 int rx_pkts = 0; 3260 int rc; 3261 3262 BXE_FP_RX_LOCK(fp); 3263 3264 /* CQ "next element" is of the size of the regular element */ 3265 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3266 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3267 hw_cq_cons++; 3268 } 3269 3270 bd_cons = fp->rx_bd_cons; 3271 bd_prod = fp->rx_bd_prod; 3272 bd_prod_fw = bd_prod; 3273 sw_cq_cons = fp->rx_cq_cons; 3274 sw_cq_prod = fp->rx_cq_prod; 3275 3276 /* 3277 * Memory barrier necessary as speculative reads of the rx 3278 * buffer can be ahead of the index in the status block 3279 */ 3280 rmb(); 3281 3282 BLOGD(sc, DBG_RX, 3283 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3284 fp->index, hw_cq_cons, sw_cq_cons); 3285 3286 while (sw_cq_cons != hw_cq_cons) { 3287 struct bxe_sw_rx_bd *rx_buf = NULL; 3288 union eth_rx_cqe *cqe; 3289 struct eth_fast_path_rx_cqe *cqe_fp; 3290 uint8_t cqe_fp_flags; 3291 enum eth_rx_cqe_type cqe_fp_type; 3292 uint16_t len,
pad; 3293 struct mbuf *m = NULL; 3294 3295 comp_ring_cons = RCQ(sw_cq_cons); 3296 bd_prod = RX_BD(bd_prod); 3297 bd_cons = RX_BD(bd_cons); 3298 3299 cqe = &fp->rcq_chain[comp_ring_cons]; 3300 cqe_fp = &cqe->fast_path_cqe; 3301 cqe_fp_flags = cqe_fp->type_error_flags; 3302 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3303 3304 BLOGD(sc, DBG_RX, 3305 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3306 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3307 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u\n", 3308 fp->index, 3309 hw_cq_cons, 3310 sw_cq_cons, 3311 bd_prod, 3312 bd_cons, 3313 CQE_TYPE(cqe_fp_flags), 3314 cqe_fp_flags, 3315 cqe_fp->status_flags, 3316 le32toh(cqe_fp->rss_hash_result), 3317 le16toh(cqe_fp->vlan_tag), 3318 le16toh(cqe_fp->pkt_len_or_gro_seg_len)); 3319 3320 /* is this a slowpath msg? */ 3321 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3322 bxe_sp_event(sc, fp, cqe); 3323 goto next_cqe; 3324 } 3325 3326 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3327 3328 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3329 struct bxe_sw_tpa_info *tpa_info; 3330 uint16_t frag_size, pages; 3331 uint8_t queue; 3332 3333#if 0 3334 /* sanity check */ 3335 if (!fp->tpa_enable && 3336 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3337 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3338 CQE_TYPE(cqe_fp_type)); 3339 } 3340#endif 3341 3342 if (CQE_TYPE_START(cqe_fp_type)) { 3343 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3344 bd_cons, bd_prod, cqe_fp); 3345 m = NULL; /* packet not ready yet */ 3346 goto next_rx; 3347 } 3348 3349 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3350 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); 3351 3352 queue = cqe->end_agg_cqe.queue_index; 3353 tpa_info = &fp->rx_tpa_info[queue]; 3354 3355 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3356 fp->index, queue); 3357 3358 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3359 tpa_info->len_on_bd); 3360 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3361 3362 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3363 &cqe->end_agg_cqe, comp_ring_cons); 3364 3365 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe); 3366 3367 goto next_cqe; 3368 } 3369 3370 /* non TPA */ 3371 3372 /* is this an error packet? */ 3373 if (__predict_false(cqe_fp_flags & 3374 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3375 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3376 fp->eth_q_stats.rx_soft_errors++; 3377 goto next_rx; 3378 } 3379 3380 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3381 pad = cqe_fp->placement_offset; 3382 3383 m = rx_buf->m; 3384 3385 if (__predict_false(m == NULL)) { 3386 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3387 bd_cons, fp->index); 3388 goto next_rx; 3389 } 3390 3391 /* XXX double copy if packet length under a threshold */ 3392 3393 /* 3394 * If all the buffer descriptors are filled with mbufs then fill in 3395 * the current consumer index with a new BD. Else if a maximum Rx 3396 * buffer limit is imposed then fill in the next producer index. 3397 */ 3398 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3399 (sc->max_rx_bufs != RX_BD_USABLE) ? 
3400 bd_prod : bd_cons); 3401 if (rc != 0) { 3402 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3403 fp->index, rc); 3404 fp->eth_q_stats.rx_soft_errors++; 3405 3406 if (sc->max_rx_bufs != RX_BD_USABLE) { 3407 /* copy this consumer index to the producer index */ 3408 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3409 sizeof(struct bxe_sw_rx_bd)); 3410 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3411 } 3412 3413 goto next_rx; 3414 } 3415 3416 /* current mbuf was detached from the bd */ 3417 fp->eth_q_stats.mbuf_alloc_rx--; 3418 3419 /* we allocated a replacement mbuf, fixup the current one */ 3420 m_adj(m, pad); 3421 m->m_pkthdr.len = m->m_len = len; 3422 3423 /* assign the packet to this interface */ 3424 m->m_pkthdr.rcvif = ifp; 3425 3426 /* assume no hardware checksum has completed */ 3427 m->m_pkthdr.csum_flags = 0; 3428 3429 /* validate checksum if offload enabled */ 3430 if (ifp->if_capenable & IFCAP_RXCSUM) { 3431 /* check for a valid IP frame */ 3432 if (!(cqe->fast_path_cqe.status_flags & 3433 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3434 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3435 if (__predict_false(cqe_fp_flags & 3436 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3437 fp->eth_q_stats.rx_hw_csum_errors++; 3438 } else { 3439 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3440 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3441 } 3442 } 3443 3444 /* check for a valid TCP/UDP frame */ 3445 if (!(cqe->fast_path_cqe.status_flags & 3446 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3447 if (__predict_false(cqe_fp_flags & 3448 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3449 fp->eth_q_stats.rx_hw_csum_errors++; 3450 } else { 3451 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3452 m->m_pkthdr.csum_data = 0xFFFF; 3453 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3454 CSUM_PSEUDO_HDR); 3455 } 3456 } 3457 } 3458 3459 /* if there is a VLAN tag then flag that info */ 3460 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3461 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3462 m->m_flags |= M_VLANTAG; 3463 } 3464 3465#if __FreeBSD_version >= 800000 3466 /* specify what RSS queue was used for this flow */ 3467 m->m_pkthdr.flowid = fp->index; 3468 m->m_flags |= M_FLOWID; 3469#endif 3470 3471next_rx: 3472 3473 bd_cons = RX_BD_NEXT(bd_cons); 3474 bd_prod = RX_BD_NEXT(bd_prod); 3475 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3476 3477 /* pass the frame to the stack */ 3478 if (__predict_true(m != NULL)) { 3479 ifp->if_ipackets++; 3480 rx_pkts++; 3481 (*ifp->if_input)(ifp, m); 3482 } 3483 3484next_cqe: 3485 3486 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3487 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3488 3489 /* limit spinning on the queue */ 3490 if (rx_pkts == sc->rx_budget) { 3491 fp->eth_q_stats.rx_budget_reached++; 3492 break; 3493 } 3494 } /* while work to do */ 3495 3496 fp->rx_bd_cons = bd_cons; 3497 fp->rx_bd_prod = bd_prod_fw; 3498 fp->rx_cq_cons = sw_cq_cons; 3499 fp->rx_cq_prod = sw_cq_prod; 3500 3501 /* Update producers */ 3502 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3503 3504 fp->eth_q_stats.rx_pkts += rx_pkts; 3505 fp->eth_q_stats.rx_calls++; 3506 3507 BXE_FP_RX_UNLOCK(fp); 3508 3509 return (sw_cq_cons != hw_cq_cons); 3510} 3511 3512static uint16_t 3513bxe_free_tx_pkt(struct bxe_softc *sc, 3514 struct bxe_fastpath *fp, 3515 uint16_t idx) 3516{ 3517 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3518 struct eth_tx_start_bd *tx_start_bd; 3519 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3520 uint16_t new_cons; 3521 int nbd; 3522
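 /* A transmitted packet occupies a chain of nbd BDs: a start BD, a parse BD, an optional TSO split-header BD, and one data BD per remaining mbuf fragment. A single bus_dma map (tx_buf->m_map) covers the whole mbuf chain, so the one unload below suffices; the disabled block further down walks the individual BDs for reference. */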
3523 /* unmap the mbuf from non-paged memory */ 3524 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3525 3526 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3527 nbd = le16toh(tx_start_bd->nbd) - 1; 3528 3529#if 0 3530 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3531 bxe_panic(sc, ("BAD nbd!\n")); 3532 } 3533#endif 3534 3535 new_cons = (tx_buf->first_bd + nbd); 3536 3537#if 0 3538 struct eth_tx_bd *tx_data_bd; 3539 3540 /* 3541 * The following code doesn't do anything but is left here 3542 * for clarity on what the new value of new_cons skipped. 3543 */ 3544 3545 /* get the next bd */ 3546 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3547 3548 /* skip the parse bd */ 3549 --nbd; 3550 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3551 3552 /* skip the TSO split header bd since they have no mapping */ 3553 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3554 --nbd; 3555 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3556 } 3557 3558 /* now free frags */ 3559 while (nbd > 0) { 3560 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3561 if (--nbd) { 3562 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3563 } 3564 } 3565#endif 3566 3567 /* free the mbuf */ 3568 if (__predict_true(tx_buf->m != NULL)) { 3569 m_freem(tx_buf->m); 3570 fp->eth_q_stats.mbuf_alloc_tx--; 3571 } else { 3572 fp->eth_q_stats.tx_chain_lost_mbuf++; 3573 } 3574 3575 tx_buf->m = NULL; 3576 tx_buf->first_bd = 0; 3577 3578 return (new_cons); 3579} 3580 3581/* transmit timeout watchdog */ 3582static int 3583bxe_watchdog(struct bxe_softc *sc, 3584 struct bxe_fastpath *fp) 3585{ 3586 BXE_FP_TX_LOCK(fp); 3587 3588 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3589 BXE_FP_TX_UNLOCK(fp); 3590 return (0); 3591 } 3592 3593 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3594 3595 BXE_FP_TX_UNLOCK(fp); 3596 3597 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3598 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3599 3600 return (-1); 3601} 3602 3603/* processes transmit completions */ 3604static uint8_t 3605bxe_txeof(struct bxe_softc *sc, 3606 struct bxe_fastpath *fp) 3607{ 3608 struct ifnet *ifp = sc->ifnet; 3609 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3610 uint16_t tx_bd_avail; 3611 3612 BXE_FP_TX_LOCK_ASSERT(fp); 3613 3614 bd_cons = fp->tx_bd_cons; 3615 hw_cons = le16toh(*fp->tx_cons_sb); 3616 sw_cons = fp->tx_pkt_cons; 3617 3618 while (sw_cons != hw_cons) { 3619 pkt_cons = TX_BD(sw_cons); 3620 3621 BLOGD(sc, DBG_TX, 3622 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3623 fp->index, hw_cons, sw_cons, pkt_cons); 3624 3625 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3626 3627 sw_cons++; 3628 } 3629 3630 fp->tx_pkt_cons = sw_cons; 3631 fp->tx_bd_cons = bd_cons; 3632 3633 BLOGD(sc, DBG_TX, 3634 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3635 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3636 3637 mb(); 3638 3639 tx_bd_avail = bxe_tx_avail(sc, fp); 3640 3641 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3642 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3643 } else { 3644 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3645 } 3646 3647 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3648 /* reset the watchdog timer if there are pending transmits */ 3649 fp->watchdog_timer = BXE_TX_TIMEOUT; 3650 return (TRUE); 3651 } else { 3652 /* clear watchdog when there are no pending transmits */ 3653 fp->watchdog_timer = 0; 3654 return (FALSE); 3655 } 3656} 3657 3658static void 3659bxe_drain_tx_queues(struct bxe_softc *sc) 3660{ 3661 struct bxe_fastpath *fp; 3662 int i, count; 3663 3664 /* wait until all TX fastpath tasks have completed */ 3665 for 
(i = 0; i < sc->num_queues; i++) { 3666 fp = &sc->fp[i]; 3667 3668 count = 1000; 3669 3670 while (bxe_has_tx_work(fp)) { 3671 3672 BXE_FP_TX_LOCK(fp); 3673 bxe_txeof(sc, fp); 3674 BXE_FP_TX_UNLOCK(fp); 3675 3676 if (count == 0) { 3677 BLOGE(sc, "Timeout waiting for fp[%d] " 3678 "transmits to complete!\n", i); 3679 bxe_panic(sc, ("tx drain failure\n")); 3680 return; 3681 } 3682 3683 count--; 3684 DELAY(1000); 3685 rmb(); 3686 } 3687 } 3688 3689 return; 3690} 3691 3692static int 3693bxe_del_all_macs(struct bxe_softc *sc, 3694 struct ecore_vlan_mac_obj *mac_obj, 3695 int mac_type, 3696 uint8_t wait_for_comp) 3697{ 3698 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3699 int rc; 3700 3701 /* wait for completion of the request if asked to */ 3702 if (wait_for_comp) { 3703 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3704 } 3705 3706 /* Set the mac type of addresses we want to clear */ 3707 bxe_set_bit(mac_type, &vlan_mac_flags); 3708 3709 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3710 if (rc < 0) { 3711 BLOGE(sc, "Failed to delete MACs (%d)\n", rc); 3712 } 3713 3714 return (rc); 3715} 3716 3717static int 3718bxe_fill_accept_flags(struct bxe_softc *sc, 3719 uint32_t rx_mode, 3720 unsigned long *rx_accept_flags, 3721 unsigned long *tx_accept_flags) 3722{ 3723 /* Clear the flags first */ 3724 *rx_accept_flags = 0; 3725 *tx_accept_flags = 0; 3726 3727 switch (rx_mode) { 3728 case BXE_RX_MODE_NONE: 3729 /* 3730 * 'drop all' supersedes any accept flags that may have been 3731 * passed to the function. 3732 */ 3733 break; 3734 3735 case BXE_RX_MODE_NORMAL: 3736 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3737 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3738 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3739 3740 /* internal switching mode */ 3741 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3742 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3743 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3744 3745 break; 3746 3747 case BXE_RX_MODE_ALLMULTI: 3748 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3749 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3750 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3751 3752 /* internal switching mode */ 3753 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3754 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3755 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3756 3757 break; 3758 3759 case BXE_RX_MODE_PROMISC: 3760 /* 3761 * According to the definition of SI mode, an interface in promisc 3762 * mode should receive matched and unmatched (in resolution of port) 3763 * unicast packets.
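 * This is why ECORE_ACCEPT_UNMATCHED is set below in addition to the usual unicast/multicast/broadcast accept bits.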
3764 */ 3765 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3766 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3767 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3768 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3769 3770 /* internal switching mode */ 3771 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3772 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3773 3774 if (IS_MF_SI(sc)) { 3775 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3776 } else { 3777 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3778 } 3779 3780 break; 3781 3782 default: 3783 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3784 return (-1); 3785 } 3786 3787 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3788 if (rx_mode != BXE_RX_MODE_NONE) { 3789 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3790 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3791 } 3792 3793 return (0); 3794} 3795 3796static int 3797bxe_set_q_rx_mode(struct bxe_softc *sc, 3798 uint8_t cl_id, 3799 unsigned long rx_mode_flags, 3800 unsigned long rx_accept_flags, 3801 unsigned long tx_accept_flags, 3802 unsigned long ramrod_flags) 3803{ 3804 struct ecore_rx_mode_ramrod_params ramrod_param; 3805 int rc; 3806 3807 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3808 3809 /* Prepare ramrod parameters */ 3810 ramrod_param.cid = 0; 3811 ramrod_param.cl_id = cl_id; 3812 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3813 ramrod_param.func_id = SC_FUNC(sc); 3814 3815 ramrod_param.pstate = &sc->sp_state; 3816 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3817 3818 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3819 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3820 3821 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3822 3823 ramrod_param.ramrod_flags = ramrod_flags; 3824 ramrod_param.rx_mode_flags = rx_mode_flags; 3825 3826 ramrod_param.rx_accept_flags = rx_accept_flags; 3827 ramrod_param.tx_accept_flags = tx_accept_flags; 3828 3829 rc = ecore_config_rx_mode(sc, &ramrod_param); 3830 if (rc < 0) { 3831 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3832 return (rc); 3833 } 3834 3835 return (0); 3836} 3837 3838static int 3839bxe_set_storm_rx_mode(struct bxe_softc *sc) 3840{ 3841 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3842 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3843 int rc; 3844 3845 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3846 &tx_accept_flags); 3847 if (rc) { 3848 return (rc); 3849 } 3850 3851 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3852 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3853 3854 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3855 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3856 rx_accept_flags, tx_accept_flags, 3857 ramrod_flags)); 3858} 3859 3860/* returns the "mcp load_code" according to global load_count array */ 3861static int 3862bxe_nic_load_no_mcp(struct bxe_softc *sc) 3863{ 3864 int path = SC_PATH(sc); 3865 int port = SC_PORT(sc); 3866 3867 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3868 path, load_count[path][0], load_count[path][1], 3869 load_count[path][2]); 3870 load_count[path][0]++; 3871 load_count[path][1 + port]++; 3872 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3873 path, load_count[path][0], load_count[path][1], 3874 load_count[path][2]); 3875 if (load_count[path][0] == 1) { 3876 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3877 } else if (load_count[path][1 + port] == 1) { 3878 return 
(FW_MSG_CODE_DRV_LOAD_PORT); 3879 } else { 3880 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3881 } 3882} 3883 3884/* returns the "mcp load_code" according to global load_count array */ 3885static int 3886bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3887{ 3888 int port = SC_PORT(sc); 3889 int path = SC_PATH(sc); 3890 3891 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3892 path, load_count[path][0], load_count[path][1], 3893 load_count[path][2]); 3894 load_count[path][0]--; 3895 load_count[path][1 + port]--; 3896 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3897 path, load_count[path][0], load_count[path][1], 3898 load_count[path][2]); 3899 if (load_count[path][0] == 0) { 3900 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3901 } else if (load_count[path][1 + port] == 0) { 3902 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3903 } else { 3904 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3905 } 3906} 3907 3908/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3909static uint32_t 3910bxe_send_unload_req(struct bxe_softc *sc, 3911 int unload_mode) 3912{ 3913 uint32_t reset_code = 0; 3914#if 0 3915 int port = SC_PORT(sc); 3916 int path = SC_PATH(sc); 3917#endif 3918 3919 /* Select the UNLOAD request mode */ 3920 if (unload_mode == UNLOAD_NORMAL) { 3921 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3922 } 3923#if 0 3924 else if (sc->flags & BXE_NO_WOL_FLAG) { 3925 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 3926 } else if (sc->wol) { 3927 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 3928 uint8_t *mac_addr = sc->dev->dev_addr; 3929 uint32_t val; 3930 uint16_t pmc; 3931 3932 /* 3933 * The mac address is written to entries 1-4 to 3934 * preserve entry 0 which is used by the PMF 3935 */ 3936 uint8_t entry = (SC_VN(sc) + 1)*8; 3937 3938 val = (mac_addr[0] << 8) | mac_addr[1]; 3939 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); 3940 3941 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3942 (mac_addr[4] << 8) | mac_addr[5]; 3943 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 3944 3945 /* Enable the PME and clear the status */ 3946 pmc = pci_read_config(sc->dev, 3947 (sc->devinfo.pcie_pm_cap_reg + 3948 PCIR_POWER_STATUS), 3949 2); 3950 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; 3951 pci_write_config(sc->dev, 3952 (sc->devinfo.pcie_pm_cap_reg + 3953 PCIR_POWER_STATUS), 3954 pmc, 4); 3955 3956 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 3957 } 3958#endif 3959 else { 3960 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3961 } 3962 3963 /* Send the request to the MCP */ 3964 if (!BXE_NOMCP(sc)) { 3965 reset_code = bxe_fw_command(sc, reset_code, 0); 3966 } else { 3967 reset_code = bxe_nic_unload_no_mcp(sc); 3968 } 3969 3970 return (reset_code); 3971} 3972 3973/* send UNLOAD_DONE command to the MCP */ 3974static void 3975bxe_send_unload_done(struct bxe_softc *sc, 3976 uint8_t keep_link) 3977{ 3978 uint32_t reset_param = 3979 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 3980 3981 /* Report UNLOAD_DONE to MCP */ 3982 if (!BXE_NOMCP(sc)) { 3983 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 3984 } 3985} 3986 3987static int 3988bxe_func_wait_started(struct bxe_softc *sc) 3989{ 3990 int tout = 50; 3991 3992 if (!sc->port.pmf) { 3993 return (0); 3994 } 3995 3996 /* 3997 * (assumption: No Attention from MCP at this stage) 3998 * PMF probably in the middle of TX disable/enable transaction 3999 * 1. Sync IRS for default SB 4000 * 2. Sync SP queue - this guarantees us that attention handling started 4001 * 3. 
Wait until the TX disable/enable transaction completes
4002 *
4003 * Steps 1+2 guarantee that if a DCBX attention was scheduled, it has
4004 * already changed the transaction's pending bit from STARTED-->TX_STOPPED;
4005 * if we have already received the completion for the transaction, the
4006 * state is TX_STOPPED. The state will return to STARTED after the
4007 * TX_STOPPED-->STARTED transaction completes.
4008 */
4009
4010 /* XXX make sure default SB ISR is done */
4011 /* need a way to synchronize an irq (intr_mtx?) */
4012
4013 /* XXX flush any work queues */
4014
4015 while (ecore_func_get_state(sc, &sc->func_obj) !=
4016 ECORE_F_STATE_STARTED && tout--) {
4017 DELAY(20000);
4018 }
4019
4020 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
4021 /*
4022 * Failed to complete the transaction in a "good way".
4023 * Force both transactions with the CLR bit.
4024 */
4025 struct ecore_func_state_params func_params = { NULL };
4026
4027 BLOGE(sc, "Unexpected function state! "
4028 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
4029
4030 func_params.f_obj = &sc->func_obj;
4031 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4032
4033 /* STARTED-->TX_STOPPED */
4034 func_params.cmd = ECORE_F_CMD_TX_STOP;
4035 ecore_func_state_change(sc, &func_params);
4036
4037 /* TX_STOPPED-->STARTED */
4038 func_params.cmd = ECORE_F_CMD_TX_START;
4039 return (ecore_func_state_change(sc, &func_params));
4040 }
4041
4042 return (0);
4043}
4044
4045 static int
4046 bxe_stop_queue(struct bxe_softc *sc,
4047 int index)
4048{
4049 struct bxe_fastpath *fp = &sc->fp[index];
4050 struct ecore_queue_state_params q_params = { NULL };
4051 int rc;
4052
4053 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
4054
4055 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
4056 /* We want to wait for completion in this context */
4057 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
4058
4059 /* Stop the primary connection: */
4060
4061 /* ...halt the connection */
4062 q_params.cmd = ECORE_Q_CMD_HALT;
4063 rc = ecore_queue_state_change(sc, &q_params);
4064 if (rc) {
4065 return (rc);
4066 }
4067
4068 /* ...terminate the connection */
4069 q_params.cmd = ECORE_Q_CMD_TERMINATE;
4070 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
4071 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
4072 rc = ecore_queue_state_change(sc, &q_params);
4073 if (rc) {
4074 return (rc);
4075 }
4076
4077 /* ...delete the CFC entry */
4078 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
4079 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
4080 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
4081 return (ecore_queue_state_change(sc, &q_params));
4082}
4083
4084/* wait for the outstanding SP commands */
4085 static inline uint8_t
4086 bxe_wait_sp_comp(struct bxe_softc *sc,
4087 unsigned long mask)
4088{
4089 unsigned long tmp;
4090 int tout = 5000; /* wait for 5 secs tops */
4091
4092 while (tout--) {
4093 mb();
4094 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
4095 return (TRUE);
4096 }
4097
4098 DELAY(1000);
4099 }
4100
4101 mb();
4102
4103 tmp = atomic_load_acq_long(&sc->sp_state);
4104 if (tmp & mask) {
4105 BLOGE(sc, "Filtering completion timed out: "
4106 "sp_state 0x%lx, mask 0x%lx\n",
4107 tmp, mask);
4108 return (FALSE);
4109 }
4110
4111 return (TRUE); /* mask cleared on the final re-check above */
4112}
4113
4114 static int
4115 bxe_func_stop(struct bxe_softc *sc)
4116{
4117 struct ecore_func_state_params func_params = { NULL };
4118 int rc;
4119
4120 /* prepare parameters for function state transitions */
4121
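    /* RAMROD_COMP_WAIT below makes the ecore layer block in this context
     * until the FUNC_STOP ramrod actually completes, instead of returning
     * as soon as the command is posted. */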
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4122 func_params.f_obj = &sc->func_obj; 4123 func_params.cmd = ECORE_F_CMD_STOP; 4124 4125 /* 4126 * Try to stop the function the 'good way'. If it fails (in case 4127 * of a parity error during bxe_chip_cleanup()) and we are 4128 * not in a debug mode, perform a state transaction in order to 4129 * enable further HW_RESET transaction. 4130 */ 4131 rc = ecore_func_state_change(sc, &func_params); 4132 if (rc) { 4133 BLOGE(sc, "FUNC_STOP ramrod failed. " 4134 "Running a dry transaction\n"); 4135 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4136 return (ecore_func_state_change(sc, &func_params)); 4137 } 4138 4139 return (0); 4140} 4141 4142static int 4143bxe_reset_hw(struct bxe_softc *sc, 4144 uint32_t load_code) 4145{ 4146 struct ecore_func_state_params func_params = { NULL }; 4147 4148 /* Prepare parameters for function state transitions */ 4149 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4150 4151 func_params.f_obj = &sc->func_obj; 4152 func_params.cmd = ECORE_F_CMD_HW_RESET; 4153 4154 func_params.params.hw_init.load_phase = load_code; 4155 4156 return (ecore_func_state_change(sc, &func_params)); 4157} 4158 4159static void 4160bxe_int_disable_sync(struct bxe_softc *sc, 4161 int disable_hw) 4162{ 4163 if (disable_hw) { 4164 /* prevent the HW from sending interrupts */ 4165 bxe_int_disable(sc); 4166 } 4167 4168 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4169 /* make sure all ISRs are done */ 4170 4171 /* XXX make sure sp_task is not running */ 4172 /* cancel and flush work queues */ 4173} 4174 4175static void 4176bxe_chip_cleanup(struct bxe_softc *sc, 4177 uint32_t unload_mode, 4178 uint8_t keep_link) 4179{ 4180 int port = SC_PORT(sc); 4181 struct ecore_mcast_ramrod_params rparam = { NULL }; 4182 uint32_t reset_code; 4183 int i, rc = 0; 4184 4185 bxe_drain_tx_queues(sc); 4186 4187 /* give HW time to discard old tx messages */ 4188 DELAY(1000); 4189 4190 /* Clean all ETH MACs */ 4191 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4192 if (rc < 0) { 4193 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4194 } 4195 4196 /* Clean up UC list */ 4197 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4198 if (rc < 0) { 4199 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4200 } 4201 4202 /* Disable LLH */ 4203 if (!CHIP_IS_E1(sc)) { 4204 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4205 } 4206 4207 /* Set "drop all" to stop Rx */ 4208 4209 /* 4210 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4211 * a race between the completion code and this code. 4212 */ 4213 BXE_MCAST_LOCK(sc); 4214 4215 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4216 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4217 } else { 4218 bxe_set_storm_rx_mode(sc); 4219 } 4220 4221 /* Clean up multicast configuration */ 4222 rparam.mcast_obj = &sc->mcast_obj; 4223 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4224 if (rc < 0) { 4225 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4226 } 4227 4228 BXE_MCAST_UNLOCK(sc); 4229 4230 // XXX bxe_iov_chip_cleanup(sc); 4231 4232 /* 4233 * Send the UNLOAD_REQUEST to the MCP. This will return if 4234 * this function should perform FUNCTION, PORT, or COMMON HW 4235 * reset. 
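 * The reset_code captured below is later handed to bxe_reset_hw() so the
 * HW_RESET state change runs at the matching (function/port/common) scope.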
4236 */
4237 reset_code = bxe_send_unload_req(sc, unload_mode);
4238
4239 /*
4240 * (assumption: No Attention from MCP at this stage)
4241 * PMF probably in the middle of TX disable/enable transaction
4242 */
4243 rc = bxe_func_wait_started(sc);
4244 if (rc) {
4245 BLOGE(sc, "bxe_func_wait_started failed\n");
4246 }
4247
4248 /*
4249 * Close multi and leading connections.
4250 * Completions for ramrods are collected in a synchronous way.
4251 */
4252 for (i = 0; i < sc->num_queues; i++) {
4253 if (bxe_stop_queue(sc, i)) {
4254 goto unload_error;
4255 }
4256 }
4257
4258 /*
4259 * If the SP settings did not get completed so far, something
4260 * has gone very wrong.
4261 */
4262 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4263 BLOGE(sc, "Common slow path ramrods got stuck!\n");
4264 }
4265
4266 unload_error:
4267
4268 rc = bxe_func_stop(sc);
4269 if (rc) {
4270 BLOGE(sc, "Function stop failed!\n");
4271 }
4272
4273 /* disable HW interrupts */
4274 bxe_int_disable_sync(sc, TRUE);
4275
4276 /* detach interrupts */
4277 bxe_interrupt_detach(sc);
4278
4279 /* Reset the chip */
4280 rc = bxe_reset_hw(sc, reset_code);
4281 if (rc) {
4282 BLOGE(sc, "Hardware reset failed\n");
4283 }
4284
4285 /* Report UNLOAD_DONE to MCP */
4286 bxe_send_unload_done(sc, keep_link);
4287}
4288
4289 static void
4290 bxe_disable_close_the_gate(struct bxe_softc *sc)
4291{
4292 uint32_t val;
4293 int port = SC_PORT(sc);
4294
4295 BLOGD(sc, DBG_LOAD,
4296 "Disabling 'close the gates'\n");
4297
4298 if (CHIP_IS_E1(sc)) {
4299 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4300 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4301 val = REG_RD(sc, addr);
4302 val &= ~(0x300);
4303 REG_WR(sc, addr, val);
4304 } else {
4305 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4306 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4307 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4308 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4309 }
4310}
4311
4312/*
4313 * Cleans objects that have internal lists, without sending ramrods.
4314 * Should be run when interrupts are disabled.
4315 */
4316 static void
4317 bxe_squeeze_objects(struct bxe_softc *sc)
4318{
4319 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4320 struct ecore_mcast_ramrod_params rparam = { NULL };
4321 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4322 int rc;
4323
4324 /* Cleanup MACs' object first... */
4325
4326 /* Wait for completion of the requested commands */
4327 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4328 /* Perform a dry cleanup */
4329 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4330
4331 /* Clean ETH primary MAC */
4332 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4333 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4334 &ramrod_flags);
4335 if (rc != 0) {
4336 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4337 }
4338
4339 /* Cleanup UC list */
4340 vlan_mac_flags = 0;
4341 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4342 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4343 &ramrod_flags);
4344 if (rc != 0) {
4345 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4346 }
4347
4348 /* Now clean the mcast object... */
4349
4350 rparam.mcast_obj = &sc->mcast_obj;
4351 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4352
4353 /* Add a DEL command...
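 * (with RAMROD_DRV_CLR_ONLY set above this only flushes the driver's
 * internal multicast registry; no ramrod is actually sent to the chip)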
*/
4354 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4355 if (rc < 0) {
4356 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4357 }
4358
4359 /* now wait until all pending commands are cleared */
4360
4361 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4362 while (rc != 0) {
4363 if (rc < 0) {
4364 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4365 return;
4366 }
4367
4368 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4369 }
4370}
4371
4372/* stop the controller */
4373 static __noinline int
4374 bxe_nic_unload(struct bxe_softc *sc,
4375 uint32_t unload_mode,
4376 uint8_t keep_link)
4377{
4378 uint8_t global = FALSE;
4379 uint32_t val;
4380
4381 BXE_CORE_LOCK_ASSERT(sc);
4382
4383 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4384
4385 /* mark the driver as unloaded in shmem2 */
4386 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4387 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4388 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4389 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4390 }
4391
4392 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4393 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4394 /*
4395 * We can get here if the driver has been unloaded
4396 * during parity error recovery and is either waiting for a
4397 * leader to complete or for other functions to unload and
4398 * then ifconfig down has been issued. In this case we want to
4399 * unload and let other functions complete the recovery
4400 * process.
4401 */
4402 sc->recovery_state = BXE_RECOVERY_DONE;
4403 sc->is_leader = 0;
4404 bxe_release_leader_lock(sc);
4405 mb();
4406
4407 BLOGD(sc, DBG_LOAD, "Releasing leadership...\n");
4408 BLOGE(sc, "Can't unload in closed or error state\n");
4409 return (-1);
4410 }
4411
4412 /*
4413 * Nothing to do during unload if the previous bxe_nic_load()
4414 * did not complete successfully - all resources are released.
4415 */
4416 if ((sc->state == BXE_STATE_CLOSED) ||
4417 (sc->state == BXE_STATE_ERROR)) {
4418 return (0);
4419 }
4420
4421 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4422 mb();
4423
4424 /* stop tx */
4425 bxe_tx_disable(sc);
4426
4427 sc->rx_mode = BXE_RX_MODE_NONE;
4428 /* XXX set rx mode ??? */
4429
4430 if (IS_PF(sc)) {
4431 /* set ALWAYS_ALIVE bit in shmem */
4432 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4433
4434 bxe_drv_pulse(sc);
4435
4436 bxe_stats_handle(sc, STATS_EVENT_STOP);
4437 bxe_save_statistics(sc);
4438 }
4439
4440 /* wait till consumers catch up with producers in all queues */
4441 bxe_drain_tx_queues(sc);
4442
4443 /* if VF, indicate to the PF that this function is going down (the PF
4444 * will delete SP elements and clear initializations)
4445 */
4446 if (IS_VF(sc)) {
4447 ; /* bxe_vfpf_close_vf(sc); */
4448 } else if (unload_mode != UNLOAD_RECOVERY) {
4449 /* if this is a normal/close unload, need to clean up the chip */
4450 bxe_chip_cleanup(sc, unload_mode, keep_link);
4451 } else {
4452 /* Send the UNLOAD_REQUEST to the MCP */
4453 bxe_send_unload_req(sc, unload_mode);
4454
4455 /*
4456 * Prevent transactions to the host from the functions on the
4457 * engine that doesn't reset global blocks in case of a global
4458 * attention once the global blocks are reset and the gates are
4459 * opened (the engine whose leader will perform the recovery
4460 * last).
4461 */ 4462 if (!CHIP_IS_E1x(sc)) { 4463 bxe_pf_disable(sc); 4464 } 4465 4466 /* disable HW interrupts */ 4467 bxe_int_disable_sync(sc, TRUE); 4468 4469 /* detach interrupts */ 4470 bxe_interrupt_detach(sc); 4471 4472 /* Report UNLOAD_DONE to MCP */ 4473 bxe_send_unload_done(sc, FALSE); 4474 } 4475 4476 /* 4477 * At this stage no more interrupts will arrive so we may safely clean 4478 * the queue'able objects here in case they failed to get cleaned so far. 4479 */ 4480 if (IS_PF(sc)) { 4481 bxe_squeeze_objects(sc); 4482 } 4483 4484 /* There should be no more pending SP commands at this stage */ 4485 sc->sp_state = 0; 4486 4487 sc->port.pmf = 0; 4488 4489 bxe_free_fp_buffers(sc); 4490 4491 if (IS_PF(sc)) { 4492 bxe_free_mem(sc); 4493 } 4494 4495 bxe_free_fw_stats_mem(sc); 4496 4497 sc->state = BXE_STATE_CLOSED; 4498 4499 /* 4500 * Check if there are pending parity attentions. If there are - set 4501 * RECOVERY_IN_PROGRESS. 4502 */ 4503 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4504 bxe_set_reset_in_progress(sc); 4505 4506 /* Set RESET_IS_GLOBAL if needed */ 4507 if (global) { 4508 bxe_set_reset_global(sc); 4509 } 4510 } 4511 4512 /* 4513 * The last driver must disable a "close the gate" if there is no 4514 * parity attention or "process kill" pending. 4515 */ 4516 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4517 bxe_reset_is_done(sc, SC_PATH(sc))) { 4518 bxe_disable_close_the_gate(sc); 4519 } 4520 4521 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4522 4523 return (0); 4524} 4525 4526/* 4527 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4528 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4529 */ 4530static int 4531bxe_ifmedia_update(struct ifnet *ifp) 4532{ 4533 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc; 4534 struct ifmedia *ifm; 4535 4536 ifm = &sc->ifmedia; 4537 4538 /* We only support Ethernet media type. */ 4539 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4540 return (EINVAL); 4541 } 4542 4543 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4544 case IFM_AUTO: 4545 break; 4546 case IFM_10G_CX4: 4547 case IFM_10G_SR: 4548 case IFM_10G_T: 4549 case IFM_10G_TWINAX: 4550 default: 4551 /* We don't support changing the media type. */ 4552 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4553 IFM_SUBTYPE(ifm->ifm_media)); 4554 return (EINVAL); 4555 } 4556 4557 return (0); 4558} 4559 4560/* 4561 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4562 */ 4563static void 4564bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4565{ 4566 struct bxe_softc *sc = ifp->if_softc; 4567 4568 /* Report link down if the driver isn't running. */ 4569 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 4570 ifmr->ifm_active |= IFM_NONE; 4571 return; 4572 } 4573 4574 /* Setup the default interface info. 
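 * (media is marked valid and of type Ethernet here; the link, speed and
 * duplex details are overlaid below from sc->link_vars and sc->media)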
*/ 4575 ifmr->ifm_status = IFM_AVALID; 4576 ifmr->ifm_active = IFM_ETHER; 4577 4578 if (sc->link_vars.link_up) { 4579 ifmr->ifm_status |= IFM_ACTIVE; 4580 } else { 4581 ifmr->ifm_active |= IFM_NONE; 4582 return; 4583 } 4584 4585 ifmr->ifm_active |= sc->media; 4586 4587 if (sc->link_vars.duplex == DUPLEX_FULL) { 4588 ifmr->ifm_active |= IFM_FDX; 4589 } else { 4590 ifmr->ifm_active |= IFM_HDX; 4591 } 4592} 4593 4594static int 4595bxe_ioctl_nvram(struct bxe_softc *sc, 4596 uint32_t priv_op, 4597 struct ifreq *ifr) 4598{ 4599 struct bxe_nvram_data nvdata_base; 4600 struct bxe_nvram_data *nvdata; 4601 int len; 4602 int error = 0; 4603 4604 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4605 4606 len = (sizeof(struct bxe_nvram_data) + 4607 nvdata_base.len - 4608 sizeof(uint32_t)); 4609 4610 if (len > sizeof(struct bxe_nvram_data)) { 4611 if ((nvdata = (struct bxe_nvram_data *) 4612 malloc(len, M_DEVBUF, 4613 (M_NOWAIT | M_ZERO))) == NULL) { 4614 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4615 return (1); 4616 } 4617 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4618 } else { 4619 nvdata = &nvdata_base; 4620 } 4621 4622 if (priv_op == BXE_IOC_RD_NVRAM) { 4623 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4624 nvdata->offset, nvdata->len); 4625 error = bxe_nvram_read(sc, 4626 nvdata->offset, 4627 (uint8_t *)nvdata->value, 4628 nvdata->len); 4629 copyout(nvdata, ifr->ifr_data, len); 4630 } else { /* BXE_IOC_WR_NVRAM */ 4631 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4632 nvdata->offset, nvdata->len); 4633 copyin(ifr->ifr_data, nvdata, len); 4634 error = bxe_nvram_write(sc, 4635 nvdata->offset, 4636 (uint8_t *)nvdata->value, 4637 nvdata->len); 4638 } 4639 4640 if (len > sizeof(struct bxe_nvram_data)) { 4641 free(nvdata, M_DEVBUF); 4642 } 4643 4644 return (error); 4645} 4646 4647static int 4648bxe_ioctl_stats_show(struct bxe_softc *sc, 4649 uint32_t priv_op, 4650 struct ifreq *ifr) 4651{ 4652 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4653 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4654 caddr_t p_tmp; 4655 uint32_t *offset; 4656 int i; 4657 4658 switch (priv_op) 4659 { 4660 case BXE_IOC_STATS_SHOW_NUM: 4661 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4662 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4663 BXE_NUM_ETH_STATS; 4664 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4665 STAT_NAME_LEN; 4666 return (0); 4667 4668 case BXE_IOC_STATS_SHOW_STR: 4669 memset(ifr->ifr_data, 0, str_size); 4670 p_tmp = ifr->ifr_data; 4671 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4672 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4673 p_tmp += STAT_NAME_LEN; 4674 } 4675 return (0); 4676 4677 case BXE_IOC_STATS_SHOW_CNT: 4678 memset(ifr->ifr_data, 0, stats_size); 4679 p_tmp = ifr->ifr_data; 4680 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4681 offset = ((uint32_t *)&sc->eth_stats + 4682 bxe_eth_stats_arr[i].offset); 4683 switch (bxe_eth_stats_arr[i].size) { 4684 case 4: 4685 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4686 break; 4687 case 8: 4688 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4689 break; 4690 default: 4691 *((uint64_t *)p_tmp) = 0; 4692 } 4693 p_tmp += sizeof(uint64_t); 4694 } 4695 return (0); 4696 4697 default: 4698 return (-1); 4699 } 4700} 4701 4702static void 4703bxe_handle_chip_tq(void *context, 4704 int pending) 4705{ 4706 struct bxe_softc *sc = (struct bxe_softc *)context; 4707 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4708 4709 switch (work) 4710 { 4711 case 
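/*
 * CHIP_TQ_START brings the interface up, CHIP_TQ_STOP brings it down, and
 * CHIP_TQ_REINIT bounces it with a stop/start cycle; each transition runs
 * under BXE_CORE_LOCK in this taskqueue context.
 */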
CHIP_TQ_START: 4712 if ((sc->ifnet->if_flags & IFF_UP) && 4713 !(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 4714 /* start the interface */ 4715 BLOGD(sc, DBG_LOAD, "Starting the interface...\n"); 4716 BXE_CORE_LOCK(sc); 4717 bxe_init_locked(sc); 4718 BXE_CORE_UNLOCK(sc); 4719 } 4720 break; 4721 4722 case CHIP_TQ_STOP: 4723 if (!(sc->ifnet->if_flags & IFF_UP) && 4724 (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 4725 /* bring down the interface */ 4726 BLOGD(sc, DBG_LOAD, "Stopping the interface...\n"); 4727 bxe_periodic_stop(sc); 4728 BXE_CORE_LOCK(sc); 4729 bxe_stop_locked(sc); 4730 BXE_CORE_UNLOCK(sc); 4731 } 4732 break; 4733 4734 case CHIP_TQ_REINIT: 4735 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) { 4736 /* restart the interface */ 4737 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4738 bxe_periodic_stop(sc); 4739 BXE_CORE_LOCK(sc); 4740 bxe_stop_locked(sc); 4741 bxe_init_locked(sc); 4742 BXE_CORE_UNLOCK(sc); 4743 } 4744 break; 4745 4746 default: 4747 break; 4748 } 4749} 4750 4751/* 4752 * Handles any IOCTL calls from the operating system. 4753 * 4754 * Returns: 4755 * 0 = Success, >0 Failure 4756 */ 4757static int 4758bxe_ioctl(struct ifnet *ifp, 4759 u_long command, 4760 caddr_t data) 4761{ 4762 struct bxe_softc *sc = ifp->if_softc; 4763 struct ifreq *ifr = (struct ifreq *)data; 4764 struct bxe_nvram_data *nvdata; 4765 uint32_t priv_op; 4766 int mask = 0; 4767 int reinit = 0; 4768 int error = 0; 4769 4770 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4771 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4772 4773 switch (command) 4774 { 4775 case SIOCSIFMTU: 4776 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4777 ifr->ifr_mtu); 4778 4779 if (sc->mtu == ifr->ifr_mtu) { 4780 /* nothing to change */ 4781 break; 4782 } 4783 4784 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4785 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4786 ifr->ifr_mtu, mtu_min, mtu_max); 4787 error = EINVAL; 4788 break; 4789 } 4790 4791 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4792 (unsigned long)ifr->ifr_mtu); 4793 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu, 4794 (unsigned long)ifr->ifr_mtu); 4795 4796 reinit = 1; 4797 break; 4798 4799 case SIOCSIFFLAGS: 4800 /* toggle the interface state up or down */ 4801 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4802 4803 /* check if the interface is up */ 4804 if (ifp->if_flags & IFF_UP) { 4805 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4806 /* set the receive mode flags */ 4807 bxe_set_rx_mode(sc); 4808 } else { 4809 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_START); 4810 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 4811 } 4812 } else { 4813 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4814 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_STOP); 4815 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 4816 } 4817 } 4818 4819 break; 4820 4821 case SIOCADDMULTI: 4822 case SIOCDELMULTI: 4823 /* add/delete multicast addresses */ 4824 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4825 4826 /* check if the interface is up */ 4827 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4828 /* set the receive mode flags */ 4829 bxe_set_rx_mode(sc); 4830 } 4831 4832 break; 4833 4834 case SIOCSIFCAP: 4835 /* find out which capabilities have changed */ 4836 mask = (ifr->ifr_reqcap ^ ifp->if_capenable); 4837 4838 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4839 mask); 4840 4841 /* toggle the LRO capabilites enable flag 
*/ 4842 if (mask & IFCAP_LRO) { 4843 ifp->if_capenable ^= IFCAP_LRO; 4844 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4845 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF"); 4846 reinit = 1; 4847 } 4848 4849 /* toggle the TXCSUM checksum capabilites enable flag */ 4850 if (mask & IFCAP_TXCSUM) { 4851 ifp->if_capenable ^= IFCAP_TXCSUM; 4852 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4853 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF"); 4854 if (ifp->if_capenable & IFCAP_TXCSUM) { 4855 ifp->if_hwassist = (CSUM_IP | 4856 CSUM_TCP | 4857 CSUM_UDP | 4858 CSUM_TSO | 4859 CSUM_TCP_IPV6 | 4860 CSUM_UDP_IPV6); 4861 } else { 4862 ifp->if_hwassist = 0; 4863 } 4864 } 4865 4866 /* toggle the RXCSUM checksum capabilities enable flag */ 4867 if (mask & IFCAP_RXCSUM) { 4868 ifp->if_capenable ^= IFCAP_RXCSUM; 4869 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4870 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF"); 4871 if (ifp->if_capenable & IFCAP_RXCSUM) { 4872 ifp->if_hwassist = (CSUM_IP | 4873 CSUM_TCP | 4874 CSUM_UDP | 4875 CSUM_TSO | 4876 CSUM_TCP_IPV6 | 4877 CSUM_UDP_IPV6); 4878 } else { 4879 ifp->if_hwassist = 0; 4880 } 4881 } 4882 4883 /* toggle TSO4 capabilities enabled flag */ 4884 if (mask & IFCAP_TSO4) { 4885 ifp->if_capenable ^= IFCAP_TSO4; 4886 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4887 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF"); 4888 } 4889 4890 /* toggle TSO6 capabilities enabled flag */ 4891 if (mask & IFCAP_TSO6) { 4892 ifp->if_capenable ^= IFCAP_TSO6; 4893 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4894 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF"); 4895 } 4896 4897 /* toggle VLAN_HWTSO capabilities enabled flag */ 4898 if (mask & IFCAP_VLAN_HWTSO) { 4899 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 4900 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4901 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4902 } 4903 4904 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4905 if (mask & IFCAP_VLAN_HWCSUM) { 4906 /* XXX investigate this... */ 4907 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4908 error = EINVAL; 4909 } 4910 4911 /* toggle VLAN_MTU capabilities enable flag */ 4912 if (mask & IFCAP_VLAN_MTU) { 4913 /* XXX investigate this... */ 4914 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4915 error = EINVAL; 4916 } 4917 4918 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4919 if (mask & IFCAP_VLAN_HWTAGGING) { 4920 /* XXX investigate this... */ 4921 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4922 error = EINVAL; 4923 } 4924 4925 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4926 if (mask & IFCAP_VLAN_HWFILTER) { 4927 /* XXX investigate this... */ 4928 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4929 error = EINVAL; 4930 } 4931 4932 /* XXX not yet... 
4933 * IFCAP_WOL_MAGIC 4934 */ 4935 4936 break; 4937 4938 case SIOCSIFMEDIA: 4939 case SIOCGIFMEDIA: 4940 /* set/get interface media */ 4941 BLOGD(sc, DBG_IOCTL, 4942 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 4943 (command & 0xff)); 4944 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 4945 break; 4946 4947 case SIOCGPRIVATE_0: 4948 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); 4949 4950 switch (priv_op) 4951 { 4952 case BXE_IOC_RD_NVRAM: 4953 case BXE_IOC_WR_NVRAM: 4954 nvdata = (struct bxe_nvram_data *)ifr->ifr_data; 4955 BLOGD(sc, DBG_IOCTL, 4956 "Received Private NVRAM ioctl addr=0x%x size=%u\n", 4957 nvdata->offset, nvdata->len); 4958 error = bxe_ioctl_nvram(sc, priv_op, ifr); 4959 break; 4960 4961 case BXE_IOC_STATS_SHOW_NUM: 4962 case BXE_IOC_STATS_SHOW_STR: 4963 case BXE_IOC_STATS_SHOW_CNT: 4964 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", 4965 priv_op); 4966 error = bxe_ioctl_stats_show(sc, priv_op, ifr); 4967 break; 4968 4969 default: 4970 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); 4971 error = EINVAL; 4972 break; 4973 } 4974 4975 break; 4976 4977 default: 4978 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 4979 (command & 0xff)); 4980 error = ether_ioctl(ifp, command, data); 4981 break; 4982 } 4983 4984 if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 4985 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 4986 "Re-initializing hardware from IOCTL change\n"); 4987 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 4988 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 4989 } 4990 4991 return (error); 4992} 4993 4994static __noinline void 4995bxe_dump_mbuf(struct bxe_softc *sc, 4996 struct mbuf *m, 4997 uint8_t contents) 4998{ 4999 char * type; 5000 int i = 0; 5001 5002 if (!(sc->debug & DBG_MBUF)) { 5003 return; 5004 } 5005 5006 if (m == NULL) { 5007 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 5008 return; 5009 } 5010 5011 while (m) { 5012 BLOGD(sc, DBG_MBUF, 5013 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 5014 i, m, m->m_len, m->m_flags, 5015 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data); 5016 5017 if (m->m_flags & M_PKTHDR) { 5018 BLOGD(sc, DBG_MBUF, 5019 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 5020 i, m->m_pkthdr.len, m->m_flags, 5021 "\20\12M_BCAST\13M_MCAST\14M_FRAG" 5022 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" 5023 "\22M_PROMISC\23M_NOFREE", 5024 (int)m->m_pkthdr.csum_flags, 5025 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 5026 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 5027 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 5028 "\14CSUM_PSEUDO_HDR"); 5029 } 5030 5031 if (m->m_flags & M_EXT) { 5032 switch (m->m_ext.ext_type) { 5033 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 5034 case EXT_SFBUF: type = "EXT_SFBUF"; break; 5035 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 5036 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 5037 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 5038 case EXT_PACKET: type = "EXT_PACKET"; break; 5039 case EXT_MBUF: type = "EXT_MBUF"; break; 5040 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 5041 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 5042 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 5043 case EXT_EXTREF: type = "EXT_EXTREF"; break; 5044 default: type = "UNKNOWN"; break; 5045 } 5046 5047 BLOGD(sc, DBG_MBUF, 5048 "%02d: - m_ext: %p ext_size=%d type=%s\n", 5049 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 5050 } 5051 5052 if (contents) { 5053 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 5054 } 5055 5056 m = m->m_next; 5057 
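        /* bump the index and loop to dump the next buffer in the chain */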
i++;
5058 }
5059}
5060
5061/*
5062 * Checks to ensure the 13-BD sliding window is >= MSS for TSO.
5063 * Check that (13 total BDs - 3 BDs) = 10-BD window >= MSS.
5064 * The window excludes 3 BDs: 1 for the headers BD plus 2 for the parsing BD and the last BD.
5065 * The headers come in a separate BD in FreeBSD, so 13 - 3 = 10.
5066 * Returns: 0 if OK to send, 1 if the packet needs further defragmentation.
5067 */
5068 static int
5069 bxe_chktso_window(struct bxe_softc *sc,
5070 int nsegs,
5071 bus_dma_segment_t *segs,
5072 struct mbuf *m)
5073{
5074 uint32_t num_wnds, wnd_size, wnd_sum;
5075 int32_t frag_idx, wnd_idx;
5076 unsigned short lso_mss;
5077 int defrag;
5078
5079 defrag = 0;
5080 wnd_sum = 0;
5081 wnd_size = 10;
5082 num_wnds = nsegs - wnd_size;
5083 lso_mss = htole16(m->m_pkthdr.tso_segsz);
5084
5085 /*
5086 * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
5087 * calculate the first window's sum of data while skipping the first
5088 * segment, assuming it is the header.
5089 */
5090 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
5091 wnd_sum += htole16(segs[frag_idx].ds_len);
5092 }
5093
5094 /* check the first 10-BD window size */
5095 if (wnd_sum < lso_mss) {
5096 return (1);
5097 }
5098
5099 /* run through the windows */
5100 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
5101 /* subtract the first mbuf->m_len of the last window (-header) */
5102 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
5103 /* add the next mbuf len to the len of our new window */
5104 wnd_sum += htole16(segs[frag_idx].ds_len);
5105 if (wnd_sum < lso_mss) {
5106 return (1);
5107 }
5108 }
5109
5110 return (0);
5111}
5112
5113 static uint8_t
5114 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
5115 struct mbuf *m,
5116 uint32_t *parsing_data)
5117{
5118 struct ether_vlan_header *eh = NULL;
5119 struct ip *ip4 = NULL;
5120 struct ip6_hdr *ip6 = NULL;
5121 caddr_t ip = NULL;
5122 struct tcphdr *th = NULL;
5123 int e_hlen, ip_hlen, l4_off;
5124 uint16_t proto;
5125
5126 if (m->m_pkthdr.csum_flags == CSUM_IP) {
5127 /* no L4 checksum offload needed */
5128 return (0);
5129 }
5130
5131 /* get the Ethernet header */
5132 eh = mtod(m, struct ether_vlan_header *);
5133
5134 /* handle VLAN encapsulation if present */
5135 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5136 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5137 proto = ntohs(eh->evl_proto);
5138 } else {
5139 e_hlen = ETHER_HDR_LEN;
5140 proto = ntohs(eh->evl_encap_proto);
5141 }
5142
5143 switch (proto) {
5144 case ETHERTYPE_IP:
5145 /* get the IP header; if mbuf len < 20 then the header is in the next mbuf */
5146 ip4 = (m->m_len < sizeof(struct ip)) ?
5147 (struct ip *)m->m_next->m_data :
5148 (struct ip *)(m->m_data + e_hlen);
5149 /* ip_hl is the number of 32-bit words */
5150 ip_hlen = (ip4->ip_hl << 2);
5151 ip = (caddr_t)ip4;
5152 break;
5153 case ETHERTYPE_IPV6:
5154 /* get the IPv6 header; if mbuf len < 40 then the header is in the next mbuf */
5155 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5156 (struct ip6_hdr *)m->m_next->m_data :
5157 (struct ip6_hdr *)(m->m_data + e_hlen);
5158 /* XXX cannot support offload with IPv6 extensions */
5159 ip_hlen = sizeof(struct ip6_hdr);
5160 ip = (caddr_t)ip6;
5161 break;
5162 default:
5163 /* We can't offload in this case... */
5164 /* XXX error stat ???
*/ 5165 return (0); 5166 } 5167 5168 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5169 l4_off = (e_hlen + ip_hlen); 5170 5171 *parsing_data |= 5172 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5173 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5174 5175 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5176 CSUM_TSO | 5177 CSUM_TCP_IPV6)) { 5178 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5179 th = (struct tcphdr *)(ip + ip_hlen); 5180 /* th_off is number of 32-bit words */ 5181 *parsing_data |= ((th->th_off << 5182 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5183 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5184 return (l4_off + (th->th_off << 2)); /* entire header length */ 5185 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5186 CSUM_UDP_IPV6)) { 5187 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5188 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5189 } else { 5190 /* XXX error stat ??? */ 5191 return (0); 5192 } 5193} 5194 5195static uint8_t 5196bxe_set_pbd_csum(struct bxe_fastpath *fp, 5197 struct mbuf *m, 5198 struct eth_tx_parse_bd_e1x *pbd) 5199{ 5200 struct ether_vlan_header *eh = NULL; 5201 struct ip *ip4 = NULL; 5202 struct ip6_hdr *ip6 = NULL; 5203 caddr_t ip = NULL; 5204 struct tcphdr *th = NULL; 5205 struct udphdr *uh = NULL; 5206 int e_hlen, ip_hlen; 5207 uint16_t proto; 5208 uint8_t hlen; 5209 uint16_t tmp_csum; 5210 uint32_t *tmp_uh; 5211 5212 /* get the Ethernet header */ 5213 eh = mtod(m, struct ether_vlan_header *); 5214 5215 /* handle VLAN encapsulation if present */ 5216 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5217 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5218 proto = ntohs(eh->evl_proto); 5219 } else { 5220 e_hlen = ETHER_HDR_LEN; 5221 proto = ntohs(eh->evl_encap_proto); 5222 } 5223 5224 switch (proto) { 5225 case ETHERTYPE_IP: 5226 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5227 ip4 = (m->m_len < sizeof(struct ip)) ? 5228 (struct ip *)m->m_next->m_data : 5229 (struct ip *)(m->m_data + e_hlen); 5230 /* ip_hl is number of 32-bit words */ 5231 ip_hlen = (ip4->ip_hl << 1); 5232 ip = (caddr_t)ip4; 5233 break; 5234 case ETHERTYPE_IPV6: 5235 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5236 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5237 (struct ip6_hdr *)m->m_next->m_data : 5238 (struct ip6_hdr *)(m->m_data + e_hlen); 5239 /* XXX cannot support offload with IPv6 extensions */ 5240 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5241 ip = (caddr_t)ip6; 5242 break; 5243 default: 5244 /* We can't offload in this case... */ 5245 /* XXX error stat ??? 
*/ 5246 return (0); 5247 } 5248 5249 hlen = (e_hlen >> 1); 5250 5251 /* note that rest of global_data is indirectly zeroed here */ 5252 if (m->m_flags & M_VLANTAG) { 5253 pbd->global_data = 5254 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5255 } else { 5256 pbd->global_data = htole16(hlen); 5257 } 5258 5259 pbd->ip_hlen_w = ip_hlen; 5260 5261 hlen += pbd->ip_hlen_w; 5262 5263 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5264 5265 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5266 CSUM_TSO | 5267 CSUM_TCP_IPV6)) { 5268 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5269 /* th_off is number of 32-bit words */ 5270 hlen += (uint16_t)(th->th_off << 1); 5271 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5272 CSUM_UDP_IPV6)) { 5273 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5274 hlen += (sizeof(struct udphdr) / 2); 5275 } else { 5276 /* valid case as only CSUM_IP was set */ 5277 return (0); 5278 } 5279 5280 pbd->total_hlen_w = htole16(hlen); 5281 5282 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5283 CSUM_TSO | 5284 CSUM_TCP_IPV6)) { 5285 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5286 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5287 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5288 CSUM_UDP_IPV6)) { 5289 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5290 5291 /* 5292 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5293 * checksums and does not know anything about the UDP header and where 5294 * the checksum field is located. It only knows about TCP. Therefore 5295 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5296 * offload. Since the checksum field offset for TCP is 16 bytes and 5297 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5298 * bytes less than the start of the UDP header. This allows the 5299 * hardware to write the checksum in the correct spot. But the 5300 * hardware will compute a checksum which includes the last 10 bytes 5301 * of the IP header. To correct this we tweak the stack computed 5302 * pseudo checksum by folding in the calculation of the inverse 5303 * checksum for those final 10 bytes of the IP header. This allows 5304 * the correct checksum to be computed by the hardware. 5305 */ 5306 5307 /* set pointer 10 bytes before UDP header */ 5308 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5309 5310 /* calculate a pseudo header checksum over the first 10 bytes */ 5311 tmp_csum = in_pseudo(*tmp_uh, 5312 *(tmp_uh + 1), 5313 *(uint16_t *)(tmp_uh + 2)); 5314 5315 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5316 } 5317 5318 return (hlen * 2); /* entire header length, number of bytes */ 5319} 5320 5321static void 5322bxe_set_pbd_lso_e2(struct mbuf *m, 5323 uint32_t *parsing_data) 5324{ 5325 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5326 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5327 ETH_TX_PARSE_BD_E2_LSO_MSS); 5328 5329 /* XXX test for IPv6 with extension header... */ 5330#if 0 5331 struct ip6_hdr *ip6; 5332 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5333 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5334#endif 5335} 5336 5337static void 5338bxe_set_pbd_lso(struct mbuf *m, 5339 struct eth_tx_parse_bd_e1x *pbd) 5340{ 5341 struct ether_vlan_header *eh = NULL; 5342 struct ip *ip = NULL; 5343 struct tcphdr *th = NULL; 5344 int e_hlen; 5345 5346 /* get the Ethernet header */ 5347 eh = mtod(m, struct ether_vlan_header *); 5348 5349 /* handle VLAN encapsulation if present */ 5350 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
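           /* an 802.1Q tag adds ETHER_VLAN_ENCAP_LEN (4) bytes */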
5351 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5352
5353 /* get the IP and TCP header; with LSO the entire header is in the first mbuf */
5354 /* XXX assuming IPv4 */
5355 ip = (struct ip *)(m->m_data + e_hlen);
5356 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5357
5358 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5359 pbd->tcp_send_seq = ntohl(th->th_seq);
5360 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5361
5362#if 1
5363 /* XXX IPv4 */
5364 pbd->ip_id = ntohs(ip->ip_id);
5365 pbd->tcp_pseudo_csum =
5366 ntohs(in_pseudo(ip->ip_src.s_addr,
5367 ip->ip_dst.s_addr,
5368 htons(IPPROTO_TCP)));
5369#else
5370 /* XXX IPv6 */
5371 pbd->tcp_pseudo_csum =
5372 ntohs(in_pseudo(&ip6->ip6_src,
5373 &ip6->ip6_dst,
5374 htons(IPPROTO_TCP)));
5375#endif
5376
5377 pbd->global_data |=
5378 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5379}
5380
5381/*
5382 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5383 * visible to the controller.
5384 *
5385 * If an mbuf is submitted to this routine and cannot be given to the
5386 * controller (e.g. it has too many fragments) then the function may free
5387 * the mbuf and return to the caller.
5388 *
5389 * Returns:
5390 * 0 = Success, !0 = Failure
5391 * Note the side effect that an mbuf may be freed if it causes a problem.
5392 */
5393 static int
5394 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5395{
5396 bus_dma_segment_t segs[32];
5397 struct mbuf *m0;
5398 struct bxe_sw_tx_bd *tx_buf;
5399 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5400 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5401 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5402 struct eth_tx_bd *tx_data_bd;
5403 struct eth_tx_bd *tx_total_pkt_size_bd;
5404 struct eth_tx_start_bd *tx_start_bd;
5405 uint16_t bd_prod, pkt_prod, total_pkt_size;
5406 uint8_t mac_type;
5407 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5408 struct bxe_softc *sc;
5409 uint16_t tx_bd_avail;
5410 struct ether_vlan_header *eh;
5411 uint32_t pbd_e2_parsing_data = 0;
5412 uint8_t hlen = 0;
5413 int tmp_bd;
5414 int i;
5415
5416 sc = fp->sc;
5417
5418 M_ASSERTPKTHDR(*m_head);
5419
5420 m0 = *m_head;
5421 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5422 tx_start_bd = NULL;
5423 tx_data_bd = NULL;
5424 tx_total_pkt_size_bd = NULL;
5425
5426 /* get the H/W pointer for packets and BDs */
5427 pkt_prod = fp->tx_pkt_prod;
5428 bd_prod = fp->tx_bd_prod;
5429
5430 mac_type = UNICAST_ADDRESS;
5431
5432 /* map the mbuf into the next open DMAable memory */
5433 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5434 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5435 tx_buf->m_map, m0,
5436 segs, &nsegs, BUS_DMA_NOWAIT);
5437
5438 /* mapping errors */
5439 if (__predict_false(error != 0)) {
5440 fp->eth_q_stats.tx_dma_mapping_failure++;
5441 if (error == ENOMEM) {
5442 /* resource issue, try again later */
5443 rc = ENOMEM;
5444 } else if (error == EFBIG) {
5445 /* possibly recoverable with defragmentation */
5446 fp->eth_q_stats.mbuf_defrag_attempts++;
5447 m0 = m_defrag(*m_head, M_DONTWAIT);
5448 if (m0 == NULL) {
5449 fp->eth_q_stats.mbuf_defrag_failures++;
5450 rc = ENOBUFS;
5451 } else {
5452 /* defrag successful, try mapping again */
5453 *m_head = m0;
5454 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5455 tx_buf->m_map, m0,
5456 segs, &nsegs, BUS_DMA_NOWAIT);
5457 if (error) {
5458 fp->eth_q_stats.tx_dma_mapping_failure++;
5459 rc = error;
5460 }
5461 }
5462 } else {
5463 /* unknown, unrecoverable mapping error */
5464 BLOGE(sc,
"Unknown TX mapping error rc=%d\n", error); 5465 bxe_dump_mbuf(sc, m0, FALSE); 5466 rc = error; 5467 } 5468 5469 goto bxe_tx_encap_continue; 5470 } 5471 5472 tx_bd_avail = bxe_tx_avail(sc, fp); 5473 5474 /* make sure there is enough room in the send queue */ 5475 if (__predict_false(tx_bd_avail < (nsegs + 2))) { 5476 /* Recoverable, try again later. */ 5477 fp->eth_q_stats.tx_hw_queue_full++; 5478 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5479 rc = ENOMEM; 5480 goto bxe_tx_encap_continue; 5481 } 5482 5483 /* capture the current H/W TX chain high watermark */ 5484 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < 5485 (TX_BD_USABLE - tx_bd_avail))) { 5486 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); 5487 } 5488 5489 /* make sure it fits in the packet window */ 5490 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5491 /* 5492 * The mbuf may be to big for the controller to handle. If the frame 5493 * is a TSO frame we'll need to do an additional check. 5494 */ 5495 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5496 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { 5497 goto bxe_tx_encap_continue; /* OK to send */ 5498 } else { 5499 fp->eth_q_stats.tx_window_violation_tso++; 5500 } 5501 } else { 5502 fp->eth_q_stats.tx_window_violation_std++; 5503 } 5504 5505 /* lets try to defragment this mbuf and remap it */ 5506 fp->eth_q_stats.mbuf_defrag_attempts++; 5507 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5508 5509 m0 = m_defrag(*m_head, M_DONTWAIT); 5510 if (m0 == NULL) { 5511 fp->eth_q_stats.mbuf_defrag_failures++; 5512 /* Ugh, just drop the frame... :( */ 5513 rc = ENOBUFS; 5514 } else { 5515 /* defrag successful, try mapping again */ 5516 *m_head = m0; 5517 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5518 tx_buf->m_map, m0, 5519 segs, &nsegs, BUS_DMA_NOWAIT); 5520 if (error) { 5521 fp->eth_q_stats.tx_dma_mapping_failure++; 5522 /* No sense in trying to defrag/copy chain, drop it. 
:( */ 5523 rc = error; 5524 } 5525 else { 5526 /* if the chain is still too long then drop it */ 5527 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5528 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5529 rc = ENODEV; 5530 } 5531 } 5532 } 5533 } 5534 5535bxe_tx_encap_continue: 5536 5537 /* Check for errors */ 5538 if (rc) { 5539 if (rc == ENOMEM) { 5540 /* recoverable try again later */ 5541 } else { 5542 fp->eth_q_stats.tx_soft_errors++; 5543 fp->eth_q_stats.mbuf_alloc_tx--; 5544 m_freem(*m_head); 5545 *m_head = NULL; 5546 } 5547 5548 return (rc); 5549 } 5550 5551 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5552 if (m0->m_flags & M_BCAST) { 5553 mac_type = BROADCAST_ADDRESS; 5554 } else if (m0->m_flags & M_MCAST) { 5555 mac_type = MULTICAST_ADDRESS; 5556 } 5557 5558 /* store the mbuf into the mbuf ring */ 5559 tx_buf->m = m0; 5560 tx_buf->first_bd = fp->tx_bd_prod; 5561 tx_buf->flags = 0; 5562 5563 /* prepare the first transmit (start) BD for the mbuf */ 5564 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5565 5566 BLOGD(sc, DBG_TX, 5567 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5568 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5569 5570 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5571 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5572 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5573 total_pkt_size += tx_start_bd->nbytes; 5574 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5575 5576 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5577 5578 /* all frames have at least Start BD + Parsing BD */ 5579 nbds = nsegs + 1; 5580 tx_start_bd->nbd = htole16(nbds); 5581 5582 if (m0->m_flags & M_VLANTAG) { 5583 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5584 tx_start_bd->bd_flags.as_bitfield |= 5585 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5586 } else { 5587 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5588 if (IS_VF(sc)) { 5589 /* map ethernet header to find type and header length */ 5590 eh = mtod(m0, struct ether_vlan_header *); 5591 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5592 } else { 5593 /* used by FW for packet accounting */ 5594 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5595#if 0 5596 /* 5597 * If NPAR-SD is active then FW should do the tagging regardless 5598 * of value of priority. Otherwise, if priority indicates this is 5599 * a control packet we need to indicate to FW to avoid tagging. 5600 */ 5601 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5602 SET_FLAG(tx_start_bd->general_data, 5603 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5604 } 5605#endif 5606 } 5607 } 5608 5609 /* 5610 * add a parsing BD from the chain. 
The parsing BD is always added 5611 * though it is only used for TSO and chksum 5612 */ 5613 bd_prod = TX_BD_NEXT(bd_prod); 5614 5615 if (m0->m_pkthdr.csum_flags) { 5616 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5617 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5618 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5619 } 5620 5621 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5622 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5623 ETH_TX_BD_FLAGS_L4_CSUM); 5624 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5625 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5626 ETH_TX_BD_FLAGS_IS_UDP | 5627 ETH_TX_BD_FLAGS_L4_CSUM); 5628 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5629 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5630 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5631 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5632 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5633 ETH_TX_BD_FLAGS_IS_UDP); 5634 } 5635 } 5636 5637 if (!CHIP_IS_E1x(sc)) { 5638 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5639 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5640 5641 if (m0->m_pkthdr.csum_flags) { 5642 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5643 } 5644 5645#if 0 5646 /* 5647 * Add the MACs to the parsing BD if the module param was 5648 * explicitly set, if this is a vf, or in switch independent 5649 * mode. 5650 */ 5651 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { 5652 eh = mtod(m0, struct ether_vlan_header *); 5653 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 5654 &pbd_e2->data.mac_addr.src_mid, 5655 &pbd_e2->data.mac_addr.src_lo, 5656 eh->evl_shost); 5657 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 5658 &pbd_e2->data.mac_addr.dst_mid, 5659 &pbd_e2->data.mac_addr.dst_lo, 5660 eh->evl_dhost); 5661 } 5662#endif 5663 5664 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5665 mac_type); 5666 } else { 5667 uint16_t global_data = 0; 5668 5669 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5670 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5671 5672 if (m0->m_pkthdr.csum_flags) { 5673 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5674 } 5675 5676 SET_FLAG(global_data, 5677 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5678 pbd_e1x->global_data |= htole16(global_data); 5679 } 5680 5681 /* setup the parsing BD with TSO specific info */ 5682 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5683 fp->eth_q_stats.tx_ofld_frames_lso++; 5684 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5685 5686 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5687 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5688 5689 /* split the first BD into header/data making the fw job easy */ 5690 nbds++; 5691 tx_start_bd->nbd = htole16(nbds); 5692 tx_start_bd->nbytes = htole16(hlen); 5693 5694 bd_prod = TX_BD_NEXT(bd_prod); 5695 5696 /* new transmit BD after the tx_parse_bd */ 5697 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5698 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5699 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5700 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5701 if (tx_total_pkt_size_bd == NULL) { 5702 tx_total_pkt_size_bd = tx_data_bd; 5703 } 5704 5705 BLOGD(sc, DBG_TX, 5706 "TSO split header size is %d (%x:%x) nbds %d\n", 5707 le16toh(tx_start_bd->nbytes), 5708 le32toh(tx_start_bd->addr_hi), 5709 le32toh(tx_start_bd->addr_lo), 5710 nbds); 5711 } 5712 5713 if (!CHIP_IS_E1x(sc)) { 5714 
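        /* E2 and newer devices take the LSO MSS via the parsing-data
         * dword; E1x devices carry it in dedicated parse BD fields */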
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5715 } else { 5716 bxe_set_pbd_lso(m0, pbd_e1x); 5717 } 5718 } 5719 5720 if (pbd_e2_parsing_data) { 5721 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5722 } 5723 5724 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5725 for (i = 1; i < nsegs ; i++) { 5726 bd_prod = TX_BD_NEXT(bd_prod); 5727 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5728 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5729 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5730 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5731 if (tx_total_pkt_size_bd == NULL) { 5732 tx_total_pkt_size_bd = tx_data_bd; 5733 } 5734 total_pkt_size += tx_data_bd->nbytes; 5735 } 5736 5737 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5738 5739 if (tx_total_pkt_size_bd != NULL) { 5740 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5741 } 5742 5743 if (__predict_false(sc->debug & DBG_TX)) { 5744 tmp_bd = tx_buf->first_bd; 5745 for (i = 0; i < nbds; i++) 5746 { 5747 if (i == 0) { 5748 BLOGD(sc, DBG_TX, 5749 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5750 "bd_flags=0x%x hdr_nbds=%d\n", 5751 tx_start_bd, 5752 tmp_bd, 5753 le16toh(tx_start_bd->nbd), 5754 le16toh(tx_start_bd->vlan_or_ethertype), 5755 tx_start_bd->bd_flags.as_bitfield, 5756 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5757 } else if (i == 1) { 5758 if (pbd_e1x) { 5759 BLOGD(sc, DBG_TX, 5760 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5761 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5762 "tcp_seq=%u total_hlen_w=%u\n", 5763 pbd_e1x, 5764 tmp_bd, 5765 pbd_e1x->global_data, 5766 pbd_e1x->ip_hlen_w, 5767 pbd_e1x->ip_id, 5768 pbd_e1x->lso_mss, 5769 pbd_e1x->tcp_flags, 5770 pbd_e1x->tcp_pseudo_csum, 5771 pbd_e1x->tcp_send_seq, 5772 le16toh(pbd_e1x->total_hlen_w)); 5773 } else { /* if (pbd_e2) */ 5774 BLOGD(sc, DBG_TX, 5775 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5776 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5777 pbd_e2, 5778 tmp_bd, 5779 pbd_e2->data.mac_addr.dst_hi, 5780 pbd_e2->data.mac_addr.dst_mid, 5781 pbd_e2->data.mac_addr.dst_lo, 5782 pbd_e2->data.mac_addr.src_hi, 5783 pbd_e2->data.mac_addr.src_mid, 5784 pbd_e2->data.mac_addr.src_lo, 5785 pbd_e2->parsing_data); 5786 } 5787 } 5788 5789 if (i != 1) { /* skip parse db as it doesn't hold data */ 5790 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5791 BLOGD(sc, DBG_TX, 5792 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5793 tx_data_bd, 5794 tmp_bd, 5795 le16toh(tx_data_bd->nbytes), 5796 le32toh(tx_data_bd->addr_hi), 5797 le32toh(tx_data_bd->addr_lo)); 5798 } 5799 5800 tmp_bd = TX_BD_NEXT(tmp_bd); 5801 } 5802 } 5803 5804 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5805 5806 /* update TX BD producer index value for next TX */ 5807 bd_prod = TX_BD_NEXT(bd_prod); 5808 5809 /* 5810 * If the chain of tx_bd's describing this frame is adjacent to or spans 5811 * an eth_tx_next_bd element then we need to increment the nbds value. 5812 */ 5813 if (TX_BD_IDX(bd_prod) < nbds) { 5814 nbds++; 5815 } 5816 5817 /* don't allow reordering of writes for nbd and packets */ 5818 mb(); 5819 5820 fp->tx_db.data.prod += nbds; 5821 5822 /* producer points to the next free tx_bd at this point */ 5823 fp->tx_pkt_prod++; 5824 fp->tx_bd_prod = bd_prod; 5825 5826 DOORBELL(sc, fp->index, fp->tx_db.raw); 5827 5828 fp->eth_q_stats.tx_pkts++; 5829 5830 /* Prevent speculative reads from getting ahead of the status block. 
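 * (the BAR0 and BAR2 read barriers below keep later status block and
 * doorbell reads from being speculated past this point)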
*/ 5831 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5832 0, 0, BUS_SPACE_BARRIER_READ); 5833 5834 /* Prevent speculative reads from getting ahead of the doorbell. */ 5835 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5836 0, 0, BUS_SPACE_BARRIER_READ); 5837 5838 return (0); 5839} 5840 5841static void 5842bxe_tx_start_locked(struct bxe_softc *sc, 5843 struct ifnet *ifp, 5844 struct bxe_fastpath *fp) 5845{ 5846 struct mbuf *m = NULL; 5847 int tx_count = 0; 5848 uint16_t tx_bd_avail; 5849 5850 BXE_FP_TX_LOCK_ASSERT(fp); 5851 5852 /* keep adding entries while there are frames to send */ 5853 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 5854 5855 /* 5856 * check for any frames to send 5857 * dequeue can still be NULL even if queue is not empty 5858 */ 5859 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 5860 if (__predict_false(m == NULL)) { 5861 break; 5862 } 5863 5864 /* the mbuf now belongs to us */ 5865 fp->eth_q_stats.mbuf_alloc_tx++; 5866 5867 /* 5868 * Put the frame into the transmit ring. If we don't have room, 5869 * place the mbuf back at the head of the TX queue, set the 5870 * OACTIVE flag, and wait for the NIC to drain the chain. 5871 */ 5872 if (__predict_false(bxe_tx_encap(fp, &m))) { 5873 fp->eth_q_stats.tx_encap_failures++; 5874 if (m != NULL) { 5875 /* mark the TX queue as full and return the frame */ 5876 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 5877 IFQ_DRV_PREPEND(&ifp->if_snd, m); 5878 fp->eth_q_stats.mbuf_alloc_tx--; 5879 fp->eth_q_stats.tx_queue_xoff++; 5880 } 5881 5882 /* stop looking for more work */ 5883 break; 5884 } 5885 5886 /* the frame was enqueued successfully */ 5887 tx_count++; 5888 5889 /* send a copy of the frame to any BPF listeners. */ 5890 BPF_MTAP(ifp, m); 5891 5892 tx_bd_avail = bxe_tx_avail(sc, fp); 5893 5894 /* handle any completions if we're running low */ 5895 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5896 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5897 bxe_txeof(sc, fp); 5898 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5899 break; 5900 } 5901 } 5902 } 5903 5904 /* all TX packets were dequeued and/or the tx ring is full */ 5905 if (tx_count > 0) { 5906 /* reset the TX watchdog timeout timer */ 5907 fp->watchdog_timer = BXE_TX_TIMEOUT; 5908 } 5909} 5910 5911/* Legacy (non-RSS) dispatch routine */ 5912static void 5913bxe_tx_start(struct ifnet *ifp) 5914{ 5915 struct bxe_softc *sc; 5916 struct bxe_fastpath *fp; 5917 5918 sc = ifp->if_softc; 5919 5920 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5921 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5922 return; 5923 } 5924 5925 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5926 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5927 return; 5928 } 5929 5930 if (!sc->link_vars.link_up) { 5931 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5932 return; 5933 } 5934 5935 fp = &sc->fp[0]; 5936 5937 BXE_FP_TX_LOCK(fp); 5938 bxe_tx_start_locked(sc, ifp, fp); 5939 BXE_FP_TX_UNLOCK(fp); 5940} 5941 5942#if __FreeBSD_version >= 800000 5943 5944static int 5945bxe_tx_mq_start_locked(struct bxe_softc *sc, 5946 struct ifnet *ifp, 5947 struct bxe_fastpath *fp, 5948 struct mbuf *m) 5949{ 5950 struct buf_ring *tx_br = fp->tx_br; 5951 struct mbuf *next; 5952 int depth, rc, tx_count; 5953 uint16_t tx_bd_avail; 5954 5955 rc = tx_count = 0; 5956 5957 if (!tx_br) { 5958 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 5959 return (EINVAL); 5960 } 5961 5962 /* fetch the depth of the driver queue */ 5963 depth = drbr_inuse(ifp, tx_br); 5964 if (depth > 
fp->eth_q_stats.tx_max_drbr_queue_depth) { 5965 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 5966 } 5967 5968 BXE_FP_TX_LOCK_ASSERT(fp); 5969 5970 if (m == NULL) { 5971 /* no new work, check for pending frames */ 5972 next = drbr_dequeue(ifp, tx_br); 5973 } else if (drbr_needs_enqueue(ifp, tx_br)) { 5974 /* have both new and pending work, maintain packet order */ 5975 rc = drbr_enqueue(ifp, tx_br, m); 5976 if (rc != 0) { 5977 fp->eth_q_stats.tx_soft_errors++; 5978 goto bxe_tx_mq_start_locked_exit; 5979 } 5980 next = drbr_dequeue(ifp, tx_br); 5981 } else { 5982 /* new work only and nothing pending */ 5983 next = m; 5984 } 5985 5986 /* keep adding entries while there are frames to send */ 5987 while (next != NULL) { 5988 5989 /* the mbuf now belongs to us */ 5990 fp->eth_q_stats.mbuf_alloc_tx++; 5991 5992 /* 5993 * Put the frame into the transmit ring. If we don't have room, 5994 * place the mbuf back at the head of the TX queue, set the 5995 * OACTIVE flag, and wait for the NIC to drain the chain. 5996 */ 5997 rc = bxe_tx_encap(fp, &next); 5998 if (__predict_false(rc != 0)) { 5999 fp->eth_q_stats.tx_encap_failures++; 6000 if (next != NULL) { 6001 /* mark the TX queue as full and save the frame */ 6002 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 6003 /* XXX this may reorder the frame */ 6004 rc = drbr_enqueue(ifp, tx_br, next); 6005 fp->eth_q_stats.mbuf_alloc_tx--; 6006 fp->eth_q_stats.tx_frames_deferred++; 6007 } 6008 6009 /* stop looking for more work */ 6010 break; 6011 } 6012 6013 /* the transmit frame was enqueued successfully */ 6014 tx_count++; 6015 6016 /* send a copy of the frame to any BPF listeners */ 6017 BPF_MTAP(ifp, next); 6018 6019 tx_bd_avail = bxe_tx_avail(sc, fp); 6020 6021 /* handle any completions if we're running low */ 6022 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6023 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6024 bxe_txeof(sc, fp); 6025 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 6026 break; 6027 } 6028 } 6029 6030 next = drbr_dequeue(ifp, tx_br); 6031 } 6032 6033 /* all TX packets were dequeued and/or the tx ring is full */ 6034 if (tx_count > 0) { 6035 /* reset the TX watchdog timeout timer */ 6036 fp->watchdog_timer = BXE_TX_TIMEOUT; 6037 } 6038 6039bxe_tx_mq_start_locked_exit: 6040 6041 return (rc); 6042} 6043 6044/* Multiqueue (TSS) dispatch routine. 
*/ 6045static int 6046bxe_tx_mq_start(struct ifnet *ifp, 6047 struct mbuf *m) 6048{ 6049 struct bxe_softc *sc = ifp->if_softc; 6050 struct bxe_fastpath *fp; 6051 int fp_index, rc; 6052 6053 fp_index = 0; /* default is the first queue */ 6054 6055 /* change the queue if using flow ID */ 6056 if ((m->m_flags & M_FLOWID) != 0) { 6057 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6058 } 6059 6060 fp = &sc->fp[fp_index]; 6061 6062 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 6063 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 6064 return (ENETDOWN); 6065 } 6066 6067 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 6068 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 6069 return (EBUSY); 6070 } 6071 6072 if (!sc->link_vars.link_up) { 6073 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 6074 return (ENETDOWN); 6075 } 6076 6077 /* XXX change to TRYLOCK here and if failed then schedule taskqueue */ 6078 6079 BXE_FP_TX_LOCK(fp); 6080 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6081 BXE_FP_TX_UNLOCK(fp); 6082 6083 return (rc); 6084} 6085 6086static void 6087bxe_mq_flush(struct ifnet *ifp) 6088{ 6089 struct bxe_softc *sc = ifp->if_softc; 6090 struct bxe_fastpath *fp; 6091 struct mbuf *m; 6092 int i; 6093 6094 for (i = 0; i < sc->num_queues; i++) { 6095 fp = &sc->fp[i]; 6096 6097 if (fp->state != BXE_FP_STATE_OPEN) { 6098 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6099 fp->index, fp->state); 6100 continue; 6101 } 6102 6103 if (fp->tx_br != NULL) { 6104 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6105 BXE_FP_TX_LOCK(fp); 6106 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6107 m_freem(m); 6108 } 6109 BXE_FP_TX_UNLOCK(fp); 6110 } 6111 } 6112 6113 if_qflush(ifp); 6114} 6115 6116#endif /* FreeBSD_version >= 800000 */ 6117 6118static uint16_t 6119bxe_cid_ilt_lines(struct bxe_softc *sc) 6120{ 6121 if (IS_SRIOV(sc)) { 6122 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6123 } 6124 return (L2_ILT_LINES(sc)); 6125} 6126 6127static void 6128bxe_ilt_set_info(struct bxe_softc *sc) 6129{ 6130 struct ilt_client_info *ilt_client; 6131 struct ecore_ilt *ilt = sc->ilt; 6132 uint16_t line = 0; 6133 6134 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6135 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6136 6137 /* CDU */ 6138 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6139 ilt_client->client_num = ILT_CLIENT_CDU; 6140 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6141 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6142 ilt_client->start = line; 6143 line += bxe_cid_ilt_lines(sc); 6144 6145 if (CNIC_SUPPORT(sc)) { 6146 line += CNIC_ILT_LINES; 6147 } 6148 6149 ilt_client->end = (line - 1); 6150 6151 BLOGD(sc, DBG_LOAD, 6152 "ilt client[CDU]: start %d, end %d, " 6153 "psz 0x%x, flags 0x%x, hw psz %d\n", 6154 ilt_client->start, ilt_client->end, 6155 ilt_client->page_size, 6156 ilt_client->flags, 6157 ilog2(ilt_client->page_size >> 12)); 6158 6159 /* QM */ 6160 if (QM_INIT(sc->qm_cid_count)) { 6161 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6162 ilt_client->client_num = ILT_CLIENT_QM; 6163 ilt_client->page_size = QM_ILT_PAGE_SZ; 6164 ilt_client->flags = 0; 6165 ilt_client->start = line; 6166 6167 /* 4 bytes for each cid */ 6168 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6169 QM_ILT_PAGE_SZ); 6170 6171 ilt_client->end = (line - 1); 6172 6173 BLOGD(sc, DBG_LOAD, 6174 "ilt client[QM]: start %d, end %d, " 6175 "psz 0x%x, flags 0x%x, hw psz %d\n", 6176 ilt_client->start, 
ilt_client->end, 6177 ilt_client->page_size, ilt_client->flags, 6178 ilog2(ilt_client->page_size >> 12)); 6179 } 6180 6181 if (CNIC_SUPPORT(sc)) { 6182 /* SRC */ 6183 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6184 ilt_client->client_num = ILT_CLIENT_SRC; 6185 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6186 ilt_client->flags = 0; 6187 ilt_client->start = line; 6188 line += SRC_ILT_LINES; 6189 ilt_client->end = (line - 1); 6190 6191 BLOGD(sc, DBG_LOAD, 6192 "ilt client[SRC]: start %d, end %d, " 6193 "psz 0x%x, flags 0x%x, hw psz %d\n", 6194 ilt_client->start, ilt_client->end, 6195 ilt_client->page_size, ilt_client->flags, 6196 ilog2(ilt_client->page_size >> 12)); 6197 6198 /* TM */ 6199 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6200 ilt_client->client_num = ILT_CLIENT_TM; 6201 ilt_client->page_size = TM_ILT_PAGE_SZ; 6202 ilt_client->flags = 0; 6203 ilt_client->start = line; 6204 line += TM_ILT_LINES; 6205 ilt_client->end = (line - 1); 6206 6207 BLOGD(sc, DBG_LOAD, 6208 "ilt client[TM]: start %d, end %d, " 6209 "psz 0x%x, flags 0x%x, hw psz %d\n", 6210 ilt_client->start, ilt_client->end, 6211 ilt_client->page_size, ilt_client->flags, 6212 ilog2(ilt_client->page_size >> 12)); 6213 } 6214 6215 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6216} 6217 6218static void 6219bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6220{ 6221 int i; 6222 6223 BLOGD(sc, DBG_LOAD, "mtu = %d\n", sc->mtu); 6224 6225 for (i = 0; i < sc->num_queues; i++) { 6226 /* get the Rx buffer size for RX frames */ 6227 sc->fp[i].rx_buf_size = 6228 (IP_HEADER_ALIGNMENT_PADDING + 6229 ETH_OVERHEAD + 6230 sc->mtu); 6231 6232 BLOGD(sc, DBG_LOAD, "rx_buf_size for fp[%02d] = %d\n", 6233 i, sc->fp[i].rx_buf_size); 6234 6235 /* get the mbuf allocation size for RX frames */ 6236 if (sc->fp[i].rx_buf_size <= MCLBYTES) { 6237 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6238 } else if (sc->fp[i].rx_buf_size <= BCM_PAGE_SIZE) { 6239 sc->fp[i].mbuf_alloc_size = PAGE_SIZE; 6240 } else { 6241 sc->fp[i].mbuf_alloc_size = MJUM9BYTES; 6242 } 6243 6244 BLOGD(sc, DBG_LOAD, "mbuf_alloc_size for fp[%02d] = %d\n", 6245 i, sc->fp[i].mbuf_alloc_size); 6246 } 6247} 6248 6249static int 6250bxe_alloc_ilt_mem(struct bxe_softc *sc) 6251{ 6252 int rc = 0; 6253 6254 if ((sc->ilt = 6255 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6256 M_BXE_ILT, 6257 (M_NOWAIT | M_ZERO))) == NULL) { 6258 rc = 1; 6259 } 6260 6261 return (rc); 6262} 6263 6264static int 6265bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6266{ 6267 int rc = 0; 6268 6269 if ((sc->ilt->lines = 6270 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6271 M_BXE_ILT, 6272 (M_NOWAIT | M_ZERO))) == NULL) { 6273 rc = 1; 6274 } 6275 6276 return (rc); 6277} 6278 6279static void 6280bxe_free_ilt_mem(struct bxe_softc *sc) 6281{ 6282 if (sc->ilt != NULL) { 6283 free(sc->ilt, M_BXE_ILT); 6284 sc->ilt = NULL; 6285 } 6286} 6287 6288static void 6289bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6290{ 6291 if (sc->ilt->lines != NULL) { 6292 free(sc->ilt->lines, M_BXE_ILT); 6293 sc->ilt->lines = NULL; 6294 } 6295} 6296 6297static void 6298bxe_free_mem(struct bxe_softc *sc) 6299{ 6300 int i; 6301 6302#if 0 6303 if (!CONFIGURE_NIC_MODE(sc)) { 6304 /* free searcher T2 table */ 6305 bxe_dma_free(sc, &sc->t2); 6306 } 6307#endif 6308 6309 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6310 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6311 sc->context[i].vcxt = NULL; 6312 sc->context[i].size = 0; 6313 } 6314 6315 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6316 6317 bxe_free_ilt_lines_mem(sc); 6318 
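    /*
     * Informational note: this undoes bxe_alloc_mem(). The CDU context
     * DMA blocks are released first, then the per-client ILT pages via
     * ecore_ilt_mem_op(ILT_MEMOP_FREE), and finally the ILT line array.
     * sc->ilt itself stays allocated; it is freed separately by
     * bxe_free_ilt_mem().
     */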
#if 0
    bxe_iov_free_mem(sc);
#endif
}

static int
bxe_alloc_mem(struct bxe_softc *sc)
{
    int context_size;
    int allocated;
    int i;

#if 0
    if (!CONFIGURE_NIC_MODE(sc)) {
        /* allocate searcher T2 table */
        if (bxe_dma_alloc(sc, SRC_T2_SZ,
                          &sc->t2, "searcher t2 table") != 0) {
            return (-1);
        }
    }
#endif

    /*
     * Allocate memory for CDU context:
     * This memory is allocated separately and not in the generic ILT
     * functions because CDU differs in a few aspects:
     * 1. There can be multiple entities allocating memory for context -
     *    regular L2, CNIC, and SRIOV drivers. Each separately controls
     *    its own ILT lines.
     * 2. Since CDU page-size is not a single 4KB page (which is the case
     *    for the other ILT clients), to be efficient we want to support
     *    allocation of sub-page-size in the last entry.
     * 3. Context pointers are used by the driver to pass to FW / update
     *    the context (for the other ILT clients the pointers are used just
     *    to free the memory during unload).
     */
    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
    for (i = 0, allocated = 0; allocated < context_size; i++) {
        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
                                  (context_size - allocated));

        if (bxe_dma_alloc(sc, sc->context[i].size,
                          &sc->context[i].vcxt_dma,
                          "cdu context") != 0) {
            bxe_free_mem(sc);
            return (-1);
        }

        sc->context[i].vcxt =
            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;

        allocated += sc->context[i].size;
    }

    /* bail out if the ILT line array could not be allocated */
    if (bxe_alloc_ilt_lines_mem(sc) != 0) {
        BLOGE(sc, "Failed to allocate ILT lines memory\n");
        bxe_free_mem(sc);
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
          sc->ilt, sc->ilt->start_line, sc->ilt->lines);

    {
        for (i = 0; i < 4; i++) {
            BLOGD(sc, DBG_LOAD,
                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
                  i,
                  sc->ilt->clients[i].page_size,
                  sc->ilt->clients[i].start,
                  sc->ilt->clients[i].end,
                  sc->ilt->clients[i].client_num,
                  sc->ilt->clients[i].flags);
        }
    }

    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
        bxe_free_mem(sc);
        return (-1);
    }

#if 0
    if (bxe_iov_alloc_mem(sc)) {
        BLOGE(sc, "Failed to allocate memory for SRIOV\n");
        bxe_free_mem(sc);
        return (-1);
    }
#endif

    return (0);
}

static void
bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    /* free all mbufs and unload all maps */
    for (i = 0; i < RX_BD_TOTAL; i++) {
        if (fp->rx_mbuf_chain[i].m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag,
                            fp->rx_mbuf_chain[i].m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag,
                              fp->rx_mbuf_chain[i].m_map);
        }

        if (fp->rx_mbuf_chain[i].m != NULL) {
            m_freem(fp->rx_mbuf_chain[i].m);
            fp->rx_mbuf_chain[i].m = NULL;
            fp->eth_q_stats.mbuf_alloc_rx--;
        }
    }
}

static void
bxe_free_tpa_pool(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i, max_agg_queues;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    max_agg_queues = MAX_AGG_QS(sc);

    /* release all mbufs and unload all DMA maps in the TPA pool */
    for (i = 0; i < max_agg_queues; i++) {
        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag,
                            fp->rx_tpa_info[i].bd.m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag,
                              fp->rx_tpa_info[i].bd.m_map);
        }

        if (fp->rx_tpa_info[i].bd.m != NULL) {
            m_freem(fp->rx_tpa_info[i].bd.m);
            fp->rx_tpa_info[i].bd.m = NULL;
            fp->eth_q_stats.mbuf_alloc_tpa--;
        }
    }
}

static void
bxe_free_sge_chain(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i;

    sc = fp->sc;

    if (fp->rx_sge_mbuf_tag == NULL) {
        return;
    }

    /* free all mbufs and unload all maps */
    for (i = 0; i < RX_SGE_TOTAL; i++) {
        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
                            fp->rx_sge_mbuf_chain[i].m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                              fp->rx_sge_mbuf_chain[i].m_map);
        }

        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
            m_freem(fp->rx_sge_mbuf_chain[i].m);
            fp->rx_sge_mbuf_chain[i].m = NULL;
            fp->eth_q_stats.mbuf_alloc_sge--;
        }
    }
}

static void
bxe_free_fp_buffers(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

#if __FreeBSD_version >= 800000
        if (fp->tx_br != NULL) {
            struct mbuf *m;
            /* just in case bxe_mq_flush() wasn't called */
            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
                m_freem(m);
            }
            buf_ring_free(fp->tx_br, M_DEVBUF);
            fp->tx_br = NULL;
        }
#endif

        /* free all RX buffers */
        bxe_free_rx_bd_chain(fp);
        bxe_free_tpa_pool(fp);
        bxe_free_sge_chain(fp);

        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_rx);
        }

        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_sge);
        }

        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tpa);
        }

        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tx);
        }

        /* XXX verify all mbufs were reclaimed */

        if (mtx_initialized(&fp->tx_mtx)) {
            mtx_destroy(&fp->tx_mtx);
        }

        if (mtx_initialized(&fp->rx_mtx)) {
            mtx_destroy(&fp->rx_mtx);
        }
    }
}

static int
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                     uint16_t prev_index,
                     uint16_t index)
{
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs, rc;

    rc = 0;

    /* allocate the new RX BD mbuf */
    m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_rx++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
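        /*
         * The DMA load failed, so free the just-allocated mbuf and back
         * out the allocation counter to keep the accounting verified in
         * bxe_free_fp_buffers() balanced.
         */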
m_freem(m); 6589 fp->eth_q_stats.mbuf_alloc_rx--; 6590 return (rc); 6591 } 6592 6593 /* all mbufs must map to a single segment */ 6594 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6595 6596 /* release any existing RX BD mbuf mappings */ 6597 6598 if (prev_index != index) { 6599 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6600 6601 if (rx_buf->m_map != NULL) { 6602 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6603 BUS_DMASYNC_POSTREAD); 6604 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6605 } 6606 6607 /* 6608 * We only get here from bxe_rxeof() when the maximum number 6609 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6610 * holds the mbuf in the prev_index so it's OK to NULL it out 6611 * here without concern of a memory leak. 6612 */ 6613 fp->rx_mbuf_chain[prev_index].m = NULL; 6614 } 6615 6616 rx_buf = &fp->rx_mbuf_chain[index]; 6617 6618 if (rx_buf->m_map != NULL) { 6619 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6620 BUS_DMASYNC_POSTREAD); 6621 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6622 } 6623 6624 /* save the mbuf and mapping info for a future packet */ 6625 map = (prev_index != index) ? 6626 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6627 rx_buf->m_map = fp->rx_mbuf_spare_map; 6628 fp->rx_mbuf_spare_map = map; 6629 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6630 BUS_DMASYNC_PREREAD); 6631 rx_buf->m = m; 6632 6633 rx_bd = &fp->rx_chain[index]; 6634 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6635 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6636 6637 return (rc); 6638} 6639 6640static int 6641bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6642 int queue) 6643{ 6644 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6645 bus_dma_segment_t segs[1]; 6646 bus_dmamap_t map; 6647 struct mbuf *m; 6648 int nsegs; 6649 int rc = 0; 6650 6651 /* allocate the new TPA mbuf */ 6652 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6653 if (__predict_false(m == NULL)) { 6654 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6655 return (ENOBUFS); 6656 } 6657 6658 fp->eth_q_stats.mbuf_alloc_tpa++; 6659 6660 /* initialize the mbuf buffer length */ 6661 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6662 6663 /* map the mbuf into non-paged pool */ 6664 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6665 fp->rx_tpa_info_mbuf_spare_map, 6666 m, segs, &nsegs, BUS_DMA_NOWAIT); 6667 if (__predict_false(rc != 0)) { 6668 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6669 m_free(m); 6670 fp->eth_q_stats.mbuf_alloc_tpa--; 6671 return (rc); 6672 } 6673 6674 /* all mbufs must map to a single segment */ 6675 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6676 6677 /* release any existing TPA mbuf mapping */ 6678 if (tpa_info->bd.m_map != NULL) { 6679 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6680 BUS_DMASYNC_POSTREAD); 6681 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6682 } 6683 6684 /* save the mbuf and mapping info for the TPA mbuf */ 6685 map = tpa_info->bd.m_map; 6686 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6687 fp->rx_tpa_info_mbuf_spare_map = map; 6688 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6689 BUS_DMASYNC_PREREAD); 6690 tpa_info->bd.m = m; 6691 tpa_info->seg = segs[0]; 6692 6693 return (rc); 6694} 6695 6696/* 6697 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6698 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6699 * chain. 
6700 */ 6701static int 6702bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6703 uint16_t index) 6704{ 6705 struct bxe_sw_rx_bd *sge_buf; 6706 struct eth_rx_sge *sge; 6707 bus_dma_segment_t segs[1]; 6708 bus_dmamap_t map; 6709 struct mbuf *m; 6710 int nsegs; 6711 int rc = 0; 6712 6713 /* allocate a new SGE mbuf */ 6714 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6715 if (__predict_false(m == NULL)) { 6716 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6717 return (ENOMEM); 6718 } 6719 6720 fp->eth_q_stats.mbuf_alloc_sge++; 6721 6722 /* initialize the mbuf buffer length */ 6723 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6724 6725 /* map the SGE mbuf into non-paged pool */ 6726 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6727 fp->rx_sge_mbuf_spare_map, 6728 m, segs, &nsegs, BUS_DMA_NOWAIT); 6729 if (__predict_false(rc != 0)) { 6730 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6731 m_freem(m); 6732 fp->eth_q_stats.mbuf_alloc_sge--; 6733 return (rc); 6734 } 6735 6736 /* all mbufs must map to a single segment */ 6737 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6738 6739 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6740 6741 /* release any existing SGE mbuf mapping */ 6742 if (sge_buf->m_map != NULL) { 6743 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6744 BUS_DMASYNC_POSTREAD); 6745 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6746 } 6747 6748 /* save the mbuf and mapping info for a future packet */ 6749 map = sge_buf->m_map; 6750 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6751 fp->rx_sge_mbuf_spare_map = map; 6752 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6753 BUS_DMASYNC_PREREAD); 6754 sge_buf->m = m; 6755 6756 sge = &fp->rx_sge_chain[index]; 6757 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6758 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6759 6760 return (rc); 6761} 6762 6763static __noinline int 6764bxe_alloc_fp_buffers(struct bxe_softc *sc) 6765{ 6766 struct bxe_fastpath *fp; 6767 int i, j, rc = 0; 6768 int ring_prod, cqe_ring_prod; 6769 int max_agg_queues; 6770 6771 for (i = 0; i < sc->num_queues; i++) { 6772 fp = &sc->fp[i]; 6773 6774#if __FreeBSD_version >= 800000 6775 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6776 M_DONTWAIT, &fp->tx_mtx); 6777 if (fp->tx_br == NULL) { 6778 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6779 goto bxe_alloc_fp_buffers_error; 6780 } 6781#endif 6782 6783 ring_prod = cqe_ring_prod = 0; 6784 fp->rx_bd_cons = 0; 6785 fp->rx_cq_cons = 0; 6786 6787 /* allocate buffers for the RX BDs in RX BD chain */ 6788 for (j = 0; j < sc->max_rx_bufs; j++) { 6789 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6790 if (rc != 0) { 6791 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6792 i, rc); 6793 goto bxe_alloc_fp_buffers_error; 6794 } 6795 6796 ring_prod = RX_BD_NEXT(ring_prod); 6797 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6798 } 6799 6800 fp->rx_bd_prod = ring_prod; 6801 fp->rx_cq_prod = cqe_ring_prod; 6802 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6803 6804 if (sc->ifnet->if_capenable & IFCAP_LRO) { 6805 max_agg_queues = MAX_AGG_QS(sc); 6806 6807 fp->tpa_enable = TRUE; 6808 6809 /* fill the TPA pool */ 6810 for (j = 0; j < max_agg_queues; j++) { 6811 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6812 if (rc != 0) { 6813 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6814 i, j); 6815 fp->tpa_enable = FALSE; 6816 goto bxe_alloc_fp_buffers_error; 6817 } 6818 6819 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6820 } 6821 6822 if (fp->tpa_enable) { 6823 
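                /*
                 * TPA aggregates packet payload into the SGE pages, so
                 * the SGE ring is only populated once the TPA pool above
                 * has been filled; a failure below likewise disables TPA
                 * for this queue via the shared error path.
                 */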
                /* fill the RX SGE chain */
                ring_prod = 0;
                for (j = 0; j < RX_SGE_USABLE; j++) {
                    rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
                    if (rc != 0) {
                        BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
                              i, ring_prod);
                        fp->tpa_enable = FALSE;
                        ring_prod = 0;
                        goto bxe_alloc_fp_buffers_error;
                    }

                    ring_prod = RX_SGE_NEXT(ring_prod);
                }

                fp->rx_sge_prod = ring_prod;
            }
        }
    }

    return (0);

bxe_alloc_fp_buffers_error:

    /* unwind what was already allocated */
    bxe_free_rx_bd_chain(fp);
    bxe_free_tpa_pool(fp);
    bxe_free_sge_chain(fp);

    return (ENOBUFS);
}

static void
bxe_free_fw_stats_mem(struct bxe_softc *sc)
{
    bxe_dma_free(sc, &sc->fw_stats_dma);

    sc->fw_stats_num = 0;

    sc->fw_stats_req_size = 0;
    sc->fw_stats_req = NULL;
    sc->fw_stats_req_mapping = 0;

    sc->fw_stats_data_size = 0;
    sc->fw_stats_data = NULL;
    sc->fw_stats_data_mapping = 0;
}

static int
bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
{
    uint8_t num_queue_stats;
    int num_groups;

    /* number of queues for statistics is number of eth queues */
    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);

    /*
     * Total number of FW statistics requests =
     *   1 for port stats + 1 for PF stats + num of queues
     */
    sc->fw_stats_num = (2 + num_queue_stats);

    /*
     * The request is built from a stats_query_header and an array of
     * stats_query_cmd_group entries, each of which contains
     * STATS_QUERY_CMD_COUNT rules. The real number of requests is
     * configured in the stats_query_header.
     */
    num_groups =
        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));

    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
          sc->fw_stats_num, num_groups);

    sc->fw_stats_req_size =
        (sizeof(struct stats_query_header) +
         (num_groups * sizeof(struct stats_query_cmd_group)));

    /*
     * Data for statistics requests + stats_counter.
     * stats_counter holds per-STORM counters that are incremented when
     * STORM has finished with the current request. Memory for FCoE
     * offloaded statistics is counted anyway, even if it will not be sent.
     * VF stats are not accounted for here as the data of VF stats is stored
     * in memory allocated by the VF, not here.
     */
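    /*
     * Illustrative layout (informational) of the single DMA block
     * allocated below; the request region comes first and the data
     * region follows immediately after:
     *
     *   fw_stats_dma.vaddr
     *     +-> struct bxe_fw_stats_req   (fw_stats_req_size bytes)
     *     +-> struct bxe_fw_stats_data  (fw_stats_data_size bytes)
     *
     * The "shortcut" pointers set up after the allocation simply
     * reference these two sub-regions and their bus addresses.
     */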
    sc->fw_stats_data_size =
        (sizeof(struct stats_counter) +
         sizeof(struct per_port_stats) +
         sizeof(struct per_pf_stats) +
         /* sizeof(struct fcoe_statistics_params) + */
         (sizeof(struct per_queue_stats) * num_queue_stats));

    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
                      &sc->fw_stats_dma, "fw stats") != 0) {
        bxe_free_fw_stats_mem(sc);
        return (-1);
    }

    /* set up the shortcuts */

    sc->fw_stats_req =
        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;

    sc->fw_stats_data =
        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
                                     sc->fw_stats_req_size);
    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
                                 sc->fw_stats_req_size);

    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_req_mapping);

    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_data_mapping);

    return (0);
}

/*
 * Bits map:
 * 0-7  - Engine0 load counter.
 * 8-15 - Engine1 load counter.
 * 16   - Engine0 RESET_IN_PROGRESS bit.
 * 17   - Engine1 RESET_IN_PROGRESS bit.
 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *        function on the engine.
 * 19   - Engine1 ONE_IS_LOADED.
 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
 *        engines' leader to complete (check both RESET_IN_PROGRESS bits,
 *        not just the one belonging to its engine).
 */
#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
#define BXE_PATH0_LOAD_CNT_SHIFT  0
#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
#define BXE_PATH1_LOAD_CNT_SHIFT  8
#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
#define BXE_GLOBAL_RESET_BIT      0x00040000

/* set the GLOBAL_RESET bit, should be run under rtnl lock */
static void
bxe_set_reset_global(struct bxe_softc *sc)
{
    uint32_t val;
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
static void
bxe_clear_reset_global(struct bxe_softc *sc)
{
    uint32_t val;
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
static uint8_t
bxe_reset_is_global(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
}

/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
static void
bxe_set_reset_done(struct bxe_softc *sc)
{
    uint32_t val;
    uint32_t bit = SC_PATH(sc) ?
BXE_PATH1_RST_IN_PROG_BIT : 7004 BXE_PATH0_RST_IN_PROG_BIT; 7005 7006 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7007 7008 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7009 /* Clear the bit */ 7010 val &= ~bit; 7011 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7012 7013 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7014} 7015 7016/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7017static void 7018bxe_set_reset_in_progress(struct bxe_softc *sc) 7019{ 7020 uint32_t val; 7021 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7022 BXE_PATH0_RST_IN_PROG_BIT; 7023 7024 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7025 7026 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7027 /* Set the bit */ 7028 val |= bit; 7029 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7030 7031 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7032} 7033 7034/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7035static uint8_t 7036bxe_reset_is_done(struct bxe_softc *sc, 7037 int engine) 7038{ 7039 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7040 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7041 BXE_PATH0_RST_IN_PROG_BIT; 7042 7043 /* return false if bit is set */ 7044 return (val & bit) ? FALSE : TRUE; 7045} 7046 7047/* get the load status for an engine, should be run under rtnl lock */ 7048static uint8_t 7049bxe_get_load_status(struct bxe_softc *sc, 7050 int engine) 7051{ 7052 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7053 BXE_PATH0_LOAD_CNT_MASK; 7054 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7055 BXE_PATH0_LOAD_CNT_SHIFT; 7056 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7057 7058 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7059 7060 val = ((val & mask) >> shift); 7061 7062 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7063 7064 return (val != 0); 7065} 7066 7067/* set pf load mark */ 7068/* XXX needs to be under rtnl lock */ 7069static void 7070bxe_set_pf_load(struct bxe_softc *sc) 7071{ 7072 uint32_t val; 7073 uint32_t val1; 7074 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7075 BXE_PATH0_LOAD_CNT_MASK; 7076 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7077 BXE_PATH0_LOAD_CNT_SHIFT; 7078 7079 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7080 7081 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7082 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7083 7084 /* get the current counter value */ 7085 val1 = ((val & mask) >> shift); 7086 7087 /* set bit of this PF */ 7088 val1 |= (1 << SC_ABS_FUNC(sc)); 7089 7090 /* clear the old value */ 7091 val &= ~mask; 7092 7093 /* set the new one */ 7094 val |= ((val1 << shift) & mask); 7095 7096 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7097 7098 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7099} 7100 7101/* clear pf load mark */ 7102/* XXX needs to be under rtnl lock */ 7103static uint8_t 7104bxe_clear_pf_load(struct bxe_softc *sc) 7105{ 7106 uint32_t val1, val; 7107 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7108 BXE_PATH0_LOAD_CNT_MASK; 7109 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT :
                     BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* clear bit of that PF */
    val1 &= ~(1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    return (val1 != 0);
}

/* send load request to the MCP and analyze the response */
static int
bxe_nic_load_request(struct bxe_softc *sc,
                     uint32_t *load_code)
{
    /* init fw_seq */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);

    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);

    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
          sc->fw_drv_pulse_wr_seq);

    /* load request */
    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    /* if the MCP fails to respond we must abort */
    if (!(*load_code)) {
        BLOGE(sc, "MCP response failure!\n");
        return (-1);
    }

    /* if the MCP refused the load then we must abort */
    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        BLOGE(sc, "MCP refused load request\n");
        return (-1);
    }

    return (0);
}

/*
 * Check whether another PF has already loaded FW to the chip. In virtualized
 * environments a PF from another VM may have already initialized the device,
 * including loading the FW.
 */
static int
bxe_nic_load_analyze_req(struct bxe_softc *sc,
                         uint32_t load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine? */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword */
        my_fw = (BCM_5710_FW_MAJOR_VERSION +
                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
                 (BCM_5710_FW_REVISION_VERSION << 16) +
                 (BCM_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
              loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
                  loaded_fw, my_fw);
            return (-1);
        }
    }

    return (0);
}

/* mark PMF if applicable */
static void
bxe_nic_load_pmf(struct bxe_softc *sc,
                 uint32_t load_code)
{
    uint32_t ncsi_oem_data_addr;

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
        /*
         * Barrier here for ordering between the writing to sc->port.pmf here
         * and reading it from the periodic task.
         */
        sc->port.pmf = 1;
        mb();
    } else {
        sc->port.pmf = 0;
    }

    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);

    /* XXX needed? */
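    /*
     * Informational note: only the first driver loaded on the chip
     * (FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) clears the driver_version
     * field of the NC-SI OEM data block below, and only when shmem2
     * actually advertises ncsi_oem_data_addr.
     */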
    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
            if (ncsi_oem_data_addr) {
                REG_WR(sc,
                       (ncsi_oem_data_addr +
                        offsetof(struct glob_ncsi_oem_data, driver_version)),
                       0);
            }
        }
    }
}

static void
bxe_read_mf_cfg(struct bxe_softc *sc)
{
    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
    int abs_func;
    int vn;

    if (BXE_NOMCP(sc)) {
        return; /* what should be the default value in this case? */
    }

    /*
     * The formula for computing the absolute function number is...
     * For 2 port configuration (4 functions per port):
     *   abs_func = 2 * vn + SC_PORT + SC_PATH
     * For 4 port configuration (2 functions per port):
     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
     * e.g., in 4-port mode, vn=1 on port 0 of path 1 yields abs_func 5.
     */
    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
        if (abs_func >= E1H_FUNC_MAX) {
            break;
        }
        sc->devinfo.mf_info.mf_config[vn] =
            MFCFG_RD(sc, func_mf_config[abs_func].config);
    }

    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
        FUNC_MF_CFG_FUNC_DISABLED) {
        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
        sc->flags |= BXE_MF_FUNC_DIS;
    } else {
        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
        sc->flags &= ~BXE_MF_FUNC_DIS;
    }
}

/* acquire split MCP access lock register */
static int bxe_acquire_alr(struct bxe_softc *sc)
{
    uint32_t j, val;

    for (j = 0; j < 1000; j++) {
        val = (1UL << 31);
        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
        if (val & (1L << 31))
            break;

        DELAY(5000);
    }

    if (!(val & (1L << 31))) {
        BLOGE(sc, "Cannot acquire MCP access lock register\n");
        return (-1);
    }

    return (0);
}

/* release split MCP access lock register */
static void bxe_release_alr(struct bxe_softc *sc)
{
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}

static void
bxe_fan_failure(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t ext_phy_config;

    /* mark the failure */
    ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
             ext_phy_config);

    /* log the failure */
    BLOGW(sc, "Fan failure has caused the driver to shut down "
              "the card to prevent permanent damage. "
              "Please contact OEM Support for assistance\n");

    /* XXX */
#if 1
    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
#else
    /*
     * Schedule a device reset (unload). Some boards consume enough power
     * when the driver is up that they can overheat if the fan fails.
7338 */ 7339 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7340 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7341#endif 7342} 7343 7344/* this function is called upon a link interrupt */ 7345static void 7346bxe_link_attn(struct bxe_softc *sc) 7347{ 7348 uint32_t pause_enabled = 0; 7349 struct host_port_stats *pstats; 7350 int cmng_fns; 7351 7352 /* Make sure that we are synced with the current statistics */ 7353 bxe_stats_handle(sc, STATS_EVENT_STOP); 7354 7355 elink_link_update(&sc->link_params, &sc->link_vars); 7356 7357 if (sc->link_vars.link_up) { 7358 7359 /* dropless flow control */ 7360 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7361 pause_enabled = 0; 7362 7363 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7364 pause_enabled = 1; 7365 } 7366 7367 REG_WR(sc, 7368 (BAR_USTRORM_INTMEM + 7369 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7370 pause_enabled); 7371 } 7372 7373 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7374 pstats = BXE_SP(sc, port_stats); 7375 /* reset old mac stats */ 7376 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7377 } 7378 7379 if (sc->state == BXE_STATE_OPEN) { 7380 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7381 } 7382 } 7383 7384 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7385 cmng_fns = bxe_get_cmng_fns_mode(sc); 7386 7387 if (cmng_fns != CMNG_FNS_NONE) { 7388 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7389 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7390 } else { 7391 /* rate shaping and fairness are disabled */ 7392 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7393 } 7394 } 7395 7396 bxe_link_report_locked(sc); 7397 7398 if (IS_MF(sc)) { 7399 ; // XXX bxe_link_sync_notify(sc); 7400 } 7401} 7402 7403static void 7404bxe_attn_int_asserted(struct bxe_softc *sc, 7405 uint32_t asserted) 7406{ 7407 int port = SC_PORT(sc); 7408 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7409 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7410 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7411 NIG_REG_MASK_INTERRUPT_PORT0; 7412 uint32_t aeu_mask; 7413 uint32_t nig_mask = 0; 7414 uint32_t reg_addr; 7415 uint32_t igu_acked; 7416 uint32_t cnt; 7417 7418 if (sc->attn_state & asserted) { 7419 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7420 } 7421 7422 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7423 7424 aeu_mask = REG_RD(sc, aeu_addr); 7425 7426 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7427 aeu_mask, asserted); 7428 7429 aeu_mask &= ~(asserted & 0x3ff); 7430 7431 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7432 7433 REG_WR(sc, aeu_addr, aeu_mask); 7434 7435 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7436 7437 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7438 sc->attn_state |= asserted; 7439 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7440 7441 if (asserted & ATTN_HARD_WIRED_MASK) { 7442 if (asserted & ATTN_NIG_FOR_FUNC) { 7443 7444 BXE_PHY_LOCK(sc); 7445 7446 /* save nig interrupt mask */ 7447 nig_mask = REG_RD(sc, nig_int_mask_addr); 7448 7449 /* If nig_mask is not set, no need to call the update function */ 7450 if (nig_mask) { 7451 REG_WR(sc, nig_int_mask_addr, 0); 7452 7453 bxe_link_attn(sc); 7454 } 7455 7456 /* handle unicore attn? 
*/ 7457 } 7458 7459 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7460 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7461 } 7462 7463 if (asserted & GPIO_2_FUNC) { 7464 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7465 } 7466 7467 if (asserted & GPIO_3_FUNC) { 7468 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7469 } 7470 7471 if (asserted & GPIO_4_FUNC) { 7472 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7473 } 7474 7475 if (port == 0) { 7476 if (asserted & ATTN_GENERAL_ATTN_1) { 7477 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7478 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7479 } 7480 if (asserted & ATTN_GENERAL_ATTN_2) { 7481 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7482 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7483 } 7484 if (asserted & ATTN_GENERAL_ATTN_3) { 7485 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7486 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7487 } 7488 } else { 7489 if (asserted & ATTN_GENERAL_ATTN_4) { 7490 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7491 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7492 } 7493 if (asserted & ATTN_GENERAL_ATTN_5) { 7494 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7495 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7496 } 7497 if (asserted & ATTN_GENERAL_ATTN_6) { 7498 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7499 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7500 } 7501 } 7502 } /* hardwired */ 7503 7504 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7505 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7506 } else { 7507 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7508 } 7509 7510 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7511 asserted, 7512 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7513 REG_WR(sc, reg_addr, asserted); 7514 7515 /* now set back the mask */ 7516 if (asserted & ATTN_NIG_FOR_FUNC) { 7517 /* 7518 * Verify that IGU ack through BAR was written before restoring 7519 * NIG mask. This loop should exit after 2-3 iterations max. 7520 */ 7521 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7522 cnt = 0; 7523 7524 do { 7525 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7526 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7527 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7528 7529 if (!igu_acked) { 7530 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7531 } 7532 7533 mb(); 7534 } 7535 7536 REG_WR(sc, nig_int_mask_addr, nig_mask); 7537 7538 BXE_PHY_UNLOCK(sc); 7539 } 7540} 7541 7542static void 7543bxe_print_next_block(struct bxe_softc *sc, 7544 int idx, 7545 const char *blk) 7546{ 7547 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7548} 7549 7550static int 7551bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7552 uint32_t sig, 7553 int par_num, 7554 uint8_t print) 7555{ 7556 uint32_t cur_bit = 0; 7557 int i = 0; 7558 7559 for (i = 0; sig; i++) { 7560 cur_bit = ((uint32_t)0x1 << i); 7561 if (sig & cur_bit) { 7562 switch (cur_bit) { 7563 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7564 if (print) 7565 bxe_print_next_block(sc, par_num++, "BRB"); 7566 break; 7567 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7568 if (print) 7569 bxe_print_next_block(sc, par_num++, "PARSER"); 7570 break; 7571 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7572 if (print) 7573 bxe_print_next_block(sc, par_num++, "TSDM"); 7574 break; 7575 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7576 if (print) 7577 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7578 break; 7579 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7580 if (print) 7581 bxe_print_next_block(sc, par_num++, "TCM"); 7582 break; 7583 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7584 if (print) 7585 bxe_print_next_block(sc, par_num++, "TSEMI"); 7586 break; 7587 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7588 if (print) 7589 bxe_print_next_block(sc, par_num++, "XPB"); 7590 break; 7591 } 7592 7593 /* Clear the bit */ 7594 sig &= ~cur_bit; 7595 } 7596 } 7597 7598 return (par_num); 7599} 7600 7601static int 7602bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7603 uint32_t sig, 7604 int par_num, 7605 uint8_t *global, 7606 uint8_t print) 7607{ 7608 int i = 0; 7609 uint32_t cur_bit = 0; 7610 for (i = 0; sig; i++) { 7611 cur_bit = ((uint32_t)0x1 << i); 7612 if (sig & cur_bit) { 7613 switch (cur_bit) { 7614 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7615 if (print) 7616 bxe_print_next_block(sc, par_num++, "PBF"); 7617 break; 7618 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7619 if (print) 7620 bxe_print_next_block(sc, par_num++, "QM"); 7621 break; 7622 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7623 if (print) 7624 bxe_print_next_block(sc, par_num++, "TM"); 7625 break; 7626 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7627 if (print) 7628 bxe_print_next_block(sc, par_num++, "XSDM"); 7629 break; 7630 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7631 if (print) 7632 bxe_print_next_block(sc, par_num++, "XCM"); 7633 break; 7634 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7635 if (print) 7636 bxe_print_next_block(sc, par_num++, "XSEMI"); 7637 break; 7638 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7639 if (print) 7640 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7641 break; 7642 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7643 if (print) 7644 bxe_print_next_block(sc, par_num++, "NIG"); 7645 break; 7646 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7647 if (print) 7648 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7649 *global = TRUE; 7650 break; 7651 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7652 if (print) 7653 bxe_print_next_block(sc, par_num++, "DEBUG"); 7654 break; 7655 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7656 if (print) 7657 bxe_print_next_block(sc, par_num++, "USDM"); 7658 break; 7659 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7660 if (print) 7661 bxe_print_next_block(sc, par_num++, "UCM"); 7662 break; 7663 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7664 if (print) 7665 bxe_print_next_block(sc, par_num++, "USEMI"); 7666 break; 7667 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7668 if (print) 7669 bxe_print_next_block(sc, par_num++, "UPB"); 7670 break; 7671 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7672 if 
(print) 7673 bxe_print_next_block(sc, par_num++, "CSDM"); 7674 break; 7675 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7676 if (print) 7677 bxe_print_next_block(sc, par_num++, "CCM"); 7678 break; 7679 } 7680 7681 /* Clear the bit */ 7682 sig &= ~cur_bit; 7683 } 7684 } 7685 7686 return (par_num); 7687} 7688 7689static int 7690bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7691 uint32_t sig, 7692 int par_num, 7693 uint8_t print) 7694{ 7695 uint32_t cur_bit = 0; 7696 int i = 0; 7697 7698 for (i = 0; sig; i++) { 7699 cur_bit = ((uint32_t)0x1 << i); 7700 if (sig & cur_bit) { 7701 switch (cur_bit) { 7702 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7703 if (print) 7704 bxe_print_next_block(sc, par_num++, "CSEMI"); 7705 break; 7706 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7707 if (print) 7708 bxe_print_next_block(sc, par_num++, "PXP"); 7709 break; 7710 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7711 if (print) 7712 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7713 break; 7714 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7715 if (print) 7716 bxe_print_next_block(sc, par_num++, "CFC"); 7717 break; 7718 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7719 if (print) 7720 bxe_print_next_block(sc, par_num++, "CDU"); 7721 break; 7722 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7723 if (print) 7724 bxe_print_next_block(sc, par_num++, "DMAE"); 7725 break; 7726 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7727 if (print) 7728 bxe_print_next_block(sc, par_num++, "IGU"); 7729 break; 7730 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7731 if (print) 7732 bxe_print_next_block(sc, par_num++, "MISC"); 7733 break; 7734 } 7735 7736 /* Clear the bit */ 7737 sig &= ~cur_bit; 7738 } 7739 } 7740 7741 return (par_num); 7742} 7743 7744static int 7745bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7746 uint32_t sig, 7747 int par_num, 7748 uint8_t *global, 7749 uint8_t print) 7750{ 7751 uint32_t cur_bit = 0; 7752 int i = 0; 7753 7754 for (i = 0; sig; i++) { 7755 cur_bit = ((uint32_t)0x1 << i); 7756 if (sig & cur_bit) { 7757 switch (cur_bit) { 7758 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7759 if (print) 7760 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7761 *global = TRUE; 7762 break; 7763 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7764 if (print) 7765 bxe_print_next_block(sc, par_num++, 7766 "MCP UMP RX"); 7767 *global = TRUE; 7768 break; 7769 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7770 if (print) 7771 bxe_print_next_block(sc, par_num++, 7772 "MCP UMP TX"); 7773 *global = TRUE; 7774 break; 7775 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7776 if (print) 7777 bxe_print_next_block(sc, par_num++, 7778 "MCP SCPAD"); 7779 *global = TRUE; 7780 break; 7781 } 7782 7783 /* Clear the bit */ 7784 sig &= ~cur_bit; 7785 } 7786 } 7787 7788 return (par_num); 7789} 7790 7791static int 7792bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7793 uint32_t sig, 7794 int par_num, 7795 uint8_t print) 7796{ 7797 uint32_t cur_bit = 0; 7798 int i = 0; 7799 7800 for (i = 0; sig; i++) { 7801 cur_bit = ((uint32_t)0x1 << i); 7802 if (sig & cur_bit) { 7803 switch (cur_bit) { 7804 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7805 if (print) 7806 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7807 break; 7808 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7809 if (print) 7810 bxe_print_next_block(sc, par_num++, "ATC"); 7811 break; 7812 } 7813 7814 /* Clear the bit */ 7815 sig &= ~cur_bit; 7816 } 7817 } 7818 7819 return (par_num); 7820} 7821 7822static uint8_t 
7823bxe_parity_attn(struct bxe_softc *sc, 7824 uint8_t *global, 7825 uint8_t print, 7826 uint32_t *sig) 7827{ 7828 int par_num = 0; 7829 7830 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7831 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7832 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7833 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7834 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7835 BLOGE(sc, "Parity error: HW block parity attention:\n" 7836 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7837 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7838 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7839 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7840 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7841 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7842 7843 if (print) 7844 BLOGI(sc, "Parity errors detected in blocks: "); 7845 7846 par_num = 7847 bxe_check_blocks_with_parity0(sc, sig[0] & 7848 HW_PRTY_ASSERT_SET_0, 7849 par_num, print); 7850 par_num = 7851 bxe_check_blocks_with_parity1(sc, sig[1] & 7852 HW_PRTY_ASSERT_SET_1, 7853 par_num, global, print); 7854 par_num = 7855 bxe_check_blocks_with_parity2(sc, sig[2] & 7856 HW_PRTY_ASSERT_SET_2, 7857 par_num, print); 7858 par_num = 7859 bxe_check_blocks_with_parity3(sc, sig[3] & 7860 HW_PRTY_ASSERT_SET_3, 7861 par_num, global, print); 7862 par_num = 7863 bxe_check_blocks_with_parity4(sc, sig[4] & 7864 HW_PRTY_ASSERT_SET_4, 7865 par_num, print); 7866 7867 if (print) 7868 BLOGI(sc, "\n"); 7869 7870 return (TRUE); 7871 } 7872 7873 return (FALSE); 7874} 7875 7876static uint8_t 7877bxe_chk_parity_attn(struct bxe_softc *sc, 7878 uint8_t *global, 7879 uint8_t print) 7880{ 7881 struct attn_route attn = { {0} }; 7882 int port = SC_PORT(sc); 7883 7884 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7885 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7886 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7887 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7888 7889 if (!CHIP_IS_E1x(sc)) 7890 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7891 7892 return (bxe_parity_attn(sc, global, print, attn.sig)); 7893} 7894 7895static void 7896bxe_attn_int_deasserted4(struct bxe_softc *sc, 7897 uint32_t attn) 7898{ 7899 uint32_t val; 7900 7901 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7902 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7903 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7904 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7905 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7906 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7907 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7908 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7909 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7910 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7911 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7912 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7913 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7914 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7915 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7916 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7917 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7918 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7919 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7920 if (val & 
PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7921 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7922 } 7923 7924 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7925 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7926 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7927 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7928 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7929 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7930 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7931 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7932 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7933 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7934 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7935 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7936 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7937 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7938 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7939 } 7940 7941 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7942 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7943 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7944 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7945 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7946 } 7947} 7948 7949static void 7950bxe_e1h_disable(struct bxe_softc *sc) 7951{ 7952 int port = SC_PORT(sc); 7953 7954 bxe_tx_disable(sc); 7955 7956 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7957} 7958 7959static void 7960bxe_e1h_enable(struct bxe_softc *sc) 7961{ 7962 int port = SC_PORT(sc); 7963 7964 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7965 7966 // XXX bxe_tx_enable(sc); 7967} 7968 7969/* 7970 * called due to MCP event (on pmf): 7971 * reread new bandwidth configuration 7972 * configure FW 7973 * notify other functions about the change 7974 */ 7975static void 7976bxe_config_mf_bw(struct bxe_softc *sc) 7977{ 7978 if (sc->link_vars.link_up) { 7979 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 7980 // XXX bxe_link_sync_notify(sc); 7981 } 7982 7983 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7984} 7985 7986static void 7987bxe_set_mf_bw(struct bxe_softc *sc) 7988{ 7989 bxe_config_mf_bw(sc); 7990 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 7991} 7992 7993static void 7994bxe_handle_eee_event(struct bxe_softc *sc) 7995{ 7996 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 7997 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 7998} 7999 8000#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 8001 8002static void 8003bxe_drv_info_ether_stat(struct bxe_softc *sc) 8004{ 8005 struct eth_stats_info *ether_stat = 8006 &sc->sp->drv_info_to_mcp.ether_stat; 8007 8008 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 8009 ETH_STAT_INFO_VERSION_LEN); 8010 8011 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 8012 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 8013 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 8014 ether_stat->mac_local + MAC_PAD, 8015 MAC_PAD, ETH_ALEN); 8016 8017 ether_stat->mtu_size = sc->mtu; 8018 8019 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 8020 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 8021 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 8022 } 8023 8024 // XXX ether_stat->feature_flags |= ???; 8025 8026 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ?
1 : 0; 8027 8028 ether_stat->txq_size = sc->tx_ring_size; 8029 ether_stat->rxq_size = sc->rx_ring_size; 8030} 8031 8032static void 8033bxe_handle_drv_info_req(struct bxe_softc *sc) 8034{ 8035 enum drv_info_opcode op_code; 8036 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8037 8038 /* if drv_info version supported by MFW doesn't match - send NACK */ 8039 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8040 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8041 return; 8042 } 8043 8044 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8045 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8046 8047 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8048 8049 switch (op_code) { 8050 case ETH_STATS_OPCODE: 8051 bxe_drv_info_ether_stat(sc); 8052 break; 8053 case FCOE_STATS_OPCODE: 8054 case ISCSI_STATS_OPCODE: 8055 default: 8056 /* if op code isn't supported - send NACK */ 8057 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8058 return; 8059 } 8060 8061 /* 8062 * If we got drv_info attn from MFW then these fields are defined in 8063 * shmem2 for sure 8064 */ 8065 SHMEM2_WR(sc, drv_info_host_addr_lo, 8066 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8067 SHMEM2_WR(sc, drv_info_host_addr_hi, 8068 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8069 8070 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8071} 8072 8073static void 8074bxe_dcc_event(struct bxe_softc *sc, 8075 uint32_t dcc_event) 8076{ 8077 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8078 8079 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8080 /* 8081 * This is the only place besides the function initialization 8082 * where the sc->flags can change so it is done without any 8083 * locks 8084 */ 8085 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8086 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8087 sc->flags |= BXE_MF_FUNC_DIS; 8088 bxe_e1h_disable(sc); 8089 } else { 8090 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8091 sc->flags &= ~BXE_MF_FUNC_DIS; 8092 bxe_e1h_enable(sc); 8093 } 8094 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8095 } 8096 8097 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8098 bxe_config_mf_bw(sc); 8099 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8100 } 8101 8102 /* Report results to MCP */ 8103 if (dcc_event) 8104 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8105 else 8106 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8107} 8108 8109static void 8110bxe_pmf_update(struct bxe_softc *sc) 8111{ 8112 int port = SC_PORT(sc); 8113 uint32_t val; 8114 8115 sc->port.pmf = 1; 8116 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8117 8118 /* 8119 * We need the mb() to ensure the ordering between the writing to 8120 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8121 */ 8122 mb(); 8123 8124 /* queue a periodic task */ 8125 // XXX schedule task... 
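    /*
     * A sketch of what queueing that periodic task might look like,
     * assuming a hypothetical callout field and handler (neither
     * sc->periodic_callout nor bxe_periodic_callout_func is defined
     * in this code; illustrative only):
     */
#if 0
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
#endif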
8126 8127 // XXX bxe_dcbx_pmf_update(sc); 8128 8129 /* enable nig attention */ 8130 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8131 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8132 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8133 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8134 } else if (!CHIP_IS_E1x(sc)) { 8135 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8136 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8137 } 8138 8139 bxe_stats_handle(sc, STATS_EVENT_PMF); 8140} 8141 8142static int 8143bxe_mc_assert(struct bxe_softc *sc) 8144{ 8145 char last_idx; 8146 int i, rc = 0; 8147 uint32_t row0, row1, row2, row3; 8148 8149 /* XSTORM */ 8150 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8151 if (last_idx) 8152 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8153 8154 /* print the asserts */ 8155 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8156 8157 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8158 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8159 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8160 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8161 8162 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8163 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8164 i, row3, row2, row1, row0); 8165 rc++; 8166 } else { 8167 break; 8168 } 8169 } 8170 8171 /* TSTORM */ 8172 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8173 if (last_idx) { 8174 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8175 } 8176 8177 /* print the asserts */ 8178 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8179 8180 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8181 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8182 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8183 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8184 8185 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8186 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8187 i, row3, row2, row1, row0); 8188 rc++; 8189 } else { 8190 break; 8191 } 8192 } 8193 8194 /* CSTORM */ 8195 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8196 if (last_idx) { 8197 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8198 } 8199 8200 /* print the asserts */ 8201 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8202 8203 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8204 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8205 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8206 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8207 8208 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8209 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8210 i, row3, row2, row1, row0); 8211 rc++; 8212 } else { 8213 break; 8214 } 8215 } 8216 8217 /* USTORM */ 8218 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8219 if (last_idx) { 8220 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8221 } 8222 8223 /* print the asserts */ 8224 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8225 8226 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8227 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8228 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + 
USTORM_ASSERT_LIST_OFFSET(i) + 8); 8229 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8230 8231 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8232 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8233 i, row3, row2, row1, row0); 8234 rc++; 8235 } else { 8236 break; 8237 } 8238 } 8239 8240 return (rc); 8241} 8242 8243static void 8244bxe_attn_int_deasserted3(struct bxe_softc *sc, 8245 uint32_t attn) 8246{ 8247 int func = SC_FUNC(sc); 8248 uint32_t val; 8249 8250 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8251 8252 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8253 8254 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8255 bxe_read_mf_cfg(sc); 8256 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8257 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8258 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8259 8260 if (val & DRV_STATUS_DCC_EVENT_MASK) 8261 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8262 8263 if (val & DRV_STATUS_SET_MF_BW) 8264 bxe_set_mf_bw(sc); 8265 8266 if (val & DRV_STATUS_DRV_INFO_REQ) 8267 bxe_handle_drv_info_req(sc); 8268 8269#if 0 8270 if (val & DRV_STATUS_VF_DISABLED) 8271 bxe_vf_handle_flr_event(sc); 8272#endif 8273 8274 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8275 bxe_pmf_update(sc); 8276 8277#if 0 8278 if (sc->port.pmf && 8279 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8280 (sc->dcbx_enabled > 0)) 8281 /* start dcbx state machine */ 8282 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8283#endif 8284 8285#if 0 8286 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8287 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8288#endif 8289 8290 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8291 bxe_handle_eee_event(sc); 8292 8293 if (sc->link_vars.periodic_flags & 8294 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8295 /* sync with link */ 8296 BXE_PHY_LOCK(sc); 8297 sc->link_vars.periodic_flags &= 8298 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8299 BXE_PHY_UNLOCK(sc); 8300 if (IS_MF(sc)) 8301 ; // XXX bxe_link_sync_notify(sc); 8302 bxe_link_report(sc); 8303 } 8304 8305 /* 8306 * Always call it here: bxe_link_report() will 8307 * prevent the link indication duplication. 8308 */ 8309 bxe_link_status_update(sc); 8310 8311 } else if (attn & BXE_MC_ASSERT_BITS) { 8312 8313 BLOGE(sc, "MC assert!\n"); 8314 bxe_mc_assert(sc); 8315 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8316 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8317 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8318 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8319 bxe_panic(sc, ("MC assert!\n")); 8320 8321 } else if (attn & BXE_MCP_ASSERT) { 8322 8323 BLOGE(sc, "MCP assert!\n"); 8324 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8325 // XXX bxe_fw_dump(sc); 8326 8327 } else { 8328 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8329 } 8330 } 8331 8332 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8333 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8334 if (attn & BXE_GRC_TIMEOUT) { 8335 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8336 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8337 } 8338 if (attn & BXE_GRC_RSV) { 8339 val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 8340 BLOGE(sc, "GRC reserved 0x%08x\n", val); 8341 } 8342 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 8343 } 8344} 8345 8346static void 8347bxe_attn_int_deasserted2(struct bxe_softc *sc, 8348 uint32_t attn) 8349{ 8350 int port = SC_PORT(sc); 8351 int reg_offset; 8352 uint32_t val0, mask0, val1, mask1; 8353 uint32_t val; 8354 8355 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 8356 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 8357 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 8358 /* CFC error attention */ 8359 if (val & 0x2) { 8360 BLOGE(sc, "FATAL error from CFC\n"); 8361 } 8362 } 8363 8364 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8365 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8366 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8367 /* RQ_USDMDP_FIFO_OVERFLOW */ 8368 if (val & 0x18000) { 8369 BLOGE(sc, "FATAL error from PXP\n"); 8370 } 8371 8372 if (!CHIP_IS_E1x(sc)) { 8373 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8374 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8375 } 8376 } 8377 8378#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8379#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8380 8381 if (attn & AEU_PXP2_HW_INT_BIT) { 8382 /* CQ47854 workaround: do not panic on 8383 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8384 */ 8385 if (!CHIP_IS_E1x(sc)) { 8386 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8387 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8388 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8389 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8390 /* 8391 * If PXP2_EOP_ERROR_BIT is the only bit set in 8392 * STS0 and STS1 - clear it 8393 * 8394 * we probably lose additional attentions between 8395 * STS0 and STS_CLR0; in this case the user will not 8396 * be notified about them 8397 */ 8398 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8399 !(val1 & mask1)) 8400 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8401 8402 /* print the register, since no one can restore it */ 8403 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8404 8405 /* 8406 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8407 * then notify 8408 */ 8409 if (val0 & PXP2_EOP_ERROR_BIT) { 8410 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8411 8412 /* 8413 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8414 * set then clear attention from PXP2 block without panic 8415 */ 8416 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8417 ((val1 & mask1) == 0)) 8418 attn &= ~AEU_PXP2_HW_INT_BIT; 8419 } 8420 } 8421 } 8422 8423 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8424 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8425 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8426 8427 val = REG_RD(sc, reg_offset); 8428 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8429 REG_WR(sc, reg_offset, val); 8430 8431 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8432 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8433 bxe_panic(sc, ("HW block attention set2\n")); 8434 } 8435} 8436 8437static void 8438bxe_attn_int_deasserted1(struct bxe_softc *sc, 8439 uint32_t attn) 8440{ 8441 int port = SC_PORT(sc); 8442 int reg_offset; 8443 uint32_t val; 8444 8445 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8446 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8447 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8448 /* DORQ discard attention */ 8449 if (val & 0x2) { 8450 BLOGE(sc, "FATAL error from DORQ\n"); 8451 } 8452 } 8453 8454 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8455 reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8456 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8457 8458 val = REG_RD(sc, reg_offset); 8459 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8460 REG_WR(sc, reg_offset, val); 8461 8462 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8463 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8464 bxe_panic(sc, ("HW block attention set1\n")); 8465 } 8466} 8467 8468static void 8469bxe_attn_int_deasserted0(struct bxe_softc *sc, 8470 uint32_t attn) 8471{ 8472 int port = SC_PORT(sc); 8473 int reg_offset; 8474 uint32_t val; 8475 8476 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8477 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8478 8479 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8480 val = REG_RD(sc, reg_offset); 8481 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8482 REG_WR(sc, reg_offset, val); 8483 8484 BLOGW(sc, "SPIO5 hw attention\n"); 8485 8486 /* Fan failure attention */ 8487 elink_hw_reset_phy(&sc->link_params); 8488 bxe_fan_failure(sc); 8489 } 8490 8491 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8492 BXE_PHY_LOCK(sc); 8493 elink_handle_module_detect_int(&sc->link_params); 8494 BXE_PHY_UNLOCK(sc); 8495 } 8496 8497 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8498 val = REG_RD(sc, reg_offset); 8499 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8500 REG_WR(sc, reg_offset, val); 8501 8502 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8503 (attn & HW_INTERRUT_ASSERT_SET_0))); 8504 } 8505} 8506 8507static void 8508bxe_attn_int_deasserted(struct bxe_softc *sc, 8509 uint32_t deasserted) 8510{ 8511 struct attn_route attn; 8512 struct attn_route *group_mask; 8513 int port = SC_PORT(sc); 8514 int index; 8515 uint32_t reg_addr; 8516 uint32_t val; 8517 uint32_t aeu_mask; 8518 uint8_t global = FALSE; 8519 8520 /* 8521 * Need to take HW lock because MCP or other port might also 8522 * try to handle this event. 8523 */ 8524 bxe_acquire_alr(sc); 8525 8526 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8527 /* XXX 8528 * In case of parity errors don't handle attentions so that 8529 * other functions would "see" parity errors. 8530 */ 8531 sc->recovery_state = BXE_RECOVERY_INIT; 8532 // XXX schedule a recovery task...
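        /*
         * A sketch of kicking off that recovery task on the existing
         * slowpath taskqueue (sc->recovery_task and bxe_recovery_task
         * are assumptions, not defined in this code; illustrative only):
         */
#if 0
        TASK_INIT(&sc->recovery_task, 0, bxe_recovery_task, sc);
        taskqueue_enqueue(sc->sp_tq, &sc->recovery_task);
#endif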
8533 /* disable HW interrupts */ 8534 bxe_int_disable(sc); 8535 bxe_release_alr(sc); 8536 return; 8537 } 8538 8539 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8540 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8541 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8542 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8543 if (!CHIP_IS_E1x(sc)) { 8544 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8545 } else { 8546 attn.sig[4] = 0; 8547 } 8548 8549 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8550 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8551 8552 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8553 if (deasserted & (1 << index)) { 8554 group_mask = &sc->attn_group[index]; 8555 8556 BLOGD(sc, DBG_INTR, 8557 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8558 group_mask->sig[0], group_mask->sig[1], 8559 group_mask->sig[2], group_mask->sig[3], 8560 group_mask->sig[4]); 8561 8562 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8563 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8564 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8565 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8566 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8567 } 8568 } 8569 8570 bxe_release_alr(sc); 8571 8572 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8573 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8574 COMMAND_REG_ATTN_BITS_CLR); 8575 } else { 8576 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8577 } 8578 8579 val = ~deasserted; 8580 BLOGD(sc, DBG_INTR, 8581 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8582 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8583 REG_WR(sc, reg_addr, val); 8584 8585 if (~sc->attn_state & deasserted) { 8586 BLOGE(sc, "IGU error\n"); 8587 } 8588 8589 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8590 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8591 8592 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8593 8594 aeu_mask = REG_RD(sc, reg_addr); 8595 8596 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8597 aeu_mask, deasserted); 8598 aeu_mask |= (deasserted & 0x3ff); 8599 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8600 8601 REG_WR(sc, reg_addr, aeu_mask); 8602 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8603 8604 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8605 sc->attn_state &= ~deasserted; 8606 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8607} 8608 8609static void 8610bxe_attn_int(struct bxe_softc *sc) 8611{ 8612 /* read local copy of bits */ 8613 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8614 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8615 uint32_t attn_state = sc->attn_state; 8616 8617 /* look for changed bits */ 8618 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8619 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8620 8621 BLOGD(sc, DBG_INTR, 8622 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8623 attn_bits, attn_ack, asserted, deasserted); 8624 8625 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8626 BLOGE(sc, "BAD attention state\n"); 8627 } 8628 8629 /* handle bits that were raised */ 8630 if (asserted) { 8631 bxe_attn_int_asserted(sc, asserted); 8632 } 8633 8634 if (deasserted) { 8635 bxe_attn_int_deasserted(sc, deasserted); 8636 } 8637} 8638 8639static uint16_t 8640bxe_update_dsb_idx(struct bxe_softc *sc) 8641{ 8642 struct host_sp_status_block *def_sb = sc->def_sb; 8643 uint16_t rc = 0; 8644 8645 mb(); /* status block is written to by the chip */ 8646 8647 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8648 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8649 rc |= BXE_DEF_SB_ATT_IDX; 8650 } 8651 8652 if (sc->def_idx != def_sb->sp_sb.running_index) { 8653 sc->def_idx = def_sb->sp_sb.running_index; 8654 rc |= BXE_DEF_SB_IDX; 8655 } 8656 8657 mb(); 8658 8659 return (rc); 8660} 8661 8662static inline struct ecore_queue_sp_obj * 8663bxe_cid_to_q_obj(struct bxe_softc *sc, 8664 uint32_t cid) 8665{ 8666 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8667 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8668} 8669 8670static void 8671bxe_handle_mcast_eqe(struct bxe_softc *sc) 8672{ 8673 struct ecore_mcast_ramrod_params rparam; 8674 int rc; 8675 8676 memset(&rparam, 0, sizeof(rparam)); 8677 8678 rparam.mcast_obj = &sc->mcast_obj; 8679 8680 BXE_MCAST_LOCK(sc); 8681 8682 /* clear pending state for the last command */ 8683 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8684 8685 /* if there are pending mcast commands - send them */ 8686 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8687 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8688 if (rc < 0) { 8689 BLOGD(sc, DBG_SP, 8690 "ERROR: Failed to send pending mcast commands (%d)\n", 8691 rc); 8692 } 8693 } 8694 8695 BXE_MCAST_UNLOCK(sc); 8696} 8697 8698static void 8699bxe_handle_classification_eqe(struct bxe_softc *sc, 8700 union event_ring_elem *elem) 8701{ 8702 unsigned long ramrod_flags = 0; 8703 int rc = 0; 8704 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8705 struct ecore_vlan_mac_obj *vlan_mac_obj; 8706 8707 /* always push next commands out, don't wait here */ 8708 bit_set(&ramrod_flags, RAMROD_CONT); 8709 
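    /*
     * Note: the echo field carries two values at once. The low bits are
     * the SW CID (extracted above with BXE_SWCID_MASK) and the bits at
     * and above BXE_SWCID_SHIFT encode the pending classification
     * command type that the switch below dispatches on.
     */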
8710 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8711 case ECORE_FILTER_MAC_PENDING: 8712 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8713 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8714 break; 8715 8716 case ECORE_FILTER_MCAST_PENDING: 8717 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8718 /* 8719 * This is only relevant for 57710 where multicast MACs are 8720 * configured as unicast MACs using the same ramrod. 8721 */ 8722 bxe_handle_mcast_eqe(sc); 8723 return; 8724 8725 default: 8726 BLOGE(sc, "Unsupported classification command: %d\n", 8727 elem->message.data.eth_event.echo); 8728 return; 8729 } 8730 8731 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8732 8733 if (rc < 0) { 8734 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8735 } else if (rc > 0) { 8736 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8737 } 8738} 8739 8740static void 8741bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8742 union event_ring_elem *elem) 8743{ 8744 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8745 8746 /* send rx_mode command again if was requested */ 8747 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8748 &sc->sp_state)) { 8749 bxe_set_storm_rx_mode(sc); 8750 } 8751#if 0 8752 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, 8753 &sc->sp_state)) { 8754 bxe_set_iscsi_eth_rx_mode(sc, TRUE); 8755 } 8756 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 8757 &sc->sp_state)) { 8758 bxe_set_iscsi_eth_rx_mode(sc, FALSE); 8759 } 8760#endif 8761} 8762 8763static void 8764bxe_update_eq_prod(struct bxe_softc *sc, 8765 uint16_t prod) 8766{ 8767 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8768 wmb(); /* keep prod updates ordered */ 8769} 8770 8771static void 8772bxe_eq_int(struct bxe_softc *sc) 8773{ 8774 uint16_t hw_cons, sw_cons, sw_prod; 8775 union event_ring_elem *elem; 8776 uint8_t echo; 8777 uint32_t cid; 8778 uint8_t opcode; 8779 int spqe_cnt = 0; 8780 struct ecore_queue_sp_obj *q_obj; 8781 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8782 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8783 8784 hw_cons = le16toh(*sc->eq_cons_sb); 8785 8786 /* 8787 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8788 * when we get to the next-page we need to adjust so the loop 8789 * condition below will be met. The next element is the size of a 8790 * regular element and hence incrementing by 1 8791 */ 8792 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8793 hw_cons++; 8794 } 8795 8796 /* 8797 * This function may never run in parallel with itself for a 8798 * specific sc and no need for a read memory barrier here. 
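 * (The sw indices advance via NEXT_EQ_IDX(), which presumably skips
 * the reserved next-page element in the same way, keeping sw_cons in
 * the 0-254, 256 range quoted above.)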
8799 */ 8800 sw_cons = sc->eq_cons; 8801 sw_prod = sc->eq_prod; 8802 8803 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8804 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8805 8806 for (; 8807 sw_cons != hw_cons; 8808 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8809 8810 elem = &sc->eq[EQ_DESC(sw_cons)]; 8811 8812#if 0 8813 int rc; 8814 rc = bxe_iov_eq_sp_event(sc, elem); 8815 if (!rc) { 8816 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8817 goto next_spqe; 8818 } 8819#endif 8820 8821 /* elem CID originates from FW, actually LE */ 8822 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8823 opcode = elem->message.opcode; 8824 8825 /* handle eq element */ 8826 switch (opcode) { 8827#if 0 8828 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8829 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8830 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8831 continue; 8832#endif 8833 8834 case EVENT_RING_OPCODE_STAT_QUERY: 8835 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8836 sc->stats_comp++); 8837 /* nothing to do with stats comp */ 8838 goto next_spqe; 8839 8840 case EVENT_RING_OPCODE_CFC_DEL: 8841 /* handle according to cid range */ 8842 /* we may want to verify here that the sc state is HALTING */ 8843 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8844 q_obj = bxe_cid_to_q_obj(sc, cid); 8845 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8846 break; 8847 } 8848 goto next_spqe; 8849 8850 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8851 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8852 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8853 break; 8854 } 8855 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8856 goto next_spqe; 8857 8858 case EVENT_RING_OPCODE_START_TRAFFIC: 8859 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8860 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8861 break; 8862 } 8863 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8864 goto next_spqe; 8865 8866 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8867 echo = elem->message.data.function_update_event.echo; 8868 if (echo == SWITCH_UPDATE) { 8869 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8870 if (f_obj->complete_cmd(sc, f_obj, 8871 ECORE_F_CMD_SWITCH_UPDATE)) { 8872 break; 8873 } 8874 } 8875 else { 8876 BLOGD(sc, DBG_SP, 8877 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8878#if 0 8879 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8880 /* 8881 * We will perform the queues update from the sp_core_task as 8882 * all queue SP operations should run with CORE_LOCK. 
8883 */ 8884 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8885 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8886#endif 8887 } 8888 goto next_spqe; 8889 8890#if 0 8891 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8892 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8893 bxe_after_afex_vif_lists(sc, elem); 8894 goto next_spqe; 8895#endif 8896 8897 case EVENT_RING_OPCODE_FORWARD_SETUP: 8898 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8899 if (q_obj->complete_cmd(sc, q_obj, 8900 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8901 break; 8902 } 8903 goto next_spqe; 8904 8905 case EVENT_RING_OPCODE_FUNCTION_START: 8906 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8907 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8908 break; 8909 } 8910 goto next_spqe; 8911 8912 case EVENT_RING_OPCODE_FUNCTION_STOP: 8913 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8914 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8915 break; 8916 } 8917 goto next_spqe; 8918 } 8919 8920 switch (opcode | sc->state) { 8921 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8922 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8923 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8924 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8925 rss_raw->clear_pending(rss_raw); 8926 break; 8927 8928 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8929 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8930 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8931 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8932 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8933 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8934 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8935 bxe_handle_classification_eqe(sc, elem); 8936 break; 8937 8938 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8939 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8940 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8941 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8942 bxe_handle_mcast_eqe(sc); 8943 break; 8944 8945 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8946 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8947 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8948 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8949 bxe_handle_rx_mode_eqe(sc, elem); 8950 break; 8951 8952 default: 8953 /* unknown event log error and continue */ 8954 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 8955 elem->message.opcode, sc->state); 8956 } 8957 8958next_spqe: 8959 spqe_cnt++; 8960 } /* for */ 8961 8962 mb(); 8963 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 8964 8965 sc->eq_cons = sw_cons; 8966 sc->eq_prod = sw_prod; 8967 8968 /* make sure that above mem writes were issued towards the memory */ 8969 wmb(); 8970 8971 /* update producer */ 8972 bxe_update_eq_prod(sc, sc->eq_prod); 8973} 8974 8975static void 8976bxe_handle_sp_tq(void *context, 8977 int pending) 8978{ 8979 struct bxe_softc *sc = (struct bxe_softc *)context; 8980 uint16_t status; 8981 8982 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 8983 8984 /* what work needs to be performed? 
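     * Either attention work (BXE_DEF_SB_ATT_IDX), slowpath event queue
     * work (BXE_DEF_SB_IDX), or both, per the two checks below.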
*/ 8985 status = bxe_update_dsb_idx(sc); 8986 8987 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 8988 8989 /* HW attentions */ 8990 if (status & BXE_DEF_SB_ATT_IDX) { 8991 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 8992 bxe_attn_int(sc); 8993 status &= ~BXE_DEF_SB_ATT_IDX; 8994 } 8995 8996 /* SP events: STAT_QUERY and others */ 8997 if (status & BXE_DEF_SB_IDX) { 8998 /* handle EQ completions */ 8999 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 9000 bxe_eq_int(sc); 9001 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9002 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9003 status &= ~BXE_DEF_SB_IDX; 9004 } 9005 9006 /* if status is non zero then something went wrong */ 9007 if (__predict_false(status)) { 9008 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9009 } 9010 9011 /* ack status block only if something was actually handled */ 9012 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9013 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9014 9015 /* 9016 * Must be called after the EQ processing (since eq leads to sriov 9017 * ramrod completion flows). 9018 * This flow may have been scheduled by the arrival of a ramrod 9019 * completion, or by the sriov code rescheduling itself. 9020 */ 9021 // XXX bxe_iov_sp_task(sc); 9022 9023#if 0 9024 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9025 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9026 &sc->sp_state)) { 9027 bxe_link_report(sc); 9028 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9029 } 9030#endif 9031} 9032 9033static void 9034bxe_handle_fp_tq(void *context, 9035 int pending) 9036{ 9037 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9038 struct bxe_softc *sc = fp->sc; 9039 uint8_t more_tx = FALSE; 9040 uint8_t more_rx = FALSE; 9041 9042 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9043 9044 /* XXX 9045 * IFF_DRV_RUNNING state can't be checked here since we process 9046 * slowpath events on a client queue during setup. Instead 9047 * we need to add a "process/continue" flag here that the driver 9048 * can use to tell the task here not to do anything. 
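 * A minimal version of such a flag might look like the sketch below,
 * where fp->fp_task_enable is a hypothetical field (not present in
 * this code):
 *
 *     if (!fp->fp_task_enable)
 *         return;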
9049 */ 9050#if 0 9051 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 9052 return; 9053 } 9054#endif 9055 9056 /* update the fastpath index */ 9057 bxe_update_fp_sb_idx(fp); 9058 9059 /* XXX add loop here if ever support multiple tx CoS */ 9060 /* fp->txdata[cos] */ 9061 if (bxe_has_tx_work(fp)) { 9062 BXE_FP_TX_LOCK(fp); 9063 more_tx = bxe_txeof(sc, fp); 9064 BXE_FP_TX_UNLOCK(fp); 9065 } 9066 9067 if (bxe_has_rx_work(fp)) { 9068 more_rx = bxe_rxeof(sc, fp); 9069 } 9070 9071 if (more_rx /*|| more_tx*/) { 9072 /* still more work to do */ 9073 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9074 return; 9075 } 9076 9077 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9078 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9079} 9080 9081static void 9082bxe_task_fp(struct bxe_fastpath *fp) 9083{ 9084 struct bxe_softc *sc = fp->sc; 9085 uint8_t more_tx = FALSE; 9086 uint8_t more_rx = FALSE; 9087 9088 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 9089 9090 /* update the fastpath index */ 9091 bxe_update_fp_sb_idx(fp); 9092 9093 /* XXX add loop here if ever support multiple tx CoS */ 9094 /* fp->txdata[cos] */ 9095 if (bxe_has_tx_work(fp)) { 9096 BXE_FP_TX_LOCK(fp); 9097 more_tx = bxe_txeof(sc, fp); 9098 BXE_FP_TX_UNLOCK(fp); 9099 } 9100 9101 if (bxe_has_rx_work(fp)) { 9102 more_rx = bxe_rxeof(sc, fp); 9103 } 9104 9105 if (more_rx /*|| more_tx*/) { 9106 /* still more work to do, bail out of this ISR and process later */ 9107 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9108 return; 9109 } 9110 9111 /* 9112 * Here we write the fastpath index taken before doing any tx or rx work. 9113 * It is quite possible that other hw events occurred up to this point and 9114 * they were actually processed accordingly above. Since we're going to 9115 * write an older fastpath index, an interrupt is coming in which we might 9116 * not do any work. 9117 */ 9118 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9119 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9120} 9121 9122/* 9123 * Legacy interrupt entry point. 9124 * 9125 * Verifies that the controller generated the interrupt and 9126 * then calls a separate routine to handle the various 9127 * interrupt causes: link, RX, and TX. 9128 */ 9129static void 9130bxe_intr_legacy(void *xsc) 9131{ 9132 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9133 struct bxe_fastpath *fp; 9134 uint16_t status, mask; 9135 int i; 9136 9137 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 9138 9139#if 0 9140 /* Don't handle any interrupts if we're not ready. */ 9141 if (__predict_false(sc->intr_sem != 0)) { 9142 return; 9143 } 9144#endif 9145 9146 /* 9147 * 0 for ustorm, 1 for cstorm 9148 * the bits returned from ack_int() are 0-15 9149 * bit 0 = attention status block 9150 * bit 1 = fast path status block 9151 * a mask of 0x2 or more = tx/rx event 9152 * a mask of 1 = slow path event 9153 */ 9154 9155 status = bxe_ack_int(sc); 9156 9157 /* the interrupt is not for us */ 9158 if (__predict_false(status == 0)) { 9159 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 9160 return; 9161 } 9162 9163 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 9164 9165 FOR_EACH_ETH_QUEUE(sc, i) { 9166 fp = &sc->fp[i]; 9167 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 9168 if (status & mask) { 9169 /* acknowledge and disable further fastpath interrupts */ 9170 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9171 bxe_task_fp(fp); 9172 status &= ~mask; 9173 } 9174 } 9175 9176#if 0 9177 if (CNIC_SUPPORT(sc)) { 9178 mask = 0x2; 9179 if (status & (mask | 0x1)) { 9180 ...
9181 status &= ~mask; 9182 } 9183 } 9184#endif 9185 9186 if (__predict_false(status & 0x1)) { 9187 /* acknowledge and disable further slowpath interrupts */ 9188 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9189 9190 /* schedule slowpath handler */ 9191 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9192 9193 status &= ~0x1; 9194 } 9195 9196 if (__predict_false(status)) { 9197 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 9198 } 9199} 9200 9201/* slowpath interrupt entry point */ 9202static void 9203bxe_intr_sp(void *xsc) 9204{ 9205 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9206 9207 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 9208 9209 /* acknowledge and disable further slowpath interrupts */ 9210 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9211 9212 /* schedule slowpath handler */ 9213 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9214} 9215 9216/* fastpath interrupt entry point */ 9217static void 9218bxe_intr_fp(void *xfp) 9219{ 9220 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 9221 struct bxe_softc *sc = fp->sc; 9222 9223 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 9224 9225 BLOGD(sc, DBG_INTR, 9226 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 9227 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 9228 9229#if 0 9230 /* Don't handle any interrupts if we're not ready. */ 9231 if (__predict_false(sc->intr_sem != 0)) { 9232 return; 9233 } 9234#endif 9235 9236 /* acknowledge and disable further fastpath interrupts */ 9237 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9238 9239 bxe_task_fp(fp); 9240} 9241 9242/* Release all interrupts allocated by the driver. */ 9243static void 9244bxe_interrupt_free(struct bxe_softc *sc) 9245{ 9246 int i; 9247 9248 switch (sc->interrupt_mode) { 9249 case INTR_MODE_INTX: 9250 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 9251 if (sc->intr[0].resource != NULL) { 9252 bus_release_resource(sc->dev, 9253 SYS_RES_IRQ, 9254 sc->intr[0].rid, 9255 sc->intr[0].resource); 9256 } 9257 break; 9258 case INTR_MODE_MSI: 9259 for (i = 0; i < sc->intr_count; i++) { 9260 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 9261 if (sc->intr[i].resource && sc->intr[i].rid) { 9262 bus_release_resource(sc->dev, 9263 SYS_RES_IRQ, 9264 sc->intr[i].rid, 9265 sc->intr[i].resource); 9266 } 9267 } 9268 pci_release_msi(sc->dev); 9269 break; 9270 case INTR_MODE_MSIX: 9271 for (i = 0; i < sc->intr_count; i++) { 9272 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 9273 if (sc->intr[i].resource && sc->intr[i].rid) { 9274 bus_release_resource(sc->dev, 9275 SYS_RES_IRQ, 9276 sc->intr[i].rid, 9277 sc->intr[i].resource); 9278 } 9279 } 9280 pci_release_msi(sc->dev); 9281 break; 9282 default: 9283 /* nothing to do as initial allocation failed */ 9284 break; 9285 } 9286} 9287 9288/* 9289 * This function determines and allocates the appropriate 9290 * interrupt based on system capabilities and user request. 9291 * 9292 * The user may force a particular interrupt mode, specify 9293 * the number of receive queues, specify the method for 9294 * distributing received frames to receive queues, or use 9295 * the default settings which will automatically select the 9296 * best supported combination. In addition, the OS may or 9297 * may not support certain combinations of these settings.
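 * Concretely, the code below cascades through three attempts: MSI-X
 * first (at least two vectors, one slowpath plus one per fastpath
 * queue), then a single MSI vector, and finally a shared legacy INTx
 * line.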
9298 * This routine attempts to reconcile the settings requested 9299 * by the user with the capabilities available from the system 9300 * to select the optimal combination of features. 9301 * 9302 * Returns: 9303 * 0 = Success, !0 = Failure. 9304 */ 9305static int 9306bxe_interrupt_alloc(struct bxe_softc *sc) 9307{ 9308 int msix_count = 0; 9309 int msi_count = 0; 9310 int num_requested = 0; 9311 int num_allocated = 0; 9312 int rid, i, j; 9313 int rc; 9314 9315 /* get the number of available MSI/MSI-X interrupts from the OS */ 9316 if (sc->interrupt_mode > 0) { 9317 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 9318 msix_count = pci_msix_count(sc->dev); 9319 } 9320 9321 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 9322 msi_count = pci_msi_count(sc->dev); 9323 } 9324 9325 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 9326 msi_count, msix_count); 9327 } 9328 9329 do { /* try allocating MSI-X interrupt resources (at least 2) */ 9330 if (sc->interrupt_mode != INTR_MODE_MSIX) { 9331 break; 9332 } 9333 9334 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 9335 (msix_count < 2)) { 9336 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9337 break; 9338 } 9339 9340 /* ask for the necessary number of MSI-X vectors */ 9341 num_requested = min((sc->num_queues + 1), msix_count); 9342 9343 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 9344 9345 num_allocated = num_requested; 9346 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 9347 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 9348 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9349 break; 9350 } 9351 9352 if (num_allocated < 2) { /* possible? */ 9353 BLOGE(sc, "MSI-X allocation less than 2!\n"); 9354 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9355 pci_release_msi(sc->dev); 9356 break; 9357 } 9358 9359 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 9360 num_requested, num_allocated); 9361 9362 /* best effort so use the number of vectors allocated to us */ 9363 sc->intr_count = num_allocated; 9364 sc->num_queues = num_allocated - 1; 9365 9366 rid = 1; /* initial resource identifier */ 9367 9368 /* allocate the MSI-X vectors */ 9369 for (i = 0; i < num_allocated; i++) { 9370 sc->intr[i].rid = (rid + i); 9371 9372 if ((sc->intr[i].resource = 9373 bus_alloc_resource_any(sc->dev, 9374 SYS_RES_IRQ, 9375 &sc->intr[i].rid, 9376 RF_ACTIVE)) == NULL) { 9377 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 9378 i, (rid + i)); 9379 9380 for (j = (i - 1); j >= 0; j--) { 9381 bus_release_resource(sc->dev, 9382 SYS_RES_IRQ, 9383 sc->intr[j].rid, 9384 sc->intr[j].resource); 9385 } 9386 9387 sc->intr_count = 0; 9388 sc->num_queues = 0; 9389 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9390 pci_release_msi(sc->dev); 9391 break; 9392 } 9393 9394 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 9395 } 9396 } while (0); 9397 9398 do { /* try allocating MSI vector resources (at least 1) */ 9399 if (sc->interrupt_mode != INTR_MODE_MSI) { 9400 break; 9401 } 9402 9403 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 9404 (msi_count < 1)) { 9405 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9406 break; 9407 } 9408 9409 /* ask for a single MSI vector */ 9410 num_requested = 1; 9411 9412 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 9413 9414 num_allocated = num_requested; 9415 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 9416 BLOGE(sc, "MSI alloc failed (%d)!\n",
rc); 9417 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9418 break; 9419 } 9420 9421 if (num_allocated != 1) { /* possible? */ 9422 BLOGE(sc, "MSI allocation is not 1!\n"); 9423 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9424 pci_release_msi(sc->dev); 9425 break; 9426 } 9427 9428 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9429 num_requested, num_allocated); 9430 9431 /* best effort so use the number of vectors allocated to us */ 9432 sc->intr_count = num_allocated; 9433 sc->num_queues = num_allocated; 9434 9435 rid = 1; /* initial resource identifier */ 9436 9437 sc->intr[0].rid = rid; 9438 9439 if ((sc->intr[0].resource = 9440 bus_alloc_resource_any(sc->dev, 9441 SYS_RES_IRQ, 9442 &sc->intr[0].rid, 9443 RF_ACTIVE)) == NULL) { 9444 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9445 sc->intr_count = 0; 9446 sc->num_queues = 0; 9447 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9448 pci_release_msi(sc->dev); 9449 break; 9450 } 9451 9452 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9453 } while (0); 9454 9455 do { /* try allocating INTx vector resources */ 9456 if (sc->interrupt_mode != INTR_MODE_INTX) { 9457 break; 9458 } 9459 9460 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9461 9462 /* only one vector for INTx */ 9463 sc->intr_count = 1; 9464 sc->num_queues = 1; 9465 9466 rid = 0; /* initial resource identifier */ 9467 9468 sc->intr[0].rid = rid; 9469 9470 if ((sc->intr[0].resource = 9471 bus_alloc_resource_any(sc->dev, 9472 SYS_RES_IRQ, 9473 &sc->intr[0].rid, 9474 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9475 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9476 sc->intr_count = 0; 9477 sc->num_queues = 0; 9478 sc->interrupt_mode = -1; /* Failed! */ 9479 break; 9480 } 9481 9482 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9483 } while (0); 9484 9485 if (sc->interrupt_mode == -1) { 9486 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9487 rc = 1; 9488 } else { 9489 BLOGD(sc, DBG_LOAD, 9490 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9491 sc->interrupt_mode, sc->num_queues); 9492 rc = 0; 9493 } 9494 9495 return (rc); 9496} 9497 9498static void 9499bxe_interrupt_detach(struct bxe_softc *sc) 9500{ 9501 struct bxe_fastpath *fp; 9502 int i; 9503 9504 /* release interrupt resources */ 9505 for (i = 0; i < sc->intr_count; i++) { 9506 if (sc->intr[i].resource && sc->intr[i].tag) { 9507 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9508 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9509 } 9510 } 9511 9512 for (i = 0; i < sc->num_queues; i++) { 9513 fp = &sc->fp[i]; 9514 if (fp->tq) { 9515 taskqueue_drain(fp->tq, &fp->tq_task); 9516 taskqueue_free(fp->tq); 9517 fp->tq = NULL; 9518 } 9519 } 9520 9521 if (sc->rx_mode_tq) { 9522 taskqueue_drain(sc->rx_mode_tq, &sc->rx_mode_tq_task); 9523 taskqueue_free(sc->rx_mode_tq); 9524 sc->rx_mode_tq = NULL; 9525 } 9526 9527 if (sc->sp_tq) { 9528 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9529 taskqueue_free(sc->sp_tq); 9530 sc->sp_tq = NULL; 9531 } 9532} 9533 9534/* 9535 * Enables interrupts and attaches the ISRs. 9536 * 9537 * When using multiple MSI/MSI-X vectors the first vector 9538 * is used for slowpath operations while all remaining 9539 * vectors are used for fastpath operations. If only a 9540 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9541 * ISR must look for both slowpath and fastpath completions.
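 * In other words: with MSI-X, vector 0 is wired to bxe_intr_sp() and
 * vectors 1..N to bxe_intr_fp(), while the single-vector MSI and INTx
 * modes funnel everything through bxe_intr_legacy().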
9542 */ 9543static int 9544bxe_interrupt_attach(struct bxe_softc *sc) 9545{ 9546 struct bxe_fastpath *fp; 9547 int rc = 0; 9548 int i; 9549 9550 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9551 "bxe%d_sp_tq", sc->unit); 9552 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9553 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9554 taskqueue_thread_enqueue, 9555 &sc->sp_tq); 9556 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9557 "%s", sc->sp_tq_name); 9558 9559 snprintf(sc->rx_mode_tq_name, sizeof(sc->rx_mode_tq_name), 9560 "bxe%d_rx_mode_tq", sc->unit); 9561 TASK_INIT(&sc->rx_mode_tq_task, 0, bxe_handle_rx_mode_tq, sc); 9562 sc->rx_mode_tq = taskqueue_create_fast(sc->rx_mode_tq_name, M_NOWAIT, 9563 taskqueue_thread_enqueue, 9564 &sc->rx_mode_tq); 9565 taskqueue_start_threads(&sc->rx_mode_tq, 1, PWAIT, /* lower priority */ 9566 "%s", sc->rx_mode_tq_name); 9567 9568 for (i = 0; i < sc->num_queues; i++) { 9569 fp = &sc->fp[i]; 9570 snprintf(fp->tq_name, sizeof(fp->tq_name), 9571 "bxe%d_fp%d_tq", sc->unit, i); 9572 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9573 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9574 taskqueue_thread_enqueue, 9575 &fp->tq); 9576 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9577 "%s", fp->tq_name); 9578 } 9579 9580 /* setup interrupt handlers */ 9581 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9582 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9583 9584 /* 9585 * Setup the interrupt handler. Note that we pass the driver instance 9586 * to the interrupt handler for the slowpath. 9587 */ 9588 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9589 (INTR_TYPE_NET | INTR_MPSAFE), 9590 NULL, bxe_intr_sp, sc, 9591 &sc->intr[0].tag)) != 0) { 9592 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9593 goto bxe_interrupt_attach_exit; 9594 } 9595 9596 bus_describe_intr(sc->dev, sc->intr[0].resource, 9597 sc->intr[0].tag, "sp"); 9598 9599 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9600 9601 /* initialize the fastpath vectors (note the first was used for sp) */ 9602 for (i = 0; i < sc->num_queues; i++) { 9603 fp = &sc->fp[i]; 9604 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9605 9606 /* 9607 * Setup the interrupt handler. Note that we pass the 9608 * fastpath context to the interrupt handler in this 9609 * case. 9610 */ 9611 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9612 (INTR_TYPE_NET | INTR_MPSAFE), 9613 NULL, bxe_intr_fp, fp, 9614 &sc->intr[i + 1].tag)) != 0) { 9615 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9616 (i + 1), rc); 9617 goto bxe_interrupt_attach_exit; 9618 } 9619 9620 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9621 sc->intr[i + 1].tag, "fp%02d", i); 9622 9623 /* bind the fastpath instance to a cpu */ 9624 if (sc->num_queues > 1) { 9625 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9626 } 9627 9628 fp->state = BXE_FP_STATE_IRQ; 9629 } 9630 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9631 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9632 9633 /* 9634 * Setup the interrupt handler. Note that we pass the 9635 * driver instance to the interrupt handler which 9636 * will handle both the slowpath and fastpath. 
9637 */ 9638 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9639 (INTR_TYPE_NET | INTR_MPSAFE), 9640 NULL, bxe_intr_legacy, sc, 9641 &sc->intr[0].tag)) != 0) { 9642 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9643 goto bxe_interrupt_attach_exit; 9644 } 9645 9646 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9647 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9648 9649 /* 9650 * Setup the interrupt handler. Note that we pass the 9651 * driver instance to the interrupt handler which 9652 * will handle both the slowpath and fastpath. 9653 */ 9654 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9655 (INTR_TYPE_NET | INTR_MPSAFE), 9656 NULL, bxe_intr_legacy, sc, 9657 &sc->intr[0].tag)) != 0) { 9658 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9659 goto bxe_interrupt_attach_exit; 9660 } 9661 } 9662 9663bxe_interrupt_attach_exit: 9664 9665 return (rc); 9666} 9667 9668static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9669static int bxe_init_hw_common(struct bxe_softc *sc); 9670static int bxe_init_hw_port(struct bxe_softc *sc); 9671static int bxe_init_hw_func(struct bxe_softc *sc); 9672static void bxe_reset_common(struct bxe_softc *sc); 9673static void bxe_reset_port(struct bxe_softc *sc); 9674static void bxe_reset_func(struct bxe_softc *sc); 9675static int bxe_gunzip_init(struct bxe_softc *sc); 9676static void bxe_gunzip_end(struct bxe_softc *sc); 9677static int bxe_init_firmware(struct bxe_softc *sc); 9678static void bxe_release_firmware(struct bxe_softc *sc); 9679 9680static struct 9681ecore_func_sp_drv_ops bxe_func_sp_drv = { 9682 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9683 .init_hw_cmn = bxe_init_hw_common, 9684 .init_hw_port = bxe_init_hw_port, 9685 .init_hw_func = bxe_init_hw_func, 9686 9687 .reset_hw_cmn = bxe_reset_common, 9688 .reset_hw_port = bxe_reset_port, 9689 .reset_hw_func = bxe_reset_func, 9690 9691 .gunzip_init = bxe_gunzip_init, 9692 .gunzip_end = bxe_gunzip_end, 9693 9694 .init_fw = bxe_init_firmware, 9695 .release_fw = bxe_release_firmware, 9696}; 9697 9698static void 9699bxe_init_func_obj(struct bxe_softc *sc) 9700{ 9701 sc->dmae_ready = 0; 9702 9703 ecore_init_func_obj(sc, 9704 &sc->func_obj, 9705 BXE_SP(sc, func_rdata), 9706 BXE_SP_MAPPING(sc, func_rdata), 9707 BXE_SP(sc, func_afex_rdata), 9708 BXE_SP_MAPPING(sc, func_afex_rdata), 9709 &bxe_func_sp_drv); 9710} 9711 9712static int 9713bxe_init_hw(struct bxe_softc *sc, 9714 uint32_t load_code) 9715{ 9716 struct ecore_func_state_params func_params = { NULL }; 9717 int rc; 9718 9719 /* prepare the parameters for function state transitions */ 9720 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9721 9722 func_params.f_obj = &sc->func_obj; 9723 func_params.cmd = ECORE_F_CMD_HW_INIT; 9724 9725 func_params.params.hw_init.load_phase = load_code; 9726 9727 /* 9728 * Via a plethora of function pointers, we will eventually reach 9729 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
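 * Which one depends on the load_phase value set just above.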
9730 */ 9731 rc = ecore_func_state_change(sc, &func_params); 9732 9733 return (rc); 9734} 9735 9736static void 9737bxe_fill(struct bxe_softc *sc, 9738 uint32_t addr, 9739 int fill, 9740 uint32_t len) 9741{ 9742 uint32_t i; 9743 9744 if (!(len % 4) && !(addr % 4)) { 9745 for (i = 0; i < len; i += 4) { 9746 REG_WR(sc, (addr + i), fill); 9747 } 9748 } else { 9749 for (i = 0; i < len; i++) { 9750 REG_WR8(sc, (addr + i), fill); 9751 } 9752 } 9753} 9754 9755/* writes FP SP data to FW - data_size in dwords */ 9756static void 9757bxe_wr_fp_sb_data(struct bxe_softc *sc, 9758 int fw_sb_id, 9759 uint32_t *sb_data_p, 9760 uint32_t data_size) 9761{ 9762 int index; 9763 9764 for (index = 0; index < data_size; index++) { 9765 REG_WR(sc, 9766 (BAR_CSTRORM_INTMEM + 9767 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9768 (sizeof(uint32_t) * index)), 9769 *(sb_data_p + index)); 9770 } 9771} 9772 9773static void 9774bxe_zero_fp_sb(struct bxe_softc *sc, 9775 int fw_sb_id) 9776{ 9777 struct hc_status_block_data_e2 sb_data_e2; 9778 struct hc_status_block_data_e1x sb_data_e1x; 9779 uint32_t *sb_data_p; 9780 uint32_t data_size = 0; 9781 9782 if (!CHIP_IS_E1x(sc)) { 9783 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9784 sb_data_e2.common.state = SB_DISABLED; 9785 sb_data_e2.common.p_func.vf_valid = FALSE; 9786 sb_data_p = (uint32_t *)&sb_data_e2; 9787 data_size = (sizeof(struct hc_status_block_data_e2) / 9788 sizeof(uint32_t)); 9789 } else { 9790 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9791 sb_data_e1x.common.state = SB_DISABLED; 9792 sb_data_e1x.common.p_func.vf_valid = FALSE; 9793 sb_data_p = (uint32_t *)&sb_data_e1x; 9794 data_size = (sizeof(struct hc_status_block_data_e1x) / 9795 sizeof(uint32_t)); 9796 } 9797 9798 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9799 9800 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9801 0, CSTORM_STATUS_BLOCK_SIZE); 9802 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9803 0, CSTORM_SYNC_BLOCK_SIZE); 9804} 9805 9806static void 9807bxe_wr_sp_sb_data(struct bxe_softc *sc, 9808 struct hc_sp_status_block_data *sp_sb_data) 9809{ 9810 int i; 9811 9812 for (i = 0; 9813 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9814 i++) { 9815 REG_WR(sc, 9816 (BAR_CSTRORM_INTMEM + 9817 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9818 (i * sizeof(uint32_t))), 9819 *((uint32_t *)sp_sb_data + i)); 9820 } 9821} 9822 9823static void 9824bxe_zero_sp_sb(struct bxe_softc *sc) 9825{ 9826 struct hc_sp_status_block_data sp_sb_data; 9827 9828 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9829 9830 sp_sb_data.state = SB_DISABLED; 9831 sp_sb_data.p_func.vf_valid = FALSE; 9832 9833 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9834 9835 bxe_fill(sc, 9836 (BAR_CSTRORM_INTMEM + 9837 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9838 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9839 bxe_fill(sc, 9840 (BAR_CSTRORM_INTMEM + 9841 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9842 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9843} 9844 9845static void 9846bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9847 int igu_sb_id, 9848 int igu_seg_id) 9849{ 9850 hc_sm->igu_sb_id = igu_sb_id; 9851 hc_sm->igu_seg_id = igu_seg_id; 9852 hc_sm->timer_value = 0xFF; 9853 hc_sm->time_to_expire = 0xFFFFFFFF; 9854} 9855 9856static void 9857bxe_map_sb_state_machines(struct hc_index_data *index_data) 9858{ 9859 /* zero out state machine indices */ 9860 9861 /* rx indices */ 9862 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
static void
bxe_map_sb_state_machines(struct hc_index_data *index_data)
{
    /* zero out state machine indices */

    /* rx indices */
    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

    /* tx indices */
    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

    /* map indices */

    /* rx indices */
    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);

    /* tx indices */
    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
}

static void
bxe_init_sb(struct bxe_softc *sc,
            bus_addr_t       busaddr,
            int              vfid,
            uint8_t          vf_valid,
            int              fw_sb_id,
            int              igu_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    uint32_t *sb_data_p;
    int igu_seg_id;
    int data_size;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_seg_id = HC_SEG_ACCESS_NORM;
    } else {
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    }

    bxe_zero_fp_sb(sc, fw_sb_id);

    if (!CHIP_IS_E1x(sc)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_ENABLED;
        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e2.common.same_igu_sb_1b = TRUE;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e2.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e2.index_data);
    } else {
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_ENABLED;
        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e1x.common.same_igu_sb_1b = TRUE;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e1x.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e1x.index_data);
    }

    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);

    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW - PCI guarantees endianness of regpairs */
    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
}

static inline uint8_t
bxe_fp_qzone_id(struct bxe_fastpath *fp)
{
    if (CHIP_IS_E1x(fp->sc)) {
        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
    } else {
        return (fp->cl_id);
    }
}
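/*
 * Example (illustrative only): on an E1x chip client IDs are only unique
 * per port, so a queue with cl_id 3 on port 1 maps to zone
 * (3 + 1 * ETH_MAX_RX_CLIENTS_E1H); on E2 and newer chips the client ID
 * is already unique per path and is used as the zone ID directly.
 */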
static inline uint32_t
bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
                           struct bxe_fastpath *fp)
{
    uint32_t offset = BAR_USTRORM_INTMEM;

#if 0
    if (IS_VF(sc)) {
        return (PXP_VF_ADDR_USDM_QUEUES_START +
                (sc->acquire_resp.resc.hw_qid[fp->index] *
                 sizeof(struct ustorm_queue_zone_data)));
    } else
#endif
    if (!CHIP_IS_E1x(sc)) {
        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
    } else {
        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
    }

    return (offset);
}
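/*
 * Note (added for clarity): the offset computed above is cached in
 * fp->ustorm_rx_prods_offset during bxe_init_eth_fp() so that the RX
 * producer update fast path (bxe_update_rx_prod() below) can write the
 * producers with plain REG_WR()s instead of re-deriving per-chip offsets.
 */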
static void
bxe_init_eth_fp(struct bxe_softc *sc,
                int              idx)
{
    struct bxe_fastpath *fp = &sc->fp[idx];
    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
    unsigned long q_type = 0;
    int cos;

    fp->sc    = sc;
    fp->index = idx;

    snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
             "bxe%d_fp%d_tx_lock", sc->unit, idx);
    mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

    snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
             "bxe%d_fp%d_rx_lock", sc->unit, idx);
    mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);

    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
    fp->fw_sb_id  = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));

    fp->cl_id = (CHIP_IS_E1x(sc)) ?
                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1 */
                    fp->igu_sb_id;
    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* setup sb indices */
    if (!CHIP_IS_E1x(sc)) {
        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
    } else {
        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
    }

    /* init shortcut */
    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);

    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];

    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
}
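/*
 * Reference sketch (added for clarity, not new behavior): the helper
 * below is the RX producer publish path. All three producers travel to
 * the chip in one struct ustorm_eth_rx_producers, written dword by dword
 * at the fp->ustorm_rx_prods_offset cached above, e.g.
 *
 *   bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod,
 *                      fp->rx_sge_prod);
 */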
static inline void
bxe_update_rx_prod(struct bxe_softc    *sc,
                   struct bxe_fastpath *fp,
                   uint16_t            rx_bd_prod,
                   uint16_t            rx_cq_prod,
                   uint16_t            rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod  = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning, this will generate an interrupt (to the TSTORM)
         * so this can only be done after the chip is initialized
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        if (i != 0) {
            continue;
        }

        if (CHIP_IS_E1(sc)) {
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}

static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
    SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
    fp->tx_db.data.zero_fill1 = 0;
    fp->tx_db.data.prod = 0;

    fp->tx_pkt_prod = 0;
    fp->tx_pkt_cons = 0;
    fp->tx_bd_prod = 0;
    fp->tx_bd_cons = 0;
    fp->eth_q_stats.tx_pkts = 0;
}

static inline void
bxe_init_tx_rings(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
#if 0
        uint8_t cos;
        for (cos = 0; cos < sc->max_cos; cos++) {
            bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]);
        }
#else
        bxe_init_tx_ring_one(&sc->fp[i]);
#endif
    }
}
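/*
 * Note (added for clarity): bxe_init_tx_ring_one() above only resets the
 * driver-side producer/consumer state and pre-formats the doorbell
 * header; it performs no register writes itself, unlike the RX ring init
 * which immediately publishes producers to the chip.
 */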
static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++) {
            sc->attn_group[index].sig[sindex] =
                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
        }

        if (!CHIP_IS_E1x(sc)) {
            /*
             * enable5 is separate from the rest of the registers,
             * and the address skip is 4 and not 16 between the
             * different groups
             */
            sc->attn_group[index].sig[4] =
                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
        } else {
            sc->attn_group[index].sig[4] = 0;
        }
    }

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_offset = (port) ?
                         HC_REG_ATTN_MSG1_ADDR_L :
                         HC_REG_ATTN_MSG0_ADDR_L;
        REG_WR(sc, reg_offset, U64_LO(section));
        REG_WR(sc, (reg_offset + 4), U64_HI(section));
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, sp_sb));

    bxe_zero_sp_sb(sc);

    /* PCI guarantees endianness of regpairs */
    sp_sb_data.state           = SB_ENABLED;
    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
    sp_sb_data.igu_seg_id      = igu_seg_id;
    sp_sb_data.p_func.pf_id    = func;
    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
    sp_sb_data.p_func.vf_id    = 0xff;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void
bxe_init_sp_ring(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
    sc->spq_prod_idx = 0;
    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
    sc->spq_prod_bd = sc->spq;
    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
}

static void
bxe_init_eq_ring(struct bxe_softc *sc)
{
    union event_ring_elem *elem;
    int i;

    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
    }

    sc->eq_cons    = 0;
    sc->eq_prod    = NUM_EQ_DESC;
    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];

    atomic_store_rel_long(&sc->eq_spq_left,
                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
                               NUM_EQ_DESC) - 1));
}
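/*
 * Worked example (illustrative): the loop above turns the last
 * descriptor of each EQ page into a next-page pointer, chaining the
 * pages into a ring. The initial producer credit is then
 *
 *   eq_spq_left = min(MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1
 *
 * which keeps slow-path commands from producing more events than either
 * the event queue or the slow-path queue can absorb.
 */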
static void
bxe_init_internal_common(struct bxe_softc *sc)
{
    int i;

    if (IS_MF_SI(sc)) {
        /*
         * In switch independent mode, the TSTORM needs to accept
         * packets that failed classification, since approximate match
         * mac addresses aren't written to NIG LLH.
         */
        REG_WR8(sc,
                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
                2);
    } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
        REG_WR8(sc,
                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
                0);
    }

    /*
     * Zero this manually as its initialization is currently missing
     * in the initTool.
     */
    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
        REG_WR(sc,
               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
               0);
    }

    if (!CHIP_IS_E1x(sc)) {
        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t         load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static void
storm_memset_func_cfg(struct bxe_softc                         *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t                                 abs_fid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_TSTRORM_INTMEM +
            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
    size = sizeof(struct tstorm_eth_function_common_config);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

static void
bxe_func_init(struct bxe_softc            *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}
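/*
 * Typical usage (a sketch for orientation, mirroring bxe_pf_init() below):
 *
 *   struct bxe_func_init_params func_init = { 0 };
 *   func_init.func_flgs = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
 *   func_init.pf_id     = SC_FUNC(sc);
 *   func_init.func_id   = SC_FUNC(sc);
 *   func_init.spq_map   = sc->spq_dma.paddr;
 *   func_init.spq_prod  = sc->spq_prod_idx;
 *   bxe_func_init(sc, &func_init);
 */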
/*
 * Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *   or
 *   0 - if all the min_rates are 0.
 * In the latter case the fairness algorithm should be deactivated.
 * If all min rates are not zero then those that are zeroes will be set to 1.
 */
static void
bxe_calc_vn_min(struct bxe_softc       *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to 100 */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t         mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

static void
bxe_calc_vn_max(struct bxe_softc       *sc,
                int                    vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t          read_cfg,
                  uint8_t          cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}
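/*
 * Worked example (illustrative numbers, not from the sources): the MF
 * config encodes bandwidth limits as small integers. A VN whose min BW
 * field reads 10 gets vn_min_rate = 10 * 100 = 1000, and in an SD mode a
 * max_cfg of 20 (absolute, in 100Mb units) yields
 * vn_max_rate = 20 * 100 = 2000, i.e. 2 Gbps.
 */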
static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t          port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM +
            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}

static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration in a function level.
     */
    flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id     = SC_FUNC(sc);
    func_init.func_id   = SC_FUNC(sc);
    func_init.spq_map   = sc->spq_dma.paddr;
    func_init.spq_prod  = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianness */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer     = sc->eq_prod;
    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id        = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}
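/*
 * Note (added for clarity): the enable/disable pairs below program the
 * same interrupt policy into two different blocks: the HC
 * (HC_REG_CONFIG_x, used when sc->devinfo.int_block == INT_BLOCK_HC) and
 * the IGU (IGU_REG_PF_CONFIGURATION, used otherwise). In both cases the
 * resulting mode is one of MSI-X, single-vector MSI-X, MSI, or INTx,
 * derived from sc->interrupt_mode and sc->intr_count.
 */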
static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    if (msix) {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0);
        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        if (single_msix) {
            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        }
    } else if (msi) {
        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_INT_LINE_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        if (!CHIP_IS_E1(sc)) {
            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
                  val, port, addr);

            REG_WR(sc, addr, val);

            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
    }

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, addr, val);

    /* ensure that HC_CONFIG is written before leading/trailing edge config */
    mb();

    if (!CHIP_IS_E1(sc)) {
        /* init leading/trailing edge */
        if (IS_MF(sc)) {
            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
            if (sc->port.pmf) {
                /* enable nig and gpio3 attention */
                val |= 0x1100;
            }
        } else {
            val = 0xffff;
        }

        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
    }

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

static void
bxe_igu_int_enable(struct bxe_softc *sc)
{
    uint32_t val;
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    if (msix) {
        val &= ~(IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_SINGLE_ISR_EN);
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN);
        if (single_msix) {
            val |= IGU_PF_CONF_SINGLE_ISR_EN;
        }
    } else if (msi) {
        val &= ~IGU_PF_CONF_INT_LINE_EN;
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    } else {
        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
        val |= (IGU_PF_CONF_INT_LINE_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    }

    /* clean previous status - need to configure igu prior to ack */
    if ((!msix) || single_msix) {
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
        bxe_ack_int(sc);
    }

    val |= IGU_PF_CONF_FUNC_EN;

    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);

    mb();

    /* init leading/trailing edge */
    if (IS_MF(sc)) {
        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
        if (sc->port.pmf) {
            /* enable nig and gpio3 attention */
            val |= 0x1100;
        }
    } else {
        val = 0xffff;
    }

    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

static void
bxe_int_enable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_enable(sc);
    } else {
        bxe_igu_int_enable(sc);
    }
}

static void
bxe_hc_int_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);

    /*
     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
     * block
     */
    if (CHIP_IS_E1(sc)) {
        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
         * register to prevent the HC from sending interrupts after we
         * exit the function
         */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, addr, val);
    if (REG_RD(sc, addr) != val) {
        BLOGE(sc, "proper val not read from HC IGU!\n");
    }
}

static void
bxe_igu_int_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
             IGU_PF_CONF_INT_LINE_EN |
             IGU_PF_CONF_ATTN_BIT_EN);

    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
        BLOGE(sc, "proper val not read from IGU!\n");
    }
}

static void
bxe_int_disable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_disable(sc);
    } else {
        bxe_igu_int_disable(sc);
    }
}
static void
bxe_nic_init(struct bxe_softc *sc,
             int              load_code)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_eth_fp(sc, i);
    }

    rmb(); /* ensure status block indices were read */

    bxe_init_rx_rings(sc);
    bxe_init_tx_rings(sc);

    if (IS_VF(sc)) {
        return;
    }

    /* initialize MOD_ABS interrupts */
    elink_init_mod_abs_int(sc, &sc->link_vars,
                           sc->devinfo.chip_id,
                           sc->devinfo.shmem_base,
                           sc->devinfo.shmem2_base,
                           SC_PORT(sc));

    bxe_init_def_sb(sc);
    bxe_update_dsb_idx(sc);
    bxe_init_sp_ring(sc);
    bxe_init_eq_ring(sc);
    bxe_init_internal(sc, load_code);
    bxe_pf_init(sc);
    bxe_stats_init(sc);

    /* flush all before enabling interrupts */
    mb();

    bxe_int_enable(sc);

    /* check for SPIO5 */
    bxe_attn_int_deasserted0(sc,
                             REG_RD(sc,
                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
                                     SC_PORT(sc)*4)) &
                             AEU_INPUTS_ATTN_BITS_SPIO5);
}

static inline void
bxe_init_objs(struct bxe_softc *sc)
{
    /* mcast rules must be added to tx if tx switching is enabled */
    ecore_obj_type o_type =
        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
                                         ECORE_OBJ_TYPE_RX;

    /* RX_MODE controlling object */
    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);

    /* multicast configuration controlling object */
    ecore_init_mcast_obj(sc,
                         &sc->mcast_obj,
                         sc->fp[0].cl_id,
                         sc->fp[0].index,
                         SC_FUNC(sc),
                         SC_FUNC(sc),
                         BXE_SP(sc, mcast_rdata),
                         BXE_SP_MAPPING(sc, mcast_rdata),
                         ECORE_FILTER_MCAST_PENDING,
                         &sc->sp_state,
                         o_type);

    /* Setup CAM credit pools */
    ecore_init_mac_credit_pool(sc,
                               &sc->macs_pool,
                               SC_FUNC(sc),
                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                 VNICS_PER_PATH(sc));

    ecore_init_vlan_credit_pool(sc,
                                &sc->vlans_pool,
                                SC_ABS_FUNC(sc) >> 1,
                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                  VNICS_PER_PATH(sc));

    /* RSS configuration object */
    ecore_init_rss_config_obj(sc,
                              &sc->rss_conf_obj,
                              sc->fp[0].cl_id,
                              sc->fp[0].index,
                              SC_FUNC(sc),
                              SC_FUNC(sc),
                              BXE_SP(sc, rss_rdata),
                              BXE_SP_MAPPING(sc, rss_rdata),
                              ECORE_FILTER_RSS_CONF_PENDING,
                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
}
/*
 * Initialize the function. This must be called before sending CLIENT_SETUP
 * for the first client.
 */
static inline int
bxe_func_start(struct bxe_softc *sc)
{
    struct ecore_func_state_params func_params = { NULL };
    struct ecore_func_start_params *start_params = &func_params.params.start;

    /* Prepare parameters for function state transitions */
    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);

    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_START;

    /* Function parameters */
    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
    start_params->sd_vlan_tag = OVLAN(sc);

    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        start_params->network_cos_mode = STATIC_COS;
    } else { /* CHIP_IS_E1X */
        start_params->network_cos_mode = FW_WRR;
    }

    start_params->gre_tunnel_mode = 0;
    start_params->gre_tunnel_rss  = 0;

    return (ecore_func_state_change(sc, &func_params));
}

static int
bxe_set_power_state(struct bxe_softc *sc,
                    uint8_t          state)
{
    uint16_t pmcsr;

    /* If there is no power capability, silently succeed */
    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
        BLOGW(sc, "No power capability\n");
        return (0);
    }

    pmcsr = pci_read_config(sc->dev,
                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                            2);

    switch (state) {
    case PCI_PM_D0:
        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);

        if (pmcsr & PCIM_PSTAT_DMASK) {
            /* delay required during transition out of D3hot */
            DELAY(20000);
        }

        break;

    case PCI_PM_D3hot:
        /* XXX if there are other clients above don't shut down the power */

        /* don't shut down the power for emulation and FPGA */
        if (CHIP_REV_IS_SLOW(sc)) {
            return (0);
        }

        pmcsr &= ~PCIM_PSTAT_DMASK;
        pmcsr |= PCIM_PSTAT_D3;

        if (sc->wol) {
            pmcsr |= PCIM_PSTAT_PMEENABLE;
        }

        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         pmcsr, 4);

        /*
         * No more memory access after this point until device is brought back
         * to D0 state.
         */
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = %d\n", state);
        return (-1);
    }

    return (0);
}
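/*
 * Usage sketch (illustrative only): callers are expected to bring the
 * device to D0 before touching its BARs and to check the result, e.g.
 *
 *   if (bxe_set_power_state(sc, PCI_PM_D0) != 0) {
 *       ... bail out of the load path ...
 *   }
 */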
/* return true if succeeded to acquire the lock */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    if (SC_PATH(sc)) {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
    } else {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
    }
}

/* try to acquire a leader lock for current engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* close gates #2, #3 and #4 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t          close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
                          (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                          (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
                   (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
                   (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ? "closing" : "opening");

    wmb();
}

/* poll for pending writes bit, it should get cleared in no more than 1s */
static int
bxe_er_poll_igu_vq(struct bxe_softc *sc)
{
    uint32_t cnt = 1000;
    uint32_t pend_bits = 0;

    do {
        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);

        if (pend_bits == 0) {
            break;
        }

        DELAY(1000);
    } while (--cnt > 0);

    if (cnt == 0) {
        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
        return (-1);
    }

    return (0);
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */

static void
bxe_clp_reset_prep(struct bxe_softc *sc,
                   uint32_t         *magic_val)
{
    /* Do some magic... */
    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
    *magic_val = val & SHARED_MF_CLP_MAGIC;
    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* restore the value of the 'magic' bit */
static void
bxe_clp_reset_done(struct bxe_softc *sc,
                   uint32_t         magic_val)
{
    /* Restore the 'magic' bit value... */
    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
    MFCFG_WR(sc, shared_mf_config.clp_mb,
             (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* prepare for MCP reset, takes care of CLP configurations */
static void
bxe_reset_mcp_prep(struct bxe_softc *sc,
                   uint32_t         *magic_val)
{
    uint32_t shmem;
    uint32_t validity_offset;

    /* set `magic' bit in order to save MF config */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_prep(sc, magic_val);
    }

    /* get shmem offset */
    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    validity_offset =
        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);

    /* Clear validity map flags */
    if (shmem > 0) {
        REG_WR(sc, shmem + validity_offset, 0);
    }
}

#define MCP_TIMEOUT      5000 /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100  /* 100 ms */

static void
bxe_mcp_wait_one(struct bxe_softc *sc)
{
    /* special handling for emulation and FPGA (10 times longer) */
    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
    } else {
        DELAY((MCP_ONE_TIMEOUT) * 1000);
    }
}

/* initialize shmem_base and wait for the validity signature to appear */
static int
bxe_init_shmem(struct bxe_softc *sc)
{
    int cnt = 0;
    uint32_t val = 0;

    do {
        sc->devinfo.shmem_base =
            sc->link_params.shmem_base =
                REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);

        if (sc->devinfo.shmem_base) {
            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
            if (val & SHR_MEM_VALIDITY_MB)
                return (0);
        }

        bxe_mcp_wait_one(sc);

    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

    BLOGE(sc, "BAD MCP validity signature\n");

    return (-1);
}

static int
bxe_reset_mcp_comp(struct bxe_softc *sc,
                   uint32_t         magic_val)
{
    int rc = bxe_init_shmem(sc);

    /* Restore the `magic' bit value */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_done(sc, magic_val);
    }

    return (rc);
}
static void
bxe_pxp_prep(struct bxe_softc *sc)
{
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
        wmb();
    }
}

/*
 * Reset the whole chip except for:
 * - PCIE core
 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 * - IGU
 * - MISC (including AEU)
 * - GRC
 * - RBCN, RBCP
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t          global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in 4 port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     * - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck, DMAE commands
     * would not return. Therefore it's important to reset the second
     * reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
     */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
           reset_mask2 & (~not_reset_mask2));

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           reset_mask1 & (~not_reset_mask1));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
           reset_mask2 & (~stay_reset2));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
    wmb();
}

static int
bxe_process_kill(struct bxe_softc *sc,
                 uint8_t          global)
{
    int cnt = 1000;
    uint32_t val = 0;
    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
    uint32_t tags_63_32 = 0;

    /* Empty the Tetris buffer, wait for 1s */
    do {
        sr_cnt         = REG_RD(sc, PXP2_REG_RD_SR_CNT);
        blk_cnt        = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
        pgl_exp_rom2   = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
        if (CHIP_IS_E3(sc)) {
            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
        }

        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
            ((port_is_idle_0 & 0x1) == 0x1) &&
            ((port_is_idle_1 & 0x1) == 0x1) &&
            (pgl_exp_rom2 == 0xffffffff) &&
            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
            break;
        DELAY(1000);
    } while (cnt-- > 0);

    if (cnt <= 0) {
        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
                  "are still outstanding read requests after 1s! "
                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
              sr_cnt, blk_cnt, port_is_idle_0,
              port_is_idle_1, pgl_exp_rom2);
        return (-1);
    }

    mb();

    /* Close gates #2, #3 and #4 */
    bxe_set_234_gates(sc, TRUE);

    /* Poll for IGU VQs for 57712 and newer chips */
    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
        return (-1);
    }

    /* XXX indicate that "process kill" is in progress to MCP */

    /* clear "unprepared" bit */
    REG_WR(sc, MISC_REG_UNPREPARED, 0);
    mb();

    /* Make sure all is written to the chip before the reset */
    wmb();

    /*
     * Wait for 1ms to empty GLUE and PCI-E core queues,
     * PSWHST, GRC and PSWRD Tetris buffer.
     */
    DELAY(1000);

    /* Prepare for chip reset: */
    /* MCP */
    if (global) {
        bxe_reset_mcp_prep(sc, &val);
    }

    /* PXP */
    bxe_pxp_prep(sc);
    mb();

    /* reset the chip */
    bxe_process_kill_chip_reset(sc, global);
    mb();

    /* Recover after reset: */
    /* MCP */
    if (global && bxe_reset_mcp_comp(sc, val)) {
        return (-1);
    }

    /* XXX add resetting the NO_MCP mode DB here */

    /* Open the gates #2, #3 and #4 */
    bxe_set_234_gates(sc, FALSE);

    /*
     * XXX
     * IGU/AEU preparation bring back the AEU/IGU to a reset state
     * re-enable attentions
     */

    return (0);
}
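/*
 * Note (added for clarity): the Tetris-buffer poll above checks the PXP2
 * read client counters, both port-idle bits, the expansion ROM register
 * and, on E3 only, the PGLUE tags, once per millisecond for up to one
 * second before declaring the "process kill" recovery a failure.
 */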
static int
bxe_leader_reset(struct bxe_softc *sc)
{
    int rc = 0;
    uint8_t global = bxe_reset_is_global(sc);
    uint32_t load_code;

    /*
     * If not going to reset MCP, load "fake" driver to reset HW while
     * driver is owner of the HW.
     */
    if (!global && !BXE_NOMCP(sc)) {
        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset;
        }

        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
            BLOGE(sc, "MCP unexpected response, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }

        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }
    }

    /* try to recover after the failure */
    if (bxe_process_kill(sc, global)) {
        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
        rc = -1;
        goto exit_leader_reset2;
    }

    /*
     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
     * state.
     */
    bxe_set_reset_done(sc);
    if (global) {
        bxe_clear_reset_global(sc);
    }

exit_leader_reset2:

    /* unload "fake driver" if it was loaded */
    if (!global && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

exit_leader_reset:

    sc->is_leader = 0;
    bxe_release_leader_lock(sc);

    mb();
    return (rc);
}
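/*
 * Note (added for clarity): the HC "rate" programmed below is simply the
 * coalescing interval inverted, hc_rate = 1000000 / ticks. For example a
 * hypothetical hc_rx_ticks of 25 (usecs) programs 40000 interrupts/sec,
 * and a ticks value of 0 disables the timer-based coalescing entirely.
 */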
/*
 * prepare INIT transition, parameters configured:
 *   - HC configuration
 *   - Queue's CDU context
 */
static void
bxe_pf_q_prep_init(struct bxe_softc               *sc,
                   struct bxe_fastpath            *fp,
                   struct ecore_queue_init_params *init_params)
{
    uint8_t cos;
    int cxt_index, cxt_offset;

    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);

    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);

    /* HC rate */
    init_params->rx.hc_rate =
        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
    init_params->tx.hc_rate =
        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;

    /* FW SB ID */
    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;

    /* CQ index among the SB indices */
    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;

    /* set maximum number of COSs supported by this queue */
    init_params->max_cos = sc->max_cos;

    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
          fp->index, init_params->max_cos);

    /* set the context pointers queue object */
    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
        /* XXX change index/cid here if ever support multiple tx CoS */
        /* fp->txdata[cos]->cid */
        cxt_index = fp->index / ILT_PAGE_CIDS;
        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
    }
}

/* set flags that are common for the Tx-only and not normal connections */
static unsigned long
bxe_get_common_flags(struct bxe_softc    *sc,
                     struct bxe_fastpath *fp,
                     uint8_t             zero_stats)
{
    unsigned long flags = 0;

    /* PF driver will always initialize the Queue to an ACTIVE state */
    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);

    /*
     * tx only connections collect statistics (on the same index as the
     * parent connection). The statistics are zeroed when the parent
     * connection is initialized.
     */

    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
    if (zero_stats) {
        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
    }

    /*
     * tx only connections can support tx-switching, though their
     * CoS-ness doesn't survive the loopback
     */
    if (sc->flags & BXE_TX_SWITCHING) {
        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
    }

    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);

    return (flags);
}

static unsigned long
bxe_get_q_flags(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    unsigned long flags = 0;

    if (IS_MF_SD(sc)) {
        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
    }

    if (sc->ifnet->if_capenable & IFCAP_LRO) {
        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
#if 0
        if (fp->mode == TPA_MODE_GRO)
            __set_bit(ECORE_Q_FLG_TPA_GRO, &flags);
#endif
    }

    if (leading) {
        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
    }

    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);

#if 0
    /* configure silent vlan removal */
    if (IS_MF_AFEX(sc)) {
        bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags);
    }
#endif

    /* merge with common flags */
    return (flags | bxe_get_common_flags(sc, fp, TRUE));
}

static void
bxe_pf_q_prep_general(struct bxe_softc                  *sc,
                      struct bxe_fastpath               *fp,
                      struct ecore_general_setup_params *gen_init,
                      uint8_t                           cos)
{
    gen_init->stat_id = bxe_stats_id(fp);
    gen_init->spcl_id = fp->cl_id;
    gen_init->mtu     = sc->mtu;
    gen_init->cos     = cos;
}
static void
bxe_pf_rx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct rxq_pause_params       *pause,
                 struct ecore_rxq_setup_params *rxq_init)
{
    uint8_t max_sge = 0;
    uint16_t sge_sz = 0;
    uint16_t tpa_agg_size = 0;

    if (sc->ifnet->if_capenable & IFCAP_LRO) {
        pause->sge_th_lo = SGE_TH_LO(sc);
        pause->sge_th_hi = SGE_TH_HI(sc);

        /* validate SGE ring has enough to cross high threshold */
        if (sc->dropless_fc &&
            (pause->sge_th_hi + FW_PREFETCH_CNT) >
            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
            BLOGW(sc, "sge ring threshold limit\n");
        }

        /* minimum max_aggregation_size is 2*MTU (two full buffers) */
        tpa_agg_size = (2 * sc->mtu);
        if (tpa_agg_size < sc->max_aggregation_size) {
            tpa_agg_size = sc->max_aggregation_size;
        }

        max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
        max_sge = ((max_sge + PAGES_PER_SGE - 1) &
                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
        sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
    }

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);

        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map   = fp->rx_dma.paddr;
    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map    = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including padding).
     */
    rxq_init->buf_sz = (fp->rx_buf_size -
                        IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz      = tpa_agg_size;
    rxq_init->sge_buf_sz      = sge_sz;
    rxq_init->max_sges_pkt    = max_sge;
    rxq_init->rss_engine_id   = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number of simultaneous TPA aggregations for this Queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}
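/*
 * Sizing note (added for clarity): tpa_agg_size above is at least two
 * full MTUs (e.g. 2 * 1500 = 3000 bytes at a standard MTU) so an
 * aggregation always has room for two completed buffers, and buf_sz
 * deliberately excludes the IP_HEADER_ALIGNMENT_PADDING bytes reserved
 * at the head of each RX buffer.
 */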
* fp->txdata[cos]->tx_dma.paddr; 11819 */ 11820 txq_init->dscr_map = fp->tx_dma.paddr; 11821 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11822 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11823 txq_init->fw_sb_id = fp->fw_sb_id; 11824 11825 /* 11826 * set the TSS leading client id for TX classification to the 11827 * leading RSS client id 11828 */ 11829 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11830 } 11831 11832 /* 11833 * This function performs 2 steps in a queue state machine: 11834 * 1) RESET->INIT 11835 * 2) INIT->SETUP 11836 */ 11837 static int 11838 bxe_setup_queue(struct bxe_softc *sc, 11839 struct bxe_fastpath *fp, 11840 uint8_t leading) 11841 { 11842 struct ecore_queue_state_params q_params = { NULL }; 11843 struct ecore_queue_setup_params *setup_params = 11844 &q_params.params.setup; 11845#if 0 11846 struct ecore_queue_setup_tx_only_params *tx_only_params = 11847 &q_params.params.tx_only; 11848 uint8_t tx_index; 11849#endif 11850 int rc; 11851 11852 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11853 11854 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11855 11856 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11857 11858 /* we want to wait for completion in this context */ 11859 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11860 11861 /* prepare the INIT parameters */ 11862 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11863 11864 /* Set the command */ 11865 q_params.cmd = ECORE_Q_CMD_INIT; 11866 11867 /* Change the state to INIT */ 11868 rc = ecore_queue_state_change(sc, &q_params); 11869 if (rc) { 11870 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11871 return (rc); 11872 } 11873 11874 BLOGD(sc, DBG_LOAD, "init complete\n"); 11875 11876 /* now move the Queue to the SETUP state */ 11877 memset(setup_params, 0, sizeof(*setup_params)); 11878 11879 /* set Queue flags */ 11880 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11881 11882 /* set general SETUP parameters */ 11883 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11884 FIRST_TX_COS_INDEX); 11885 11886 bxe_pf_rx_q_prep(sc, fp, 11887 &setup_params->pause_params, 11888 &setup_params->rxq_params); 11889 11890 bxe_pf_tx_q_prep(sc, fp, 11891 &setup_params->txq_params, 11892 FIRST_TX_COS_INDEX); 11893 11894 /* Set the command */ 11895 q_params.cmd = ECORE_Q_CMD_SETUP; 11896 11897 /* change the state to SETUP */ 11898 rc = ecore_queue_state_change(sc, &q_params); 11899 if (rc) { 11900 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11901 return (rc); 11902 } 11903 11904#if 0 11905 /* loop through the relevant tx-only indices */ 11906 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11907 tx_index < sc->max_cos; 11908 tx_index++) { 11909 /* prepare and send tx-only ramrod*/ 11910 rc = bxe_setup_tx_only(sc, fp, &q_params, 11911 tx_only_params, tx_index, leading); 11912 if (rc) { 11913 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11914 fp->index, tx_index); 11915 return (rc); 11916 } 11917 } 11918#endif 11919 11920 return (rc); 11921 } 11922 11923 static int 11924 bxe_setup_leading(struct bxe_softc *sc) 11925 { 11926 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11927 } 11928 11929 static int 11930 bxe_config_rss_pf(struct bxe_softc *sc, 11931 struct ecore_rss_config_obj *rss_obj, 11932 uint8_t config_hash) 11933 { 11934 struct ecore_config_rss_params params = { NULL }; 11935 int i; 11936 11937 /* 11938 * Although RSS is meaningless when there is a single HW queue, we 11939 * still need it enabled in order to have HW Rx hash generated.
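 * (For example, with 4 ETH queues the indirection table filled in bxe_init_rss_pf() below maps bucket i to client (cl_id + (i % 4)), so the buckets cycle round-robin across the queues.)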
11940 */ 11941 11942 params.rss_obj = rss_obj; 11943 11944 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11945 11946 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11947 11948 /* RSS configuration */ 11949 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11950 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11951 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11952 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11953 if (rss_obj->udp_rss_v4) { 11954 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11955 } 11956 if (rss_obj->udp_rss_v6) { 11957 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11958 } 11959 11960 /* Hash bits */ 11961 params.rss_result_mask = MULTI_MASK; 11962 11963 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11964 11965 if (config_hash) { 11966 /* RSS keys */ 11967 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 11968 params.rss_key[i] = arc4random(); 11969 } 11970 11971 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 11972 } 11973 11974 return (ecore_config_rss(sc, &params)); 11975 } 11976 11977 static int 11978 bxe_config_rss_eth(struct bxe_softc *sc, 11979 uint8_t config_hash) 11980 { 11981 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 11982 } 11983 11984 static int 11985 bxe_init_rss_pf(struct bxe_softc *sc) 11986 { 11987 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 11988 int i; 11989 11990 /* 11991 * Prepare the initial contents of the indirection table if 11992 * RSS is enabled 11993 */ 11994 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 11995 sc->rss_conf_obj.ind_table[i] = 11996 (sc->fp->cl_id + (i % num_eth_queues)); 11997 } 11998 11999 if (sc->udp_rss) { 12000 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12001 } 12002 12003 /* 12004 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12005 * per-port, so if explicit configuration is needed, do it only 12006 * for a PMF. 12007 * 12008 * For 57712 and newer it's a per-function configuration. 12009 */ 12010 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12011 } 12012 12013 static int 12014 bxe_set_mac_one(struct bxe_softc *sc, 12015 uint8_t *mac, 12016 struct ecore_vlan_mac_obj *obj, 12017 uint8_t set, 12018 int mac_type, 12019 unsigned long *ramrod_flags) 12020 { 12021 struct ecore_vlan_mac_ramrod_params ramrod_param; 12022 int rc; 12023 12024 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12025 12026 /* fill in general parameters */ 12027 ramrod_param.vlan_mac_obj = obj; 12028 ramrod_param.ramrod_flags = *ramrod_flags; 12029 12030 /* fill a user request section if needed */ 12031 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12032 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12033 12034 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12035 12036 /* Set the command: ADD or DEL */ 12037 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12038 ECORE_VLAN_MAC_DEL; 12039 } 12040 12041 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12042 12043 if (rc == ECORE_EXISTS) { 12044 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12045 /* do not treat adding same MAC as error */ 12046 rc = 0; 12047 } else if (rc < 0) { 12048 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 12049 } 12050 12051 return (rc); 12052} 12053 12054static int 12055bxe_set_eth_mac(struct bxe_softc *sc, 12056 uint8_t set) 12057{ 12058 unsigned long ramrod_flags = 0; 12059 12060 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12061 12062 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12063 12064 /* Eth MAC is set on RSS leading client (fp[0]) */ 12065 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12066 &sc->sp_objs->mac_obj, 12067 set, ECORE_ETH_MAC, &ramrod_flags)); 12068} 12069 12070#if 0 12071static void 12072bxe_update_max_mf_config(struct bxe_softc *sc, 12073 uint32_t value) 12074{ 12075 /* load old values */ 12076 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12077 12078 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12079 /* leave all but MAX value */ 12080 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12081 12082 /* set new MAX value */ 12083 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12084 FUNC_MF_CFG_MAX_BW_MASK); 12085 12086 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12087 } 12088} 12089#endif 12090 12091static int 12092bxe_get_cur_phy_idx(struct bxe_softc *sc) 12093{ 12094 uint32_t sel_phy_idx = 0; 12095 12096 if (sc->link_params.num_phys <= 1) { 12097 return (ELINK_INT_PHY); 12098 } 12099 12100 if (sc->link_vars.link_up) { 12101 sel_phy_idx = ELINK_EXT_PHY1; 12102 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12103 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12104 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12105 ELINK_SUPPORTED_FIBRE)) 12106 sel_phy_idx = ELINK_EXT_PHY2; 12107 } else { 12108 switch (elink_phy_selection(&sc->link_params)) { 12109 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12110 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12111 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12112 sel_phy_idx = ELINK_EXT_PHY1; 12113 break; 12114 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12115 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12116 sel_phy_idx = ELINK_EXT_PHY2; 12117 break; 12118 } 12119 } 12120 12121 return (sel_phy_idx); 12122} 12123 12124static int 12125bxe_get_link_cfg_idx(struct bxe_softc *sc) 12126{ 12127 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12128 12129 /* 12130 * The selected activated PHY is always after swapping (in case PHY 12131 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12132 * the configuration 12133 */ 12134 12135 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12136 if (sel_phy_idx == ELINK_EXT_PHY1) 12137 sel_phy_idx = ELINK_EXT_PHY2; 12138 else if (sel_phy_idx == ELINK_EXT_PHY2) 12139 sel_phy_idx = ELINK_EXT_PHY1; 12140 } 12141 12142 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12143} 12144 12145static void 12146bxe_set_requested_fc(struct bxe_softc *sc) 12147{ 12148 /* 12149 * Initialize link parameters structure variables 12150 * It is recommended to turn off RX FC for jumbo frames 12151 * for better performance 12152 */ 12153 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12154 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12155 } else { 12156 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12157 } 12158} 12159 12160static void 12161bxe_calc_fc_adv(struct bxe_softc *sc) 12162{ 12163 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12164 switch (sc->link_vars.ieee_fc & 12165 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12166 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12167 default: 12168 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12169 ADVERTISED_Pause); 12170 break; 12171 12172 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12173 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12174 ADVERTISED_Pause); 12175 break; 12176 12177 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12178 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12179 break; 12180 } 12181} 12182 12183static uint16_t 12184bxe_get_mf_speed(struct bxe_softc *sc) 12185{ 12186 uint16_t line_speed = sc->link_vars.line_speed; 12187 if (IS_MF(sc)) { 12188 uint16_t maxCfg = 12189 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12190 12191 /* calculate the current MAX line speed limit for the MF devices */ 12192 if (IS_MF_SI(sc)) { 12193 line_speed = (line_speed * maxCfg) / 100; 12194 } else { /* SD mode */ 12195 uint16_t vn_max_rate = maxCfg * 100; 12196 12197 if (vn_max_rate < line_speed) { 12198 line_speed = vn_max_rate; 12199 } 12200 } 12201 } 12202 12203 return (line_speed); 12204} 12205 12206static void 12207bxe_fill_report_data(struct bxe_softc *sc, 12208 struct bxe_link_report_data *data) 12209{ 12210 uint16_t line_speed = bxe_get_mf_speed(sc); 12211 12212 memset(data, 0, sizeof(*data)); 12213 12214 /* fill the report data with the effective line speed */ 12215 data->line_speed = line_speed; 12216 12217 /* Link is down */ 12218 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12219 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12220 } 12221 12222 /* Full DUPLEX */ 12223 if (sc->link_vars.duplex == DUPLEX_FULL) { 12224 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12225 } 12226 12227 /* Rx Flow Control is ON */ 12228 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12229 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12230 } 12231 12232 /* Tx Flow Control is ON */ 12233 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12234 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12235 } 12236} 12237 12238/* report link status to OS, should be called under phy_lock */ 12239static void 12240bxe_link_report_locked(struct bxe_softc *sc) 12241{ 12242 struct bxe_link_report_data cur_data; 12243 12244 /* reread mf_cfg */ 12245 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12246 bxe_read_mf_cfg(sc); 12247 } 12248 12249 /* Read the current link report info */ 12250 
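/* (a full snapshot is taken so the memcmp() against sc->last_reported_link below can cheaply detect and suppress duplicate reports) */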
bxe_fill_report_data(sc, &cur_data); 12251 12252 /* Don't report link down or exactly the same link status twice */ 12253 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12254 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12255 &sc->last_reported_link.link_report_flags) && 12256 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12257 &cur_data.link_report_flags))) { 12258 return; 12259 } 12260 12261 sc->link_cnt++; 12262 12263 /* report new link params and remember the state for the next time */ 12264 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12265 12266 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12267 &cur_data.link_report_flags)) { 12268 if_link_state_change(sc->ifnet, LINK_STATE_DOWN); 12269 BLOGI(sc, "NIC Link is Down\n"); 12270 } else { 12271 const char *duplex; 12272 const char *flow; 12273 12274 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12275 &cur_data.link_report_flags)) { 12276 duplex = "full"; 12277 } else { 12278 duplex = "half"; 12279 } 12280 12281 /* 12282 * Handle the FC at the end so that only these flags would be 12283 * possibly set. This way we may easily check if there is no FC 12284 * enabled. 12285 */ 12286 if (cur_data.link_report_flags) { 12287 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12288 &cur_data.link_report_flags) && 12289 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12290 &cur_data.link_report_flags)) { 12291 flow = "ON - receive & transmit"; 12292 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12293 &cur_data.link_report_flags) && 12294 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12295 &cur_data.link_report_flags)) { 12296 flow = "ON - receive"; 12297 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12298 &cur_data.link_report_flags) && 12299 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12300 &cur_data.link_report_flags)) { 12301 flow = "ON - transmit"; 12302 } else { 12303 flow = "none"; /* possible? 
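in practice only if a flag other than the FC bits were to survive to this point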
*/ 12304 } 12305 } else { 12306 flow = "none"; 12307 } 12308 12309 if_link_state_change(sc->ifnet, LINK_STATE_UP); 12310 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12311 cur_data.line_speed, duplex, flow); 12312 } 12313} 12314 12315static void 12316bxe_link_report(struct bxe_softc *sc) 12317{ 12318 BXE_PHY_LOCK(sc); 12319 bxe_link_report_locked(sc); 12320 BXE_PHY_UNLOCK(sc); 12321} 12322 12323static void 12324bxe_link_status_update(struct bxe_softc *sc) 12325{ 12326 if (sc->state != BXE_STATE_OPEN) { 12327 return; 12328 } 12329 12330#if 0 12331 /* read updated dcb configuration */ 12332 if (IS_PF(sc)) 12333 bxe_dcbx_pmf_update(sc); 12334#endif 12335 12336 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12337 elink_link_status_update(&sc->link_params, &sc->link_vars); 12338 } else { 12339 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12340 ELINK_SUPPORTED_10baseT_Full | 12341 ELINK_SUPPORTED_100baseT_Half | 12342 ELINK_SUPPORTED_100baseT_Full | 12343 ELINK_SUPPORTED_1000baseT_Full | 12344 ELINK_SUPPORTED_2500baseX_Full | 12345 ELINK_SUPPORTED_10000baseT_Full | 12346 ELINK_SUPPORTED_TP | 12347 ELINK_SUPPORTED_FIBRE | 12348 ELINK_SUPPORTED_Autoneg | 12349 ELINK_SUPPORTED_Pause | 12350 ELINK_SUPPORTED_Asym_Pause); 12351 sc->port.advertising[0] = sc->port.supported[0]; 12352 12353 sc->link_params.sc = sc; 12354 sc->link_params.port = SC_PORT(sc); 12355 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12356 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12357 sc->link_params.req_line_speed[0] = SPEED_10000; 12358 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12359 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12360 12361 if (CHIP_REV_IS_FPGA(sc)) { 12362 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12363 sc->link_vars.line_speed = ELINK_SPEED_1000; 12364 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12365 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12366 } else { 12367 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12368 sc->link_vars.line_speed = ELINK_SPEED_10000; 12369 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12370 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12371 } 12372 12373 sc->link_vars.link_up = 1; 12374 12375 sc->link_vars.duplex = DUPLEX_FULL; 12376 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12377 12378 if (IS_PF(sc)) { 12379 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12380 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12381 bxe_link_report(sc); 12382 } 12383 } 12384 12385 if (IS_PF(sc)) { 12386 if (sc->link_vars.link_up) { 12387 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12388 } else { 12389 bxe_stats_handle(sc, STATS_EVENT_STOP); 12390 } 12391 bxe_link_report(sc); 12392 } else { 12393 bxe_link_report(sc); 12394 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12395 } 12396} 12397 12398static int 12399bxe_initial_phy_init(struct bxe_softc *sc, 12400 int load_mode) 12401{ 12402 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12403 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12404 struct elink_params *lp = &sc->link_params; 12405 12406 bxe_set_requested_fc(sc); 12407 12408 if (CHIP_REV_IS_SLOW(sc)) { 12409 uint32_t bond = CHIP_BOND_ID(sc); 12410 uint32_t feat = 0; 12411 12412 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12413 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12414 } else if (bond & 0x4) { 12415 if (CHIP_IS_E3(sc)) { 12416 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12417 } else { 12418 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12419 } 12420 } else if (bond & 0x8) { 12421 if 
(CHIP_IS_E3(sc)) { 12422 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12423 } else { 12424 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12425 } 12426 } 12427 12428 /* disable EMAC for E3 and above */ 12429 if (bond & 0x2) { 12430 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12431 } 12432 12433 sc->link_params.feature_config_flags |= feat; 12434 } 12435 12436 BXE_PHY_LOCK(sc); 12437 12438 if (load_mode == LOAD_DIAG) { 12439 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12440 /* Prefer doing PHY loopback at 10G speed, if possible */ 12441 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12442 if (lp->speed_cap_mask[cfg_idx] & 12443 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12444 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12445 } else { 12446 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12447 } 12448 } 12449 } 12450 12451 if (load_mode == LOAD_LOOPBACK_EXT) { 12452 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12453 } 12454 12455 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12456 12457 BXE_PHY_UNLOCK(sc); 12458 12459 bxe_calc_fc_adv(sc); 12460 12461 if (sc->link_vars.link_up) { 12462 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12463 bxe_link_report(sc); 12464 } 12465 12466 if (!CHIP_REV_IS_SLOW(sc)) { 12467 bxe_periodic_start(sc); 12468 } 12469 12470 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12471 return (rc); 12472} 12473 12474/* must be called under IF_ADDR_LOCK */ 12475static int 12476bxe_init_mcast_macs_list(struct bxe_softc *sc, 12477 struct ecore_mcast_ramrod_params *p) 12478{ 12479 struct ifnet *ifp = sc->ifnet; 12480 int mc_count = 0; 12481 struct ifmultiaddr *ifma; 12482 struct ecore_mcast_list_elem *mc_mac; 12483 12484 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12485 if (ifma->ifma_addr->sa_family != AF_LINK) { 12486 continue; 12487 } 12488 12489 mc_count++; 12490 } 12491 12492 ECORE_LIST_INIT(&p->mcast_list); 12493 p->mcast_list_len = 0; 12494 12495 if (!mc_count) { 12496 return (0); 12497 } 12498 12499 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, 12500 (M_NOWAIT | M_ZERO)); 12501 if (!mc_mac) { 12502 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12503 return (-1); 12504 } 12505 12506 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12507 if (ifma->ifma_addr->sa_family != AF_LINK) { 12508 continue; 12509 } 12510 12511 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 12512 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list); 12513 12514 BLOGD(sc, DBG_LOAD, 12515 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12516 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12517 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12518 12519 mc_mac++; 12520 } 12521 12522 p->mcast_list_len = mc_count; 12523 12524 return (0); 12525} 12526 12527static void 12528bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p) 12529{ 12530 struct ecore_mcast_list_elem *mc_mac = 12531 ECORE_LIST_FIRST_ENTRY(&p->mcast_list, 12532 struct ecore_mcast_list_elem, 12533 link); 12534 12535 if (mc_mac) { 12536 /* only a single free as all mc_macs are in the same heap array */ 12537 free(mc_mac, M_DEVBUF); 12538 } 12539} 12540 12541static int 12542bxe_set_mc_list(struct bxe_softc *sc) 12543{ 12544 struct ecore_mcast_ramrod_params rparam = { NULL }; 12545 int rc = 0; 12546 12547 rparam.mcast_obj = &sc->mcast_obj; 12548 12549 BXE_MCAST_LOCK(sc); 12550 12551 /* first, clear all configured multicast MACs */ 12552 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12553 if (rc < 0) { 12554 BLOGE(sc, "Failed to clear 
multicast configuration: %d\n", rc); /* don't return with the mcast lock held */ BXE_MCAST_UNLOCK(sc); 12555 return (rc); 12556 } 12557 12558 /* configure a new MACs list */ 12559 rc = bxe_init_mcast_macs_list(sc, &rparam); 12560 if (rc) { 12561 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); 12562 BXE_MCAST_UNLOCK(sc); 12563 return (rc); 12564 } 12565 12566 /* Now add the new MACs */ 12567 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12568 if (rc < 0) { 12569 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12570 } 12571 12572 bxe_free_mcast_macs_list(&rparam); 12573 12574 BXE_MCAST_UNLOCK(sc); 12575 12576 return (rc); 12577 } 12578 12579 static int 12580 bxe_set_uc_list(struct bxe_softc *sc) 12581 { 12582 struct ifnet *ifp = sc->ifnet; 12583 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12584 struct ifaddr *ifa; 12585 unsigned long ramrod_flags = 0; 12586 int rc; 12587 12588#if __FreeBSD_version < 800000 12589 IF_ADDR_LOCK(ifp); 12590#else 12591 if_addr_rlock(ifp); 12592#endif 12593 12594 /* first schedule a cleanup of the old configuration */ 12595 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12596 if (rc < 0) { 12597 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12598#if __FreeBSD_version < 800000 12599 IF_ADDR_UNLOCK(ifp); 12600#else 12601 if_addr_runlock(ifp); 12602#endif 12603 return (rc); 12604 } 12605 12606 ifa = ifp->if_addr; 12607 while (ifa) { 12608 if (ifa->ifa_addr->sa_family != AF_LINK) { 12609 ifa = TAILQ_NEXT(ifa, ifa_link); 12610 continue; 12611 } 12612 12613 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12614 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12615 if (rc == -EEXIST) { 12616 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12617 /* do not treat adding same MAC as an error */ 12618 rc = 0; 12619 } else if (rc < 0) { 12620 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12621#if __FreeBSD_version < 800000 12622 IF_ADDR_UNLOCK(ifp); 12623#else 12624 if_addr_runlock(ifp); 12625#endif 12626 return (rc); 12627 } 12628 12629 ifa = TAILQ_NEXT(ifa, ifa_link); 12630 } 12631 12632#if __FreeBSD_version < 800000 12633 IF_ADDR_UNLOCK(ifp); 12634#else 12635 if_addr_runlock(ifp); 12636#endif 12637 12638 /* Execute the pending commands */ 12639 bit_set(&ramrod_flags, RAMROD_CONT); 12640 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12641 ECORE_UC_LIST_MAC, &ramrod_flags)); 12642 } 12643 12644 static void 12645 bxe_handle_rx_mode_tq(void *context, 12646 int pending) 12647 { 12648 struct bxe_softc *sc = (struct bxe_softc *)context; 12649 struct ifnet *ifp = sc->ifnet; 12650 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12651 12652 BXE_CORE_LOCK(sc); 12653 12654 if (sc->state != BXE_STATE_OPEN) { 12655 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12656 BXE_CORE_UNLOCK(sc); 12657 return; 12658 } 12659 12660 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags); 12661 12662 if (ifp->if_flags & IFF_PROMISC) { 12663 rx_mode = BXE_RX_MODE_PROMISC; 12664 } else if ((ifp->if_flags & IFF_ALLMULTI) || 12665 ((ifp->if_amcount > BXE_MAX_MULTICAST) && 12666 CHIP_IS_E1(sc))) { 12667 rx_mode = BXE_RX_MODE_ALLMULTI; 12668 } else { 12669 if (IS_PF(sc)) { 12670 /* some multicasts */ 12671 if (bxe_set_mc_list(sc) < 0) { 12672 rx_mode = BXE_RX_MODE_ALLMULTI; 12673 } 12674 if (bxe_set_uc_list(sc) < 0) { 12675 rx_mode = BXE_RX_MODE_PROMISC; 12676 } 12677 } 12678#if 0 12679 else { 12680 /* 12681 * Configuring mcast to a VF involves sleeping (when we 12682 * wait for the PF's response).
Since this function is 12683 * called from a non sleepable context we must schedule 12684 * a work item for this purpose 12685 */ 12686 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12687 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12688 } 12689#endif 12690 } 12691 12692 sc->rx_mode = rx_mode; 12693 12694 /* schedule the rx_mode command */ 12695 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12696 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12697 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12698 BXE_CORE_UNLOCK(sc); 12699 return; 12700 } 12701 12702 if (IS_PF(sc)) { 12703 bxe_set_storm_rx_mode(sc); 12704 } 12705#if 0 12706 else { 12707 /* 12708 * Configuring mcast to a VF involves sleeping (when we 12709 * wait for the PF's response). Since this function is 12710 * called from a non sleepable context we must schedule 12711 * a work item for this purpose 12712 */ 12713 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12714 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12715 } 12716#endif 12717 12718 BXE_CORE_UNLOCK(sc); 12719} 12720 12721static void 12722bxe_set_rx_mode(struct bxe_softc *sc) 12723{ 12724 taskqueue_enqueue(sc->rx_mode_tq, &sc->rx_mode_tq_task); 12725} 12726 12727/* update flags in shmem */ 12728static void 12729bxe_update_drv_flags(struct bxe_softc *sc, 12730 uint32_t flags, 12731 uint32_t set) 12732{ 12733 uint32_t drv_flags; 12734 12735 if (SHMEM2_HAS(sc, drv_flags)) { 12736 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12737 drv_flags = SHMEM2_RD(sc, drv_flags); 12738 12739 if (set) { 12740 SET_FLAGS(drv_flags, flags); 12741 } else { 12742 RESET_FLAGS(drv_flags, flags); 12743 } 12744 12745 SHMEM2_WR(sc, drv_flags, drv_flags); 12746 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12747 12748 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12749 } 12750} 12751 12752/* periodic timer callout routine, only runs when the interface is up */ 12753 12754static void 12755bxe_periodic_callout_func(void *xsc) 12756{ 12757 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12758 int i; 12759 12760 if (!BXE_CORE_TRYLOCK(sc)) { 12761 /* just bail and try again next time */ 12762 12763 if ((sc->state == BXE_STATE_OPEN) && 12764 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12765 /* schedule the next periodic callout */ 12766 callout_reset(&sc->periodic_callout, hz, 12767 bxe_periodic_callout_func, sc); 12768 } 12769 12770 return; 12771 } 12772 12773 if ((sc->state != BXE_STATE_OPEN) || 12774 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12775 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12776 BXE_CORE_UNLOCK(sc); 12777 return; 12778 } 12779 12780 /* Check for TX timeouts on any fastpath. */ 12781 FOR_EACH_QUEUE(sc, i) { 12782 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12783 /* Ruh-Roh, chip was reset! */ 12784 break; 12785 } 12786 } 12787 12788 if (!CHIP_REV_IS_SLOW(sc)) { 12789 /* 12790 * This barrier is needed to ensure the ordering between the writing 12791 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12792 * the reading here. 
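 * (Without the fence the load of sc->port.pmf below could be reordered ahead of the state checks above and briefly observe a stale value.)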
12793 */ 12794 mb(); 12795 if (sc->port.pmf) { 12796 BXE_PHY_LOCK(sc); 12797 elink_period_func(&sc->link_params, &sc->link_vars); 12798 BXE_PHY_UNLOCK(sc); 12799 } 12800 } 12801 12802 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 12803 int mb_idx = SC_FW_MB_IDX(sc); 12804 uint32_t drv_pulse; 12805 uint32_t mcp_pulse; 12806 12807 ++sc->fw_drv_pulse_wr_seq; 12808 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12809 12810 drv_pulse = sc->fw_drv_pulse_wr_seq; 12811 bxe_drv_pulse(sc); 12812 12813 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12814 MCP_PULSE_SEQ_MASK); 12815 12816 /* 12817 * The delta between driver pulse and mcp response should 12818 * be 1 (before mcp response) or 0 (after mcp response). 12819 */ 12820 if ((drv_pulse != mcp_pulse) && 12821 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12822 /* someone lost a heartbeat... */ 12823 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12824 drv_pulse, mcp_pulse); 12825 } 12826 } 12827 12828 /* state is BXE_STATE_OPEN */ 12829 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12830 12831#if 0 12832 /* sample VF bulletin board for new posts from PF */ 12833 if (IS_VF(sc)) { 12834 bxe_sample_bulletin(sc); 12835 } 12836#endif 12837 12838 BXE_CORE_UNLOCK(sc); 12839 12840 if ((sc->state == BXE_STATE_OPEN) && 12841 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12842 /* schedule the next periodic callout */ 12843 callout_reset(&sc->periodic_callout, hz, 12844 bxe_periodic_callout_func, sc); 12845 } 12846} 12847 12848static void 12849bxe_periodic_start(struct bxe_softc *sc) 12850{ 12851 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12852 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12853} 12854 12855static void 12856bxe_periodic_stop(struct bxe_softc *sc) 12857{ 12858 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12859 callout_drain(&sc->periodic_callout); 12860} 12861 12862/* start the controller */ 12863static __noinline int 12864bxe_nic_load(struct bxe_softc *sc, 12865 int load_mode) 12866{ 12867 uint32_t val; 12868 int load_code = 0; 12869 int i, rc = 0; 12870 12871 BXE_CORE_LOCK_ASSERT(sc); 12872 12873 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12874 12875 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12876 12877 if (IS_PF(sc)) { 12878 /* must be called before memory allocation and HW init */ 12879 bxe_ilt_set_info(sc); 12880 } 12881 12882 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12883 12884 bxe_set_fp_rx_buf_size(sc); 12885 12886 if (bxe_alloc_fp_buffers(sc) != 0) { 12887 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12888 sc->state = BXE_STATE_CLOSED; 12889 rc = ENOMEM; 12890 goto bxe_nic_load_error0; 12891 } 12892 12893 if (bxe_alloc_mem(sc) != 0) { 12894 sc->state = BXE_STATE_CLOSED; 12895 rc = ENOMEM; 12896 goto bxe_nic_load_error0; 12897 } 12898 12899 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12900 sc->state = BXE_STATE_CLOSED; 12901 rc = ENOMEM; 12902 goto bxe_nic_load_error0; 12903 } 12904 12905 if (IS_PF(sc)) { 12906 /* set pf load just before approaching the MCP */ 12907 bxe_set_pf_load(sc); 12908 12909 /* if MCP exists send load request and analyze response */ 12910 if (!BXE_NOMCP(sc)) { 12911 /* attempt to load pf */ 12912 if (bxe_nic_load_request(sc, &load_code) != 0) { 12913 sc->state = BXE_STATE_CLOSED; 12914 rc = ENXIO; 12915 goto bxe_nic_load_error1; 12916 } 12917 12918 /* what did the MCP say? 
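The load_code answer encodes whether this function is the first to load on the chip or port, and bxe_init_hw() uses it to pick common vs. port vs. function-only initialization.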
*/ 12919 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12920 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12921 sc->state = BXE_STATE_CLOSED; 12922 rc = ENXIO; 12923 goto bxe_nic_load_error2; 12924 } 12925 } else { 12926 BLOGI(sc, "Device has no MCP!\n"); 12927 load_code = bxe_nic_load_no_mcp(sc); 12928 } 12929 12930 /* mark PMF if applicable */ 12931 bxe_nic_load_pmf(sc, load_code); 12932 12933 /* Init Function state controlling object */ 12934 bxe_init_func_obj(sc); 12935 12936 /* Initialize HW */ 12937 if (bxe_init_hw(sc, load_code) != 0) { 12938 BLOGE(sc, "HW init failed\n"); 12939 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12940 sc->state = BXE_STATE_CLOSED; 12941 rc = ENXIO; 12942 goto bxe_nic_load_error2; 12943 } 12944 } 12945 12946 /* attach interrupts */ 12947 if (bxe_interrupt_attach(sc) != 0) { 12948 sc->state = BXE_STATE_CLOSED; 12949 rc = ENXIO; 12950 goto bxe_nic_load_error2; 12951 } 12952 12953 bxe_nic_init(sc, load_code); 12954 12955 /* Init per-function objects */ 12956 if (IS_PF(sc)) { 12957 bxe_init_objs(sc); 12958 // XXX bxe_iov_nic_init(sc); 12959 12960 /* set AFEX default VLAN tag to an invalid value */ 12961 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12962 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12963 12964 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12965 rc = bxe_func_start(sc); 12966 if (rc) { 12967 BLOGE(sc, "Function start failed!\n"); 12968 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12969 sc->state = BXE_STATE_ERROR; 12970 goto bxe_nic_load_error3; 12971 } 12972 12973 /* send LOAD_DONE command to MCP */ 12974 if (!BXE_NOMCP(sc)) { 12975 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12976 if (!load_code) { 12977 BLOGE(sc, "MCP response failure, aborting\n"); 12978 sc->state = BXE_STATE_ERROR; 12979 rc = ENXIO; 12980 goto bxe_nic_load_error3; 12981 } 12982 } 12983 12984 rc = bxe_setup_leading(sc); 12985 if (rc) { 12986 BLOGE(sc, "Setup leading failed!\n"); 12987 sc->state = BXE_STATE_ERROR; 12988 goto bxe_nic_load_error3; 12989 } 12990 12991 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12992 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12993 if (rc) { 12994 BLOGE(sc, "Queue(%d) setup failed\n", i); 12995 sc->state = BXE_STATE_ERROR; 12996 goto bxe_nic_load_error3; 12997 } 12998 } 12999 13000 rc = bxe_init_rss_pf(sc); 13001 if (rc) { 13002 BLOGE(sc, "PF RSS init failed\n"); 13003 sc->state = BXE_STATE_ERROR; 13004 goto bxe_nic_load_error3; 13005 } 13006 } 13007 /* XXX VF */ 13008#if 0 13009 else { /* VF */ 13010 FOR_EACH_ETH_QUEUE(sc, i) { 13011 rc = bxe_vfpf_setup_q(sc, i); 13012 if (rc) { 13013 BLOGE(sc, "Queue(%d) setup failed\n", i); 13014 sc->state = BXE_STATE_ERROR; 13015 goto bxe_nic_load_error3; 13016 } 13017 } 13018 } 13019#endif 13020 13021 /* now when Clients are configured we are ready to work */ 13022 sc->state = BXE_STATE_OPEN; 13023 13024 /* Configure a ucast MAC */ 13025 if (IS_PF(sc)) { 13026 rc = bxe_set_eth_mac(sc, TRUE); 13027 } 13028#if 0 13029 else { /* IS_VF(sc) */ 13030 rc = bxe_vfpf_set_mac(sc); 13031 } 13032#endif 13033 if (rc) { 13034 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13035 sc->state = BXE_STATE_ERROR; 13036 goto bxe_nic_load_error3; 13037 } 13038 13039#if 0 13040 if (IS_PF(sc) && sc->pending_max) { 13041 /* for AFEX */ 13042 bxe_update_max_mf_config(sc, sc->pending_max); 13043 sc->pending_max = 0; 13044 } 13045#endif 13046 13047 if (sc->port.pmf) { 13048 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13049 if (rc) { 13050 sc->state = BXE_STATE_ERROR; 13051 goto bxe_nic_load_error3; 
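/* (the error3 label below unwinds in reverse: disable interrupts, squeeze any queued ecore objects, then detach the ISR) */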
13052 } 13053 } 13054 13055 sc->link_params.feature_config_flags &= 13056 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13057 13058 /* start fast path */ 13059 13060 /* Initialize Rx filter */ 13061 bxe_set_rx_mode(sc); 13062 13063 /* start the Tx */ 13064 switch (/* XXX load_mode */LOAD_OPEN) { 13065 case LOAD_NORMAL: 13066 case LOAD_OPEN: 13067 break; 13068 13069 case LOAD_DIAG: 13070 case LOAD_LOOPBACK_EXT: 13071 sc->state = BXE_STATE_DIAG; 13072 break; 13073 13074 default: 13075 break; 13076 } 13077 13078 if (sc->port.pmf) { 13079 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13080 } else { 13081 bxe_link_status_update(sc); 13082 } 13083 13084 /* start the periodic timer callout */ 13085 bxe_periodic_start(sc); 13086 13087 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13088 /* mark driver is loaded in shmem2 */ 13089 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13090 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13091 (val | 13092 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13093 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13094 } 13095 13096 /* wait for all pending SP commands to complete */ 13097 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13098 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13099 bxe_periodic_stop(sc); 13100 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13101 return (ENXIO); 13102 } 13103 13104#if 0 13105 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13106 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13107 bxe_dcbx_init(sc, FALSE); 13108 } 13109#endif 13110 13111 /* Tell the stack the driver is running! */ 13112 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING; 13113 13114 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13115 13116 return (0); 13117 13118bxe_nic_load_error3: 13119 13120 if (IS_PF(sc)) { 13121 bxe_int_disable_sync(sc, 1); 13122 13123 /* clean out queued objects */ 13124 bxe_squeeze_objects(sc); 13125 } 13126 13127 bxe_interrupt_detach(sc); 13128 13129bxe_nic_load_error2: 13130 13131 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13132 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13133 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13134 } 13135 13136 sc->port.pmf = 0; 13137 13138bxe_nic_load_error1: 13139 13140 /* clear pf_load status, as it was already set */ 13141 if (IS_PF(sc)) { 13142 bxe_clear_pf_load(sc); 13143 } 13144 13145bxe_nic_load_error0: 13146 13147 bxe_free_fw_stats_mem(sc); 13148 bxe_free_fp_buffers(sc); 13149 bxe_free_mem(sc); 13150 13151 return (rc); 13152} 13153 13154static int 13155bxe_init_locked(struct bxe_softc *sc) 13156{ 13157 int other_engine = SC_PATH(sc) ? 0 : 1; 13158 uint8_t other_load_status, load_status; 13159 uint8_t global = FALSE; 13160 int rc; 13161 13162 BXE_CORE_LOCK_ASSERT(sc); 13163 13164 /* check if the driver is already running */ 13165 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) { 13166 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13167 return (0); 13168 } 13169 13170 bxe_set_power_state(sc, PCI_PM_D0); 13171 13172 /* 13173 * If parity occurred during the unload, then attentions and/or 13174 * RECOVERY_IN_PROGRES may still be set. If so we want the first function 13175 * loaded on the current engine to complete the recovery. Parity recovery 13176 * is only relevant for PF driver. 
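 * (The function that wins bxe_trylock_leader_lock() below becomes the recovery leader and drives the actual reset via bxe_leader_reset().)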
13177 */ 13178 if (IS_PF(sc)) { 13179 other_load_status = bxe_get_load_status(sc, other_engine); 13180 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13181 13182 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13183 bxe_chk_parity_attn(sc, &global, TRUE)) { 13184 do { 13185 /* 13186 * If there are attentions and they are in global blocks, set 13187 * the GLOBAL_RESET bit regardless whether it will be this 13188 * function that will complete the recovery or not. 13189 */ 13190 if (global) { 13191 bxe_set_reset_global(sc); 13192 } 13193 13194 /* 13195 * Only the first function on the current engine should try 13196 * to recover in open. In case of attentions in global blocks 13197 * only the first in the chip should try to recover. 13198 */ 13199 if ((!load_status && (!global || !other_load_status)) && 13200 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13201 BLOGI(sc, "Recovered during init\n"); 13202 break; 13203 } 13204 13205 /* recovery has failed... */ 13206 bxe_set_power_state(sc, PCI_PM_D3hot); 13207 sc->recovery_state = BXE_RECOVERY_FAILED; 13208 13209 BLOGE(sc, "Recovery flow hasn't properly " 13210 "completed yet, try again later. " 13211 "If you still see this message after a " 13212 "few retries then power cycle is required.\n"); 13213 13214 rc = ENXIO; 13215 goto bxe_init_locked_done; 13216 } while (0); 13217 } 13218 } 13219 13220 sc->recovery_state = BXE_RECOVERY_DONE; 13221 13222 rc = bxe_nic_load(sc, LOAD_OPEN); 13223 13224bxe_init_locked_done: 13225 13226 if (rc) { 13227 /* Tell the stack the driver is NOT running! */ 13228 BLOGE(sc, "Initialization failed, " 13229 "stack notified driver is NOT running!\n"); 13230 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING; 13231 } 13232 13233 return (rc); 13234} 13235 13236static int 13237bxe_stop_locked(struct bxe_softc *sc) 13238{ 13239 BXE_CORE_LOCK_ASSERT(sc); 13240 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13241} 13242 13243/* 13244 * Handles controller initialization when called from an unlocked routine. 13245 * ifconfig calls this function. 13246 * 13247 * Returns: 13248 * void 13249 */ 13250static void 13251bxe_init(void *xsc) 13252{ 13253 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13254 13255 BXE_CORE_LOCK(sc); 13256 bxe_init_locked(sc); 13257 BXE_CORE_UNLOCK(sc); 13258} 13259 13260static int 13261bxe_init_ifnet(struct bxe_softc *sc) 13262{ 13263 struct ifnet *ifp; 13264 13265 /* ifconfig entrypoint for media type/status reporting */ 13266 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13267 bxe_ifmedia_update, 13268 bxe_ifmedia_status); 13269 13270 /* set the default interface values */ 13271 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13272 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13273 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13274 13275 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? 
*/ 13276 13277 /* allocate the ifnet structure */ 13278 if ((ifp = if_alloc(IFT_ETHER)) == NULL) { 13279 BLOGE(sc, "Interface allocation failed!\n"); 13280 return (ENXIO); 13281 } 13282 13283 ifp->if_softc = sc; 13284 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13285 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 13286 ifp->if_ioctl = bxe_ioctl; 13287 ifp->if_start = bxe_tx_start; 13288#if __FreeBSD_version >= 800000 13289 ifp->if_transmit = bxe_tx_mq_start; 13290 ifp->if_qflush = bxe_mq_flush; 13291#endif 13292#ifdef FreeBSD8_0 13293 ifp->if_timer = 0; 13294#endif 13295 ifp->if_init = bxe_init; 13296 ifp->if_mtu = sc->mtu; 13297 ifp->if_hwassist = (CSUM_IP | 13298 CSUM_TCP | 13299 CSUM_UDP | 13300 CSUM_TSO | 13301 CSUM_TCP_IPV6 | 13302 CSUM_UDP_IPV6); 13303 ifp->if_capabilities = 13304#if __FreeBSD_version < 700000 13305 (IFCAP_VLAN_MTU | 13306 IFCAP_VLAN_HWTAGGING | 13307 IFCAP_HWCSUM | 13308 IFCAP_JUMBO_MTU | 13309 IFCAP_LRO); 13310#else 13311 (IFCAP_VLAN_MTU | 13312 IFCAP_VLAN_HWTAGGING | 13313 IFCAP_VLAN_HWTSO | 13314 IFCAP_VLAN_HWFILTER | 13315 IFCAP_VLAN_HWCSUM | 13316 IFCAP_HWCSUM | 13317 IFCAP_JUMBO_MTU | 13318 IFCAP_LRO | 13319 IFCAP_TSO4 | 13320 IFCAP_TSO6 | 13321 IFCAP_WOL_MAGIC); 13322#endif 13323 ifp->if_capenable = ifp->if_capabilities; 13324 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */ 13325#if __FreeBSD_version < 1000025 13326 ifp->if_baudrate = 1000000000; 13327#else 13328 if_initbaudrate(ifp, IF_Gbps(10)); 13329#endif 13330 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size; 13331 13332 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 13333 IFQ_SET_READY(&ifp->if_snd); 13334 13335 sc->ifnet = ifp; 13336 13337 /* attach to the Ethernet interface list */ 13338 ether_ifattach(ifp, sc->link_params.mac_addr); 13339 13340 return (0); 13341} 13342 13343static void 13344bxe_deallocate_bars(struct bxe_softc *sc) 13345{ 13346 int i; 13347 13348 for (i = 0; i < MAX_BARS; i++) { 13349 if (sc->bar[i].resource != NULL) { 13350 bus_release_resource(sc->dev, 13351 SYS_RES_MEMORY, 13352 sc->bar[i].rid, 13353 sc->bar[i].resource); 13354 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13355 i, PCIR_BAR(i)); 13356 } 13357 } 13358} 13359 13360static int 13361bxe_allocate_bars(struct bxe_softc *sc) 13362{ 13363 u_int flags; 13364 int i; 13365 13366 memset(sc->bar, 0, sizeof(sc->bar)); 13367 13368 for (i = 0; i < MAX_BARS; i++) { 13369 13370 /* memory resources reside at BARs 0, 2, 4 */ 13371 /* Run `pciconf -lb` to see mappings */ 13372 if ((i != 0) && (i != 2) && (i != 4)) { 13373 continue; 13374 } 13375 13376 sc->bar[i].rid = PCIR_BAR(i); 13377 13378 flags = RF_ACTIVE; 13379 if (i == 0) { 13380 flags |= RF_SHAREABLE; 13381 } 13382 13383 if ((sc->bar[i].resource = 13384 bus_alloc_resource_any(sc->dev, 13385 SYS_RES_MEMORY, 13386 &sc->bar[i].rid, 13387 flags)) == NULL) { 13388#if 0 13389 /* BAR4 doesn't exist for E1 */ 13390 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13391 i, PCIR_BAR(i)); 13392#endif 13393 return (0); 13394 } 13395 13396 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13397 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13398 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13399 13400 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13401 i, PCIR_BAR(i), 13402 (void *)rman_get_start(sc->bar[i].resource), 13403 (void *)rman_get_end(sc->bar[i].resource), 13404 rman_get_size(sc->bar[i].resource), 13405 (void *)sc->bar[i].kva); 13406 } 
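/* NOTE: an allocation failure above also ends in return (0); per the #if 0'd message, BAR4 simply does not exist on E1 devices, so a missing BAR is tolerated here rather than failing the attach. */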
13407 13408 return (0); 13409 } 13410 13411 static void 13412 bxe_get_function_num(struct bxe_softc *sc) 13413 { 13414 uint32_t val = 0; 13415 13416 /* 13417 * Read the ME register to get the function number. The ME register 13418 * holds the relative-function number and absolute-function number. The 13419 * absolute-function number appears only in E2 and above. Before that 13420 * these bits always contained zero, therefore we cannot blindly use them. 13421 */ 13422 13423 val = REG_RD(sc, BAR_ME_REGISTER); 13424 13425 sc->pfunc_rel = 13426 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13427 sc->path_id = 13428 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13429 13430 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13431 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13432 } else { 13433 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13434 } 13435 13436 BLOGD(sc, DBG_LOAD, 13437 "Relative function %d, Absolute function %d, Path %d\n", 13438 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13439 } 13440 13441 static uint32_t 13442 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13443 { 13444 uint32_t shmem2_size; 13445 uint32_t offset; 13446 uint32_t mf_cfg_offset_value; 13447 13448 /* Non 57712 */ 13449 offset = (SHMEM_RD(sc, func_mb) + 13450 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13451 13452 /* 57712 plus */ 13453 if (sc->devinfo.shmem2_base != 0) { 13454 shmem2_size = SHMEM2_RD(sc, size); 13455 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13456 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13457 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13458 offset = mf_cfg_offset_value; 13459 } 13460 } 13461 } 13462 13463 return (offset); 13464 } 13465 13466 static uint32_t 13467 bxe_pcie_capability_read(struct bxe_softc *sc, 13468 int reg, 13469 int width) 13470 { 13471 int pcie_reg; 13472 13473 /* ensure PCIe capability is enabled */ 13474 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13475 if (pcie_reg != 0) { 13476 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13477 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13478 } 13479 } 13480 13481 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13482 13483 return (0); 13484 } 13485 13486 static uint8_t 13487 bxe_is_pcie_pending(struct bxe_softc *sc) 13488 { 13489 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13490 PCIM_EXP_STA_TRANSACTION_PND); 13491 } 13492 13493 /* 13494 * Walk the PCI capabilities list for the device to find what features are 13495 * supported. These capabilities may be enabled/disabled by firmware so it's 13496 * best to walk the list rather than make assumptions. 13497 */ 13498 static void 13499 bxe_probe_pci_caps(struct bxe_softc *sc) 13500 { 13501 uint16_t link_status; 13502 int reg; 13503 13504 /* check if PCI Power Management is enabled */ 13505 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13506 if (reg != 0) { 13507 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13508 13509 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13510 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13511 } 13512 } 13513 13514 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13515 13516 /* handle PCIe 2.0 workarounds for 57710 */ 13517 if (CHIP_IS_E1(sc)) { 13518 /* workaround for 57710 errata E4_57710_27462 */ 13519 sc->devinfo.pcie_link_speed = 13520 (REG_RD(sc, 0x3d04) & (1 << 24)) ?
2 : 1; 13521 13522 /* workaround for 57710 errata E4_57710_27488 */ 13523 sc->devinfo.pcie_link_width = 13524 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13525 if (sc->devinfo.pcie_link_speed > 1) { 13526 sc->devinfo.pcie_link_width = 13527 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13528 } 13529 } else { 13530 sc->devinfo.pcie_link_speed = 13531 (link_status & PCIM_LINK_STA_SPEED); 13532 sc->devinfo.pcie_link_width = 13533 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13534 } 13535 13536 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13537 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13538 13539 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13540 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13541 13542 /* check if MSI capability is enabled */ 13543 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13544 if (reg != 0) { 13545 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13546 13547 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13548 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13549 } 13550 } 13551 13552 /* check if MSI-X capability is enabled */ 13553 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13554 if (reg != 0) { 13555 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13556 13557 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13558 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13559 } 13560 } 13561 } 13562 13563 static int 13564 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13565 { 13566 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13567 uint32_t val; 13568 13569 /* get the outer vlan if we're in switch-dependent mode */ 13570 13571 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13572 mf_info->ext_id = (uint16_t)val; 13573 13574 mf_info->multi_vnics_mode = 1; 13575 13576 if (!VALID_OVLAN(mf_info->ext_id)) { 13577 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13578 return (1); 13579 } 13580 13581 /* get the capabilities */ 13582 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13583 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13584 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13585 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13586 FUNC_MF_CFG_PROTOCOL_FCOE) { 13587 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13588 } else { 13589 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13590 } 13591 13592 mf_info->vnics_per_port = 13593 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13594 13595 return (0); 13596 } 13597 13598 static uint32_t 13599 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13600 { 13601 uint32_t retval = 0; 13602 uint32_t val; 13603 13604 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13605 13606 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13607 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13608 retval |= MF_PROTO_SUPPORT_ETHERNET; 13609 } 13610 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13611 retval |= MF_PROTO_SUPPORT_ISCSI; 13612 } 13613 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13614 retval |= MF_PROTO_SUPPORT_FCOE; 13615 } 13616 } 13617 13618 return (retval); 13619 } 13620 13621 static int 13622 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13623 { 13624 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13625 uint32_t val; 13626 13627 /* 13628 * There is no outer vlan if we're in switch-independent mode. 13629 * If the mac is valid then assume multi-function.
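 * (The mac validity test itself lives in bxe_get_mf_cfg_info(), which selects SI mode when mac_upper differs from FUNC_MF_CFG_UPPERMAC_DEFAULT.)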
13630 */ 13631 13632 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13633 13634 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13635 13636 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13637 13638 mf_info->vnics_per_port = 13639 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13640 13641 return (0); 13642} 13643 13644static int 13645bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13646{ 13647 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13648 uint32_t e1hov_tag; 13649 uint32_t func_config; 13650 uint32_t niv_config; 13651 13652 mf_info->multi_vnics_mode = 1; 13653 13654 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13655 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13656 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13657 13658 mf_info->ext_id = 13659 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13660 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13661 13662 mf_info->default_vlan = 13663 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13664 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13665 13666 mf_info->niv_allowed_priorities = 13667 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13668 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13669 13670 mf_info->niv_default_cos = 13671 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13672 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13673 13674 mf_info->afex_vlan_mode = 13675 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13676 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13677 13678 mf_info->niv_mba_enabled = 13679 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13680 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13681 13682 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13683 13684 mf_info->vnics_per_port = 13685 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13686 13687 return (0); 13688} 13689 13690static int 13691bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13692{ 13693 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13694 uint32_t mf_cfg1; 13695 uint32_t mf_cfg2; 13696 uint32_t ovlan1; 13697 uint32_t ovlan2; 13698 uint8_t i, j; 13699 13700 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13701 SC_PORT(sc)); 13702 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13703 mf_info->mf_config[SC_VN(sc)]); 13704 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13705 mf_info->multi_vnics_mode); 13706 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13707 mf_info->vnics_per_port); 13708 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13709 mf_info->ext_id); 13710 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13711 mf_info->min_bw[0], mf_info->min_bw[1], 13712 mf_info->min_bw[2], mf_info->min_bw[3]); 13713 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13714 mf_info->max_bw[0], mf_info->max_bw[1], 13715 mf_info->max_bw[2], mf_info->max_bw[3]); 13716 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13717 sc->mac_addr_str); 13718 13719 /* various MF mode sanity checks... 
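In order below: no enumerated function may be hidden, vnics_per_port must agree with multi_vnics_mode, every SD function needs a valid ovlan, and no two functions on the same port may share an ovlan.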
*/ 13720 13721 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13722 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13723 SC_PORT(sc)); 13724 return (1); 13725 } 13726 13727 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13728 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13729 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13730 return (1); 13731 } 13732 13733 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13734 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13735 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13736 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13737 SC_VN(sc), OVLAN(sc)); 13738 return (1); 13739 } 13740 13741 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13742 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13743 mf_info->multi_vnics_mode, OVLAN(sc)); 13744 return (1); 13745 } 13746 13747 /* 13748 * Verify all functions are either in MF or SF mode. If MF, make 13749 * sure that all non-hidden functions have a valid ovlan. If SF, 13750 * make sure that all non-hidden functions have an invalid ovlan. 13751 */ 13752 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13753 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13754 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13755 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13756 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13757 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13758 BLOGE(sc, "mf_mode=SD function %d MF config " 13759 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13760 i, mf_info->multi_vnics_mode, ovlan1); 13761 return (1); 13762 } 13763 } 13764 13765 /* Verify all funcs on the same port each have a different ovlan. */ 13766 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13767 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13768 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13769 /* iterate from the next function on the port to the max func */ 13770 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13771 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13772 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13773 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13774 VALID_OVLAN(ovlan1) && 13775 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13776 VALID_OVLAN(ovlan2) && 13777 (ovlan1 == ovlan2)) { 13778 BLOGE(sc, "mf_mode=SD functions %d and %d " 13779 "have the same ovlan (%d)\n", 13780 i, j, ovlan1); 13781 return (1); 13782 } 13783 } 13784 } 13785 } /* MULTI_FUNCTION_SD */ 13786 13787 return (0); 13788 } 13789 13790 static int 13791 bxe_get_mf_cfg_info(struct bxe_softc *sc) 13792 { 13793 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13794 uint32_t val, mac_upper; 13795 uint8_t i, vnic; 13796 13797 /* initialize mf_info defaults */ 13798 mf_info->vnics_per_port = 1; 13799 mf_info->multi_vnics_mode = FALSE; 13800 mf_info->path_has_ovlan = FALSE; 13801 mf_info->mf_mode = SINGLE_FUNCTION; 13802 13803 if (!CHIP_IS_MF_CAP(sc)) { 13804 return (0); 13805 } 13806 13807 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13808 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13809 return (1); 13810 } 13811 13812 /* get the MF mode (switch dependent / independent / single-function) */ 13813 13814 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13815 13816 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13817 { 13818 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13819 13820 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13821 13822 /* check for legal upper mac bytes */ 13823 if (mac_upper !=
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13824 mf_info->mf_mode = MULTI_FUNCTION_SI; 13825 } else { 13826 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13827 } 13828 13829 break; 13830 13831 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13832 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13833 13834 /* get outer vlan configuration */ 13835 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13836 13837 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13838 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13839 mf_info->mf_mode = MULTI_FUNCTION_SD; 13840 } else { 13841 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13842 } 13843 13844 break; 13845 13846 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13847 13848 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13849 return (0); 13850 13851 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13852 13853 /* 13854 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13855 * and the MAC address is valid. 13856 */ 13857 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13858 13859 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13860 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13861 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13862 } else { 13863 BLOGE(sc, "Invalid config for AFEX mode\n"); 13864 } 13865 13866 break; 13867 13868 default: 13869 13870 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13871 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13872 13873 return (1); 13874 } 13875 13876 /* set path mf_mode (which could be different than function mf_mode) */ 13877 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13878 mf_info->path_has_ovlan = TRUE; 13879 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13880 /* 13881 * Decide on path multi vnics mode. If we're not in MF mode and in 13882 * 4-port mode, this is good enough to check vnic-0 of the other port 13883 * on the same path 13884 */ 13885 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13886 uint8_t other_port = !(PORT_ID(sc) & 1); 13887 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13888 13889 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13890 13891 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
1 : 0; 13892 } 13893 } 13894 13895 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13896 /* invalid MF config */ 13897 if (SC_VN(sc) >= 1) { 13898 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13899 return (1); 13900 } 13901 13902 return (0); 13903 } 13904 13905 /* get the MF configuration */ 13906 mf_info->mf_config[SC_VN(sc)] = 13907 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13908 13909 switch(mf_info->mf_mode) 13910 { 13911 case MULTI_FUNCTION_SD: 13912 13913 bxe_get_shmem_mf_cfg_info_sd(sc); 13914 break; 13915 13916 case MULTI_FUNCTION_SI: 13917 13918 bxe_get_shmem_mf_cfg_info_si(sc); 13919 break; 13920 13921 case MULTI_FUNCTION_AFEX: 13922 13923 bxe_get_shmem_mf_cfg_info_niv(sc); 13924 break; 13925 13926 default: 13927 13928 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13929 mf_info->mf_mode); 13930 return (1); 13931 } 13932 13933 /* get the congestion management parameters */ 13934 13935 vnic = 0; 13936 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13937 /* get min/max bw */ 13938 val = MFCFG_RD(sc, func_mf_config[i].config); 13939 mf_info->min_bw[vnic] = 13940 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13941 mf_info->max_bw[vnic] = 13942 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13943 vnic++; 13944 } 13945 13946 return (bxe_check_valid_mf_cfg(sc)); 13947} 13948 13949static int 13950bxe_get_shmem_info(struct bxe_softc *sc) 13951{ 13952 int port; 13953 uint32_t mac_hi, mac_lo, val; 13954 13955 port = SC_PORT(sc); 13956 mac_hi = mac_lo = 0; 13957 13958 sc->link_params.sc = sc; 13959 sc->link_params.port = port; 13960 13961 /* get the hardware config info */ 13962 sc->devinfo.hw_config = 13963 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13964 sc->devinfo.hw_config2 = 13965 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13966 13967 sc->link_params.hw_led_mode = 13968 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13969 SHARED_HW_CFG_LED_MODE_SHIFT); 13970 13971 /* get the port feature config */ 13972 sc->port.config = 13973 SHMEM_RD(sc, dev_info.port_feature_config[port].config), 13974 13975 /* get the link params */ 13976 sc->link_params.speed_cap_mask[0] = 13977 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13978 sc->link_params.speed_cap_mask[1] = 13979 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13980 13981 /* get the lane config */ 13982 sc->link_params.lane_config = 13983 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13984 13985 /* get the link config */ 13986 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13987 sc->port.link_config[ELINK_INT_PHY] = val; 13988 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13989 sc->port.link_config[ELINK_EXT_PHY1] = 13990 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13991 13992 /* get the override preemphasis flag and enable it or turn it off */ 13993 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13994 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13995 sc->link_params.feature_config_flags |= 13996 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13997 } else { 13998 sc->link_params.feature_config_flags &= 13999 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14000 } 14001 14002 /* get the initial value of the link params */ 14003 sc->link_params.multi_phy_config = 14004 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 14005 14006 /* get external phy info */ 14007 sc->port.ext_phy_config = 14008 SHMEM_RD(sc, 
dev_info.port_hw_config[port].external_phy_config); 14009 14010 /* get the multifunction configuration */ 14011 bxe_get_mf_cfg_info(sc); 14012 14013 /* get the mac address */ 14014 if (IS_MF(sc)) { 14015 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14016 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14017 } else { 14018 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14019 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14020 } 14021 14022 if ((mac_lo == 0) && (mac_hi == 0)) { 14023 *sc->mac_addr_str = 0; 14024 BLOGE(sc, "No Ethernet address programmed!\n"); 14025 } else { 14026 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14027 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14028 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14029 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14030 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14031 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14032 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14033 "%02x:%02x:%02x:%02x:%02x:%02x", 14034 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14035 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14036 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14037 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14038 } 14039 14040#if 0 14041 if (!IS_MF(sc) && 14042 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14043 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14044 sc->flags |= BXE_NO_ISCSI; 14045 } 14046 if (!IS_MF(sc) && 14047 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14048 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14049 sc->flags |= BXE_NO_FCOE_FLAG; 14050 } 14051#endif 14052 14053 return (0); 14054} 14055 14056static void 14057bxe_get_tunable_params(struct bxe_softc *sc) 14058{ 14059 /* sanity checks */ 14060 14061 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14062 (bxe_interrupt_mode != INTR_MODE_MSI) && 14063 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14064 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14065 bxe_interrupt_mode = INTR_MODE_MSIX; 14066 } 14067 14068 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14069 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14070 bxe_queue_count = 0; 14071 } 14072 14073 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14074 if (bxe_max_rx_bufs == 0) { 14075 bxe_max_rx_bufs = RX_BD_USABLE; 14076 } else { 14077 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14078 bxe_max_rx_bufs = 2048; 14079 } 14080 } 14081 14082 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14083 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14084 bxe_hc_rx_ticks = 25; 14085 } 14086 14087 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14088 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14089 bxe_hc_tx_ticks = 50; 14090 } 14091 14092 if (bxe_max_aggregation_size == 0) { 14093 bxe_max_aggregation_size = TPA_AGG_SIZE; 14094 } 14095 14096 if (bxe_max_aggregation_size > 0xffff) { 14097 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14098 bxe_max_aggregation_size); 14099 bxe_max_aggregation_size = TPA_AGG_SIZE; 14100 } 14101 14102 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14103 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14104 bxe_mrrs = -1; 14105 } 14106 14107 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14108 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 14109 
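        /*
         * 0 requests the hardware default, i.e. AutoGrEEEn is then
         * governed by the NVRAM port config (see the FORCE_ON/FORCE_OFF
         * handling in bxe_get_phy_info() below).
         */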
bxe_autogreeen = 0; 14110 } 14111 14112 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14113 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14114 bxe_udp_rss = 0; 14115 } 14116 14117 /* pull in user settings */ 14118 14119 sc->interrupt_mode = bxe_interrupt_mode; 14120 sc->max_rx_bufs = bxe_max_rx_bufs; 14121 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14122 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14123 sc->max_aggregation_size = bxe_max_aggregation_size; 14124 sc->mrrs = bxe_mrrs; 14125 sc->autogreeen = bxe_autogreeen; 14126 sc->udp_rss = bxe_udp_rss; 14127 14128 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14129 sc->num_queues = 1; 14130 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14131 sc->num_queues = 14132 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14133 MAX_RSS_CHAINS); 14134 if (sc->num_queues > mp_ncpus) { 14135 sc->num_queues = mp_ncpus; 14136 } 14137 } 14138 14139 BLOGD(sc, DBG_LOAD, 14140 "User Config: " 14141 "debug=0x%lx " 14142 "interrupt_mode=%d " 14143 "queue_count=%d " 14144 "hc_rx_ticks=%d " 14145 "hc_tx_ticks=%d " 14146 "rx_budget=%d " 14147 "max_aggregation_size=%d " 14148 "mrrs=%d " 14149 "autogreeen=%d " 14150 "udp_rss=%d\n", 14151 bxe_debug, 14152 sc->interrupt_mode, 14153 sc->num_queues, 14154 sc->hc_rx_ticks, 14155 sc->hc_tx_ticks, 14156 bxe_rx_budget, 14157 sc->max_aggregation_size, 14158 sc->mrrs, 14159 sc->autogreeen, 14160 sc->udp_rss); 14161} 14162 14163static void 14164bxe_media_detect(struct bxe_softc *sc) 14165{ 14166 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14167 switch (sc->link_params.phy[phy_idx].media_type) { 14168 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14169 case ELINK_ETH_PHY_XFP_FIBER: 14170 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14171 sc->media = IFM_10G_SR; 14172 break; 14173 case ELINK_ETH_PHY_SFP_1G_FIBER: 14174 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14175 sc->media = IFM_1000_SX; 14176 break; 14177 case ELINK_ETH_PHY_KR: 14178 case ELINK_ETH_PHY_CX4: 14179 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14180 sc->media = IFM_10G_CX4; 14181 break; 14182 case ELINK_ETH_PHY_DA_TWINAX: 14183 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14184 sc->media = IFM_10G_TWINAX; 14185 break; 14186 case ELINK_ETH_PHY_BASE_T: 14187 if (sc->link_params.speed_cap_mask[0] & 14188 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14189 BLOGI(sc, "Found 10GBase-T media.\n"); 14190 sc->media = IFM_10G_T; 14191 } else { 14192 BLOGI(sc, "Found 1000Base-T media.\n"); 14193 sc->media = IFM_1000_T; 14194 } 14195 break; 14196 case ELINK_ETH_PHY_NOT_PRESENT: 14197 BLOGI(sc, "Media not present.\n"); 14198 sc->media = 0; 14199 break; 14200 case ELINK_ETH_PHY_UNSPECIFIED: 14201 default: 14202 BLOGI(sc, "Unknown media!\n"); 14203 sc->media = 0; 14204 break; 14205 } 14206} 14207 14208#define GET_FIELD(value, fname) \ 14209 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14210#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14211#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14212 14213static int 14214bxe_get_igu_cam_info(struct bxe_softc *sc) 14215{ 14216 int pfid = SC_FUNC(sc); 14217 int igu_sb_id; 14218 uint32_t val; 14219 uint8_t fid, igu_sb_cnt = 0; 14220 14221 sc->igu_base_sb = 0xff; 14222 14223 if (CHIP_INT_MODE_IS_BC(sc)) { 14224 int vn = SC_VN(sc); 14225 igu_sb_cnt = sc->igu_sb_cnt; 14226 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14227 FP_SB_MAX_E1x); 14228 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14229 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn));
        return (0);
    }

    /* IGU in normal mode - read CAM */
    for (igu_sb_id = 0;
         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
         igu_sb_id++) {
        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
            continue;
        }
        fid = IGU_FID(val);
        if ((fid & IGU_FID_ENCODE_IS_PF)) {
            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
                continue;
            }
            if (IGU_VEC(val) == 0) {
                /* default status block */
                sc->igu_dsb_id = igu_sb_id;
            } else {
                if (sc->igu_base_sb == 0xff) {
                    sc->igu_base_sb = igu_sb_id;
                }
                igu_sb_cnt++;
            }
        }
    }

    /*
     * With the new PF resource allocation in MFW T7.4 and above, the
     * number of CAM entries may not equal the value advertised in PCI.
     * The driver should use the minimum of the two as the actual status
     * block count.
     */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

    if (igu_sb_cnt == 0) {
        BLOGE(sc, "CAM configuration error\n");
        return (-1);
    }

    return (0);
}

/*
 * Gather various information from the device config space, the device itself,
 * shmem, and the user input.
 */
static int
bxe_get_device_info(struct bxe_softc *sc)
{
    uint32_t val;
    int rc;

    /* Get the data for the device */
    sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
    sc->devinfo.device_id = pci_get_device(sc->dev);
    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);

    /* get the chip revision (chip metal comes from pci config space) */
    sc->devinfo.chip_id =
    sc->link_params.chip_id =
        (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
         ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
         ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));

    /* force 57811 according to MISC register */
    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
        if (CHIP_IS_57810(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        } else if (CHIP_IS_57810_MF(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        }
        sc->devinfo.chip_id |= 0x1;
    }

    BLOGD(sc, DBG_LOAD,
          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
          sc->devinfo.chip_id,
          ((sc->devinfo.chip_id >> 16) & 0xffff),
          ((sc->devinfo.chip_id >> 12) & 0xf),
          ((sc->devinfo.chip_id >> 4) & 0xff),
          ((sc->devinfo.chip_id >> 0) & 0xf));

    val = (REG_RD(sc, 0x2874) & 0x55);
    if ((sc->devinfo.chip_id & 0x1) ||
        (CHIP_IS_E1(sc) && val) ||
        (CHIP_IS_E1H(sc) && (val == 0x55))) {
        sc->flags |= BXE_ONE_PORT_FLAG;
        BLOGD(sc, DBG_LOAD, "single port device\n");
    }

    /* set the doorbell size */
    sc->doorbell_size = (1 << BXE_DB_SHIFT);

    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
    if (CHIP_IS_E2E3(sc)) {
        /*
         * Read port4mode_en_ovwr[0]:
         * If 1, four port mode is in port4mode_en_ovwr[1].
         * If 0, four port mode is in port4mode_en[0].
         */
        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
        if (val & 1) {
            val = ((val >> 1) & 1);
        } else {
            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
        }

        sc->devinfo.chip_port_mode =
            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;

        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
    }

    /* get the function and path info for the device */
    bxe_get_function_num(sc);

    /* get the shared memory base address */
    sc->devinfo.shmem_base =
    sc->link_params.shmem_base =
        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    sc->devinfo.shmem2_base =
        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
                                  MISC_REG_GENERIC_CR_0));

    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);

    if (!sc->devinfo.shmem_base) {
        /* this should ONLY prevent upcoming shmem reads */
        BLOGI(sc, "MCP not active\n");
        sc->flags |= BXE_NO_MCP_FLAG;
        return (0);
    }

    /* make sure the shared memory contents are valid */
    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
        return (0);
    }
    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);

    /* get the bootcode version */
    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
    snprintf(sc->devinfo.bc_ver_str,
             sizeof(sc->devinfo.bc_ver_str),
             "%d.%d.%d",
             ((sc->devinfo.bc_ver >> 24) & 0xff),
             ((sc->devinfo.bc_ver >> 16) & 0xff),
             ((sc->devinfo.bc_ver >> 8) & 0xff));
    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);

    /* get the bootcode shmem address */
    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);

    /* clean indirect addresses as they're not used */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    if (IS_PF(sc)) {
        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
        if (CHIP_IS_E1x(sc)) {
            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
        }

        /*
         * Enable internal target-read (in case we are probed after PF
         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up.
         */
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
    }

    /* get the nvram size */
    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
    sc->devinfo.flash_size =
        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);

    /* get PCI capabilities */
    bxe_probe_pci_caps(sc);

    bxe_set_power_state(sc, PCI_PM_D0);

    /* get various configuration parameters from shmem */
    bxe_get_shmem_info(sc);

    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               PCIR_MSIX_CTRL),
                              2);
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    } else {
        sc->igu_sb_cnt = 1;
    }

    sc->igu_base_addr = BAR_IGU_INTMEM;

    /* initialize IGU parameters */
    if (CHIP_IS_E1x(sc)) {
        sc->devinfo.int_block = INT_BLOCK_HC;
        sc->igu_dsb_id = DEF_SB_IGU_ID;
        sc->igu_base_sb = 0;
    } else {
        sc->devinfo.int_block = INT_BLOCK_IGU;

        /* do not allow device reset during IGU info processing */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            int tout = 5000;

            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");

            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                tout--;
                DELAY(1000);
            }

            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
                return (-1);
            }
        }

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
        } else {
            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
        }

        rc = bxe_get_igu_cam_info(sc);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        if (rc) {
            return (rc);
        }
    }

    /*
     * Get base FW non-default (fast path) status block ID. This value is
     * used to initialize the fw_sb_id saved on the fp/queue structure to
     * determine the id used by the FW.
     */
    if (CHIP_IS_E1x(sc)) {
        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    } else {
        /*
         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
         * the same queue are indicated on the same IGU SB). So we prefer
         * FW and IGU SBs to be the same value.
14506 */ 14507 sc->base_fw_ndsb = sc->igu_base_sb; 14508 } 14509 14510 BLOGD(sc, DBG_LOAD, 14511 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14512 sc->igu_dsb_id, sc->igu_base_sb, 14513 sc->igu_sb_cnt, sc->base_fw_ndsb); 14514 14515 elink_phy_probe(&sc->link_params); 14516 14517 return (0); 14518} 14519 14520static void 14521bxe_link_settings_supported(struct bxe_softc *sc, 14522 uint32_t switch_cfg) 14523{ 14524 uint32_t cfg_size = 0; 14525 uint32_t idx; 14526 uint8_t port = SC_PORT(sc); 14527 14528 /* aggregation of supported attributes of all external phys */ 14529 sc->port.supported[0] = 0; 14530 sc->port.supported[1] = 0; 14531 14532 switch (sc->link_params.num_phys) { 14533 case 1: 14534 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14535 cfg_size = 1; 14536 break; 14537 case 2: 14538 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14539 cfg_size = 1; 14540 break; 14541 case 3: 14542 if (sc->link_params.multi_phy_config & 14543 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14544 sc->port.supported[1] = 14545 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14546 sc->port.supported[0] = 14547 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14548 } else { 14549 sc->port.supported[0] = 14550 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14551 sc->port.supported[1] = 14552 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14553 } 14554 cfg_size = 2; 14555 break; 14556 } 14557 14558 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14559 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14560 SHMEM_RD(sc, 14561 dev_info.port_hw_config[port].external_phy_config), 14562 SHMEM_RD(sc, 14563 dev_info.port_hw_config[port].external_phy_config2)); 14564 return; 14565 } 14566 14567 if (CHIP_IS_E3(sc)) 14568 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14569 else { 14570 switch (switch_cfg) { 14571 case ELINK_SWITCH_CFG_1G: 14572 sc->port.phy_addr = 14573 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14574 break; 14575 case ELINK_SWITCH_CFG_10G: 14576 sc->port.phy_addr = 14577 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14578 break; 14579 default: 14580 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14581 sc->port.link_config[0]); 14582 return; 14583 } 14584 } 14585 14586 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14587 14588 /* mask what we support according to speed_cap_mask per configuration */ 14589 for (idx = 0; idx < cfg_size; idx++) { 14590 if (!(sc->link_params.speed_cap_mask[idx] & 14591 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14592 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14593 } 14594 14595 if (!(sc->link_params.speed_cap_mask[idx] & 14596 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14597 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14598 } 14599 14600 if (!(sc->link_params.speed_cap_mask[idx] & 14601 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14602 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14603 } 14604 14605 if (!(sc->link_params.speed_cap_mask[idx] & 14606 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14607 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14608 } 14609 14610 if (!(sc->link_params.speed_cap_mask[idx] & 14611 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14612 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14613 } 14614 14615 if (!(sc->link_params.speed_cap_mask[idx] & 14616 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14617 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14618 } 14619 14620 if (!(sc->link_params.speed_cap_mask[idx] & 14621 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14622 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14623 } 14624 14625 if (!(sc->link_params.speed_cap_mask[idx] & 14626 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14627 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14628 } 14629 } 14630 14631 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14632 sc->port.supported[0], sc->port.supported[1]); 14633} 14634 14635static void 14636bxe_link_settings_requested(struct bxe_softc *sc) 14637{ 14638 uint32_t link_config; 14639 uint32_t idx; 14640 uint32_t cfg_size = 0; 14641 14642 sc->port.advertising[0] = 0; 14643 sc->port.advertising[1] = 0; 14644 14645 switch (sc->link_params.num_phys) { 14646 case 1: 14647 case 2: 14648 cfg_size = 1; 14649 break; 14650 case 3: 14651 cfg_size = 2; 14652 break; 14653 } 14654 14655 for (idx = 0; idx < cfg_size; idx++) { 14656 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14657 link_config = sc->port.link_config[idx]; 14658 14659 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14660 case PORT_FEATURE_LINK_SPEED_AUTO: 14661 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14662 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14663 sc->port.advertising[idx] |= sc->port.supported[idx]; 14664 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14665 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14666 sc->port.advertising[idx] |= 14667 (ELINK_SUPPORTED_100baseT_Half | 14668 ELINK_SUPPORTED_100baseT_Full); 14669 } else { 14670 /* force 10G, no AN */ 14671 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14672 sc->port.advertising[idx] |= 14673 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14674 continue; 14675 } 14676 break; 14677 14678 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14679 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14680 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14681 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14682 ADVERTISED_TP); 14683 } else { 14684 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14685 "speed_cap_mask=0x%08x\n", 14686 link_config, sc->link_params.speed_cap_mask[idx]); 14687 return; 14688 } 14689 break; 14690 14691 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14692 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14693 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14694 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14695 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14696 ADVERTISED_TP); 14697 } else { 14698 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14699 "speed_cap_mask=0x%08x\n", 14700 link_config, sc->link_params.speed_cap_mask[idx]); 14701 return; 14702 } 14703 break; 14704 14705 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14706 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14707 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14708 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14709 ADVERTISED_TP); 14710 } else { 14711 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14712 "speed_cap_mask=0x%08x\n", 14713 link_config, sc->link_params.speed_cap_mask[idx]); 14714 return; 14715 } 14716 break; 14717 14718 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14719 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14720 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14721 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14722 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14723 ADVERTISED_TP); 14724 } else { 14725 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14726 "speed_cap_mask=0x%08x\n", 14727 link_config, sc->link_params.speed_cap_mask[idx]); 14728 return; 14729 } 14730 break; 14731 14732 case PORT_FEATURE_LINK_SPEED_1G: 14733 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14734 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14735 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14736 ADVERTISED_TP); 14737 } else { 14738 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14739 "speed_cap_mask=0x%08x\n", 14740 link_config, sc->link_params.speed_cap_mask[idx]); 14741 return; 14742 } 14743 break; 14744 14745 case PORT_FEATURE_LINK_SPEED_2_5G: 14746 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14747 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14748 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14749 ADVERTISED_TP); 14750 } else { 14751 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14752 "speed_cap_mask=0x%08x\n", 14753 link_config, sc->link_params.speed_cap_mask[idx]); 14754 return; 14755 } 14756 break; 14757 14758 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14759 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14760 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14761 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14762 ADVERTISED_FIBRE); 14763 } else { 14764 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14765 "speed_cap_mask=0x%08x\n", 14766 link_config, sc->link_params.speed_cap_mask[idx]); 14767 return; 14768 } 14769 break; 14770 14771 case PORT_FEATURE_LINK_SPEED_20G: 14772 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14773 break; 14774 14775 default: 14776 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14777 "speed_cap_mask=0x%08x\n", 14778 link_config, sc->link_params.speed_cap_mask[idx]); 14779 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14780 sc->port.advertising[idx] = sc->port.supported[idx]; 14781 break; 14782 } 14783 14784 sc->link_params.req_flow_ctrl[idx] = 14785 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14786 14787 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14788 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14789 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14790 } else { 14791 bxe_set_requested_fc(sc); 14792 } 14793 } 14794 14795 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14796 "req_flow_ctrl=0x%x advertising=0x%x\n", 14797 sc->link_params.req_line_speed[idx], 14798 sc->link_params.req_duplex[idx], 14799 sc->link_params.req_flow_ctrl[idx], 14800 sc->port.advertising[idx]); 14801 } 14802} 14803 14804static void 14805bxe_get_phy_info(struct bxe_softc *sc) 14806{ 14807 uint8_t port = SC_PORT(sc); 14808 uint32_t config = sc->port.config; 14809 uint32_t eee_mode; 14810 14811 /* shmem data already read in bxe_get_shmem_info() */ 14812 14813 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14814 "link_config0=0x%08x\n", 14815 sc->link_params.lane_config, 14816 sc->link_params.speed_cap_mask[0], 14817 sc->port.link_config[0]); 14818 14819 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14820 bxe_link_settings_requested(sc); 14821 14822 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14823 sc->link_params.feature_config_flags |= 14824 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14825 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14826 
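        /* user forced AutoGrEEEn off; clear the flag regardless of NVRAM */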
sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link feature according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
}

static void
bxe_get_params(struct bxe_softc *sc)
{
    /* get user tunable params */
    bxe_get_tunable_params(sc);

    /* select the RX and TX ring sizes */
    sc->tx_ring_size = TX_BD_USABLE;
    sc->rx_ring_size = RX_BD_USABLE;

    /* XXX disable WoL */
    sc->wol = 0;
}

static void
bxe_set_modes_bitmap(struct bxe_softc *sc)
{
    uint32_t flags = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        SET_FLAGS(flags, MODE_FPGA);
    } else if (CHIP_REV_IS_EMUL(sc)) {
        SET_FLAGS(flags, MODE_EMUL);
    } else {
        SET_FLAGS(flags, MODE_ASIC);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        SET_FLAGS(flags, MODE_PORT4);
    } else {
        SET_FLAGS(flags, MODE_PORT2);
    }

    if (CHIP_IS_E2(sc)) {
        SET_FLAGS(flags, MODE_E2);
    } else if (CHIP_IS_E3(sc)) {
        SET_FLAGS(flags, MODE_E3);
        if (CHIP_REV(sc) == CHIP_REV_Ax) {
            SET_FLAGS(flags, MODE_E3_A0);
        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
        }
    }

    if (IS_MF(sc)) {
        SET_FLAGS(flags, MODE_MF);
        switch (sc->devinfo.mf_info.mf_mode) {
        case MULTI_FUNCTION_SD:
            SET_FLAGS(flags, MODE_MF_SD);
            break;
        case MULTI_FUNCTION_SI:
            SET_FLAGS(flags, MODE_MF_SI);
            break;
        case MULTI_FUNCTION_AFEX:
            SET_FLAGS(flags, MODE_MF_AFEX);
            break;
        }
    } else {
        SET_FLAGS(flags, MODE_SF);
    }

#if defined(__LITTLE_ENDIAN)
    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /* __BIG_ENDIAN */
    SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif

    INIT_MODE_FLAGS(sc) = flags;
}

static int
bxe_alloc_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    bus_addr_t busaddr;
    int max_agg_queues;
    int max_segments;
    bus_size_t max_size;
    bus_size_t max_seg_size;
    char buf[32];
    int rc;
    int i, j;

    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */

    /* allocate the parent bus DMA tag */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
                            1,                        /* alignment */
                            0,                        /* boundary limit */
                            BUS_SPACE_MAXADDR,        /* restricted low */
                            BUS_SPACE_MAXADDR,        /* restricted hi */
                            NULL,                     /* addr filter() */
                            NULL,                     /* addr filter() arg */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
                            0,                        /* flags */
                            NULL,                     /* lock() */
                            NULL,                     /* lock() arg */
                            &sc->parent_dma_tag);     /* returned dma tag */
    if (rc
!= 0) { 14951 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14952 return (1); 14953 } 14954 14955 /************************/ 14956 /* DEFAULT STATUS BLOCK */ 14957 /************************/ 14958 14959 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14960 &sc->def_sb_dma, "default status block") != 0) { 14961 /* XXX */ 14962 bus_dma_tag_destroy(sc->parent_dma_tag); 14963 return (1); 14964 } 14965 14966 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14967 14968 /***************/ 14969 /* EVENT QUEUE */ 14970 /***************/ 14971 14972 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14973 &sc->eq_dma, "event queue") != 0) { 14974 /* XXX */ 14975 bxe_dma_free(sc, &sc->def_sb_dma); 14976 sc->def_sb = NULL; 14977 bus_dma_tag_destroy(sc->parent_dma_tag); 14978 return (1); 14979 } 14980 14981 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14982 14983 /*************/ 14984 /* SLOW PATH */ 14985 /*************/ 14986 14987 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14988 &sc->sp_dma, "slow path") != 0) { 14989 /* XXX */ 14990 bxe_dma_free(sc, &sc->eq_dma); 14991 sc->eq = NULL; 14992 bxe_dma_free(sc, &sc->def_sb_dma); 14993 sc->def_sb = NULL; 14994 bus_dma_tag_destroy(sc->parent_dma_tag); 14995 return (1); 14996 } 14997 14998 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 14999 15000 /*******************/ 15001 /* SLOW PATH QUEUE */ 15002 /*******************/ 15003 15004 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15005 &sc->spq_dma, "slow path queue") != 0) { 15006 /* XXX */ 15007 bxe_dma_free(sc, &sc->sp_dma); 15008 sc->sp = NULL; 15009 bxe_dma_free(sc, &sc->eq_dma); 15010 sc->eq = NULL; 15011 bxe_dma_free(sc, &sc->def_sb_dma); 15012 sc->def_sb = NULL; 15013 bus_dma_tag_destroy(sc->parent_dma_tag); 15014 return (1); 15015 } 15016 15017 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15018 15019 /***************************/ 15020 /* FW DECOMPRESSION BUFFER */ 15021 /***************************/ 15022 15023 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15024 "fw decompression buffer") != 0) { 15025 /* XXX */ 15026 bxe_dma_free(sc, &sc->spq_dma); 15027 sc->spq = NULL; 15028 bxe_dma_free(sc, &sc->sp_dma); 15029 sc->sp = NULL; 15030 bxe_dma_free(sc, &sc->eq_dma); 15031 sc->eq = NULL; 15032 bxe_dma_free(sc, &sc->def_sb_dma); 15033 sc->def_sb = NULL; 15034 bus_dma_tag_destroy(sc->parent_dma_tag); 15035 return (1); 15036 } 15037 15038 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15039 15040 if ((sc->gz_strm = 15041 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15042 /* XXX */ 15043 bxe_dma_free(sc, &sc->gz_buf_dma); 15044 sc->gz_buf = NULL; 15045 bxe_dma_free(sc, &sc->spq_dma); 15046 sc->spq = NULL; 15047 bxe_dma_free(sc, &sc->sp_dma); 15048 sc->sp = NULL; 15049 bxe_dma_free(sc, &sc->eq_dma); 15050 sc->eq = NULL; 15051 bxe_dma_free(sc, &sc->def_sb_dma); 15052 sc->def_sb = NULL; 15053 bus_dma_tag_destroy(sc->parent_dma_tag); 15054 return (1); 15055 } 15056 15057 /*************/ 15058 /* FASTPATHS */ 15059 /*************/ 15060 15061 /* allocate DMA memory for each fastpath structure */ 15062 for (i = 0; i < sc->num_queues; i++) { 15063 fp = &sc->fp[i]; 15064 fp->sc = sc; 15065 fp->index = i; 15066 15067 /*******************/ 15068 /* FP STATUS BLOCK */ 15069 /*******************/ 15070 15071 snprintf(buf, sizeof(buf), "fp %d status block", i); 15072 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15073 &fp->sb_dma, buf) != 0) { 15074 /* XXX unwind and free previous fastpath allocations */ 15075 BLOGE(sc, "Failed to alloc %s\n", 
buf); 15076 return (1); 15077 } else { 15078 if (CHIP_IS_E2E3(sc)) { 15079 fp->status_block.e2_sb = 15080 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15081 } else { 15082 fp->status_block.e1x_sb = 15083 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15084 } 15085 } 15086 15087 /******************/ 15088 /* FP TX BD CHAIN */ 15089 /******************/ 15090 15091 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15092 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15093 &fp->tx_dma, buf) != 0) { 15094 /* XXX unwind and free previous fastpath allocations */ 15095 BLOGE(sc, "Failed to alloc %s\n", buf); 15096 return (1); 15097 } else { 15098 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15099 } 15100 15101 /* link together the tx bd chain pages */ 15102 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15103 /* index into the tx bd chain array to last entry per page */ 15104 struct eth_tx_next_bd *tx_next_bd = 15105 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15106 /* point to the next page and wrap from last page */ 15107 busaddr = (fp->tx_dma.paddr + 15108 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15109 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15110 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15111 } 15112 15113 /******************/ 15114 /* FP RX BD CHAIN */ 15115 /******************/ 15116 15117 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15118 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15119 &fp->rx_dma, buf) != 0) { 15120 /* XXX unwind and free previous fastpath allocations */ 15121 BLOGE(sc, "Failed to alloc %s\n", buf); 15122 return (1); 15123 } else { 15124 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15125 } 15126 15127 /* link together the rx bd chain pages */ 15128 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15129 /* index into the rx bd chain array to last entry per page */ 15130 struct eth_rx_bd *rx_bd = 15131 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15132 /* point to the next page and wrap from last page */ 15133 busaddr = (fp->rx_dma.paddr + 15134 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15135 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15136 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15137 } 15138 15139 /*******************/ 15140 /* FP RX RCQ CHAIN */ 15141 /*******************/ 15142 15143 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15144 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15145 &fp->rcq_dma, buf) != 0) { 15146 /* XXX unwind and free previous fastpath allocations */ 15147 BLOGE(sc, "Failed to alloc %s\n", buf); 15148 return (1); 15149 } else { 15150 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15151 } 15152 15153 /* link together the rcq chain pages */ 15154 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15155 /* index into the rcq chain array to last entry per page */ 15156 struct eth_rx_cqe_next_page *rx_cqe_next = 15157 (struct eth_rx_cqe_next_page *) 15158 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15159 /* point to the next page and wrap from last page */ 15160 busaddr = (fp->rcq_dma.paddr + 15161 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15162 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15163 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15164 } 15165 15166 /*******************/ 15167 /* FP RX SGE CHAIN */ 15168 /*******************/ 15169 15170 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15171 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15172 &fp->rx_sge_dma, buf) != 0) { 15173 /* XXX unwind and free previous fastpath allocations */ 
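            /*
             * On failure the caller is expected to unwind via
             * bxe_free_hsi_mem(), which checks each tag and map for NULL
             * before destroying it and so tolerates a partially-completed
             * allocation.
             */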
15174 BLOGE(sc, "Failed to alloc %s\n", buf); 15175 return (1); 15176 } else { 15177 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15178 } 15179 15180 /* link together the sge chain pages */ 15181 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15182 /* index into the rcq chain array to last entry per page */ 15183 struct eth_rx_sge *rx_sge = 15184 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15185 /* point to the next page and wrap from last page */ 15186 busaddr = (fp->rx_sge_dma.paddr + 15187 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15188 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15189 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15190 } 15191 15192 /***********************/ 15193 /* FP TX MBUF DMA MAPS */ 15194 /***********************/ 15195 15196 /* set required sizes before mapping to conserve resources */ 15197 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 15198 max_size = BXE_TSO_MAX_SIZE; 15199 max_segments = BXE_TSO_MAX_SEGMENTS; 15200 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15201 } else { 15202 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15203 max_segments = BXE_MAX_SEGMENTS; 15204 max_seg_size = MCLBYTES; 15205 } 15206 15207 /* create a dma tag for the tx mbufs */ 15208 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15209 1, /* alignment */ 15210 0, /* boundary limit */ 15211 BUS_SPACE_MAXADDR, /* restricted low */ 15212 BUS_SPACE_MAXADDR, /* restricted hi */ 15213 NULL, /* addr filter() */ 15214 NULL, /* addr filter() arg */ 15215 max_size, /* max map size */ 15216 max_segments, /* num discontinuous */ 15217 max_seg_size, /* max seg size */ 15218 0, /* flags */ 15219 NULL, /* lock() */ 15220 NULL, /* lock() arg */ 15221 &fp->tx_mbuf_tag); /* returned dma tag */ 15222 if (rc != 0) { 15223 /* XXX unwind and free previous fastpath allocations */ 15224 BLOGE(sc, "Failed to create dma tag for " 15225 "'fp %d tx mbufs' (%d)\n", 15226 i, rc); 15227 return (1); 15228 } 15229 15230 /* create dma maps for each of the tx mbuf clusters */ 15231 for (j = 0; j < TX_BD_TOTAL; j++) { 15232 if (bus_dmamap_create(fp->tx_mbuf_tag, 15233 BUS_DMA_NOWAIT, 15234 &fp->tx_mbuf_chain[j].m_map)) { 15235 /* XXX unwind and free previous fastpath allocations */ 15236 BLOGE(sc, "Failed to create dma map for " 15237 "'fp %d tx mbuf %d' (%d)\n", 15238 i, j, rc); 15239 return (1); 15240 } 15241 } 15242 15243 /***********************/ 15244 /* FP RX MBUF DMA MAPS */ 15245 /***********************/ 15246 15247 /* create a dma tag for the rx mbufs */ 15248 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15249 1, /* alignment */ 15250 0, /* boundary limit */ 15251 BUS_SPACE_MAXADDR, /* restricted low */ 15252 BUS_SPACE_MAXADDR, /* restricted hi */ 15253 NULL, /* addr filter() */ 15254 NULL, /* addr filter() arg */ 15255 MJUM9BYTES, /* max map size */ 15256 1, /* num discontinuous */ 15257 MJUM9BYTES, /* max seg size */ 15258 0, /* flags */ 15259 NULL, /* lock() */ 15260 NULL, /* lock() arg */ 15261 &fp->rx_mbuf_tag); /* returned dma tag */ 15262 if (rc != 0) { 15263 /* XXX unwind and free previous fastpath allocations */ 15264 BLOGE(sc, "Failed to create dma tag for " 15265 "'fp %d rx mbufs' (%d)\n", 15266 i, rc); 15267 return (1); 15268 } 15269 15270 /* create dma maps for each of the rx mbuf clusters */ 15271 for (j = 0; j < RX_BD_TOTAL; j++) { 15272 if (bus_dmamap_create(fp->rx_mbuf_tag, 15273 BUS_DMA_NOWAIT, 15274 &fp->rx_mbuf_chain[j].m_map)) { 15275 /* XXX unwind and free previous fastpath allocations */ 15276 BLOGE(sc, "Failed to create dma map for " 
15277 "'fp %d rx mbuf %d' (%d)\n", 15278 i, j, rc); 15279 return (1); 15280 } 15281 } 15282 15283 /* create dma map for the spare rx mbuf cluster */ 15284 if (bus_dmamap_create(fp->rx_mbuf_tag, 15285 BUS_DMA_NOWAIT, 15286 &fp->rx_mbuf_spare_map)) { 15287 /* XXX unwind and free previous fastpath allocations */ 15288 BLOGE(sc, "Failed to create dma map for " 15289 "'fp %d spare rx mbuf' (%d)\n", 15290 i, rc); 15291 return (1); 15292 } 15293 15294 /***************************/ 15295 /* FP RX SGE MBUF DMA MAPS */ 15296 /***************************/ 15297 15298 /* create a dma tag for the rx sge mbufs */ 15299 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15300 1, /* alignment */ 15301 0, /* boundary limit */ 15302 BUS_SPACE_MAXADDR, /* restricted low */ 15303 BUS_SPACE_MAXADDR, /* restricted hi */ 15304 NULL, /* addr filter() */ 15305 NULL, /* addr filter() arg */ 15306 BCM_PAGE_SIZE, /* max map size */ 15307 1, /* num discontinuous */ 15308 BCM_PAGE_SIZE, /* max seg size */ 15309 0, /* flags */ 15310 NULL, /* lock() */ 15311 NULL, /* lock() arg */ 15312 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15313 if (rc != 0) { 15314 /* XXX unwind and free previous fastpath allocations */ 15315 BLOGE(sc, "Failed to create dma tag for " 15316 "'fp %d rx sge mbufs' (%d)\n", 15317 i, rc); 15318 return (1); 15319 } 15320 15321 /* create dma maps for the rx sge mbuf clusters */ 15322 for (j = 0; j < RX_SGE_TOTAL; j++) { 15323 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15324 BUS_DMA_NOWAIT, 15325 &fp->rx_sge_mbuf_chain[j].m_map)) { 15326 /* XXX unwind and free previous fastpath allocations */ 15327 BLOGE(sc, "Failed to create dma map for " 15328 "'fp %d rx sge mbuf %d' (%d)\n", 15329 i, j, rc); 15330 return (1); 15331 } 15332 } 15333 15334 /* create dma map for the spare rx sge mbuf cluster */ 15335 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15336 BUS_DMA_NOWAIT, 15337 &fp->rx_sge_mbuf_spare_map)) { 15338 /* XXX unwind and free previous fastpath allocations */ 15339 BLOGE(sc, "Failed to create dma map for " 15340 "'fp %d spare rx sge mbuf' (%d)\n", 15341 i, rc); 15342 return (1); 15343 } 15344 15345 /***************************/ 15346 /* FP RX TPA MBUF DMA MAPS */ 15347 /***************************/ 15348 15349 /* create dma maps for the rx tpa mbuf clusters */ 15350 max_agg_queues = MAX_AGG_QS(sc); 15351 15352 for (j = 0; j < max_agg_queues; j++) { 15353 if (bus_dmamap_create(fp->rx_mbuf_tag, 15354 BUS_DMA_NOWAIT, 15355 &fp->rx_tpa_info[j].bd.m_map)) { 15356 /* XXX unwind and free previous fastpath allocations */ 15357 BLOGE(sc, "Failed to create dma map for " 15358 "'fp %d rx tpa mbuf %d' (%d)\n", 15359 i, j, rc); 15360 return (1); 15361 } 15362 } 15363 15364 /* create dma map for the spare rx tpa mbuf cluster */ 15365 if (bus_dmamap_create(fp->rx_mbuf_tag, 15366 BUS_DMA_NOWAIT, 15367 &fp->rx_tpa_info_mbuf_spare_map)) { 15368 /* XXX unwind and free previous fastpath allocations */ 15369 BLOGE(sc, "Failed to create dma map for " 15370 "'fp %d spare rx tpa mbuf' (%d)\n", 15371 i, rc); 15372 return (1); 15373 } 15374 15375 bxe_init_sge_ring_bit_mask(fp); 15376 } 15377 15378 return (0); 15379} 15380 15381static void 15382bxe_free_hsi_mem(struct bxe_softc *sc) 15383{ 15384 struct bxe_fastpath *fp; 15385 int max_agg_queues; 15386 int i, j; 15387 15388 if (sc->parent_dma_tag == NULL) { 15389 return; /* assume nothing was allocated */ 15390 } 15391 15392 for (i = 0; i < sc->num_queues; i++) { 15393 fp = &sc->fp[i]; 15394 15395 /*******************/ 15396 /* FP STATUS BLOCK */ 15397 
/*******************/ 15398 15399 bxe_dma_free(sc, &fp->sb_dma); 15400 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15401 15402 /******************/ 15403 /* FP TX BD CHAIN */ 15404 /******************/ 15405 15406 bxe_dma_free(sc, &fp->tx_dma); 15407 fp->tx_chain = NULL; 15408 15409 /******************/ 15410 /* FP RX BD CHAIN */ 15411 /******************/ 15412 15413 bxe_dma_free(sc, &fp->rx_dma); 15414 fp->rx_chain = NULL; 15415 15416 /*******************/ 15417 /* FP RX RCQ CHAIN */ 15418 /*******************/ 15419 15420 bxe_dma_free(sc, &fp->rcq_dma); 15421 fp->rcq_chain = NULL; 15422 15423 /*******************/ 15424 /* FP RX SGE CHAIN */ 15425 /*******************/ 15426 15427 bxe_dma_free(sc, &fp->rx_sge_dma); 15428 fp->rx_sge_chain = NULL; 15429 15430 /***********************/ 15431 /* FP TX MBUF DMA MAPS */ 15432 /***********************/ 15433 15434 if (fp->tx_mbuf_tag != NULL) { 15435 for (j = 0; j < TX_BD_TOTAL; j++) { 15436 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15437 bus_dmamap_unload(fp->tx_mbuf_tag, 15438 fp->tx_mbuf_chain[j].m_map); 15439 bus_dmamap_destroy(fp->tx_mbuf_tag, 15440 fp->tx_mbuf_chain[j].m_map); 15441 } 15442 } 15443 15444 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15445 fp->tx_mbuf_tag = NULL; 15446 } 15447 15448 /***********************/ 15449 /* FP RX MBUF DMA MAPS */ 15450 /***********************/ 15451 15452 if (fp->rx_mbuf_tag != NULL) { 15453 for (j = 0; j < RX_BD_TOTAL; j++) { 15454 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15455 bus_dmamap_unload(fp->rx_mbuf_tag, 15456 fp->rx_mbuf_chain[j].m_map); 15457 bus_dmamap_destroy(fp->rx_mbuf_tag, 15458 fp->rx_mbuf_chain[j].m_map); 15459 } 15460 } 15461 15462 if (fp->rx_mbuf_spare_map != NULL) { 15463 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15464 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15465 } 15466 15467 /***************************/ 15468 /* FP RX TPA MBUF DMA MAPS */ 15469 /***************************/ 15470 15471 max_agg_queues = MAX_AGG_QS(sc); 15472 15473 for (j = 0; j < max_agg_queues; j++) { 15474 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15475 bus_dmamap_unload(fp->rx_mbuf_tag, 15476 fp->rx_tpa_info[j].bd.m_map); 15477 bus_dmamap_destroy(fp->rx_mbuf_tag, 15478 fp->rx_tpa_info[j].bd.m_map); 15479 } 15480 } 15481 15482 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15483 bus_dmamap_unload(fp->rx_mbuf_tag, 15484 fp->rx_tpa_info_mbuf_spare_map); 15485 bus_dmamap_destroy(fp->rx_mbuf_tag, 15486 fp->rx_tpa_info_mbuf_spare_map); 15487 } 15488 15489 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15490 fp->rx_mbuf_tag = NULL; 15491 } 15492 15493 /***************************/ 15494 /* FP RX SGE MBUF DMA MAPS */ 15495 /***************************/ 15496 15497 if (fp->rx_sge_mbuf_tag != NULL) { 15498 for (j = 0; j < RX_SGE_TOTAL; j++) { 15499 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15500 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15501 fp->rx_sge_mbuf_chain[j].m_map); 15502 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15503 fp->rx_sge_mbuf_chain[j].m_map); 15504 } 15505 } 15506 15507 if (fp->rx_sge_mbuf_spare_map != NULL) { 15508 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15509 fp->rx_sge_mbuf_spare_map); 15510 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15511 fp->rx_sge_mbuf_spare_map); 15512 } 15513 15514 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15515 fp->rx_sge_mbuf_tag = NULL; 15516 } 15517 } 15518 15519 /***************************/ 15520 /* FW DECOMPRESSION BUFFER */ 15521 /***************************/ 15522 15523 bxe_dma_free(sc, &sc->gz_buf_dma); 15524 sc->gz_buf = NULL; 
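    /* gz_strm came from malloc(9), not bxe_dma_alloc(), so use free(9) */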
free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;

    /*************/
    /* SLOW PATH */
    /*************/

    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;

    /***************/
    /* EVENT QUEUE */
    /***************/

    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;

    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
}

/*
 * A DMAE transaction from a previous driver instance may have been in
 * flight when the pre-boot stage ended and boot began. That would
 * invalidate the addresses of the transaction, resulting in the was-error
 * bit being set in PCI space and causing all hw-to-host PCIe transactions
 * to time out. If this happened we want to clear the interrupt which
 * detected this from the pglueb and the was-done bit.
 */
static void
bxe_prev_interrupted_dmae(struct bxe_softc *sc)
{
    uint32_t val;

    if (!CHIP_IS_E1x(sc)) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
            BLOGD(sc, DBG_LOAD,
                  "Clearing 'was-error' bit that was set in pglueb\n");
            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
        }
    }
}

static int
bxe_prev_mcp_done(struct bxe_softc *sc)
{
    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
    if (!rc) {
        BLOGE(sc, "MCP response failure, aborting\n");
        return (-1);
    }

    return (0);
}

static struct bxe_prev_list_node *
bxe_prev_path_get_entry(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;

    LIST_FOREACH(tmp, &bxe_prev_list, node) {
        if ((sc->pcie_bus == tmp->bus) &&
            (sc->pcie_device == tmp->slot) &&
            (SC_PATH(sc) == tmp->path)) {
            return (tmp);
        }
    }

    return (NULL);
}

static uint8_t
bxe_prev_is_path_marked(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;
    int rc = FALSE;

    mtx_lock(&bxe_prev_mtx);

    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was marked by AER\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            rc = TRUE;
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was already cleaned from previous drivers\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        }
    }

    mtx_unlock(&bxe_prev_mtx);

    return (rc);
}

static int
bxe_prev_mark_path(struct bxe_softc *sc,
                   uint8_t after_undi)
{
    struct bxe_prev_list_node *tmp;

    mtx_lock(&bxe_prev_mtx);

    /* Check whether the entry for this path already exists */
    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (!tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Re-marking AER in path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            BLOGD(sc, DBG_LOAD,
                  "Removing AER indication from path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
            tmp->aer
= 0; 15658 } 15659 15660 mtx_unlock(&bxe_prev_mtx); 15661 return (0); 15662 } 15663 15664 mtx_unlock(&bxe_prev_mtx); 15665 15666 /* Create an entry for this path and add it */ 15667 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15668 (M_NOWAIT | M_ZERO)); 15669 if (!tmp) { 15670 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15671 return (-1); 15672 } 15673 15674 tmp->bus = sc->pcie_bus; 15675 tmp->slot = sc->pcie_device; 15676 tmp->path = SC_PATH(sc); 15677 tmp->aer = 0; 15678 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15679 15680 mtx_lock(&bxe_prev_mtx); 15681 15682 BLOGD(sc, DBG_LOAD, 15683 "Marked path %d/%d/%d - finished previous unload\n", 15684 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15685 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15686 15687 mtx_unlock(&bxe_prev_mtx); 15688 15689 return (0); 15690} 15691 15692static int 15693bxe_do_flr(struct bxe_softc *sc) 15694{ 15695 int i; 15696 15697 /* only E2 and onwards support FLR */ 15698 if (CHIP_IS_E1x(sc)) { 15699 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15700 return (-1); 15701 } 15702 15703 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15704 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15705 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15706 sc->devinfo.bc_ver); 15707 return (-1); 15708 } 15709 15710 /* Wait for Transaction Pending bit clean */ 15711 for (i = 0; i < 4; i++) { 15712 if (i) { 15713 DELAY(((1 << (i - 1)) * 100) * 1000); 15714 } 15715 15716 if (!bxe_is_pcie_pending(sc)) { 15717 goto clear; 15718 } 15719 } 15720 15721 BLOGE(sc, "PCIE transaction is not cleared, " 15722 "proceeding with reset anyway\n"); 15723 15724clear: 15725 15726 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15727 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15728 15729 return (0); 15730} 15731 15732struct bxe_mac_vals { 15733 uint32_t xmac_addr; 15734 uint32_t xmac_val; 15735 uint32_t emac_addr; 15736 uint32_t emac_val; 15737 uint32_t umac_addr; 15738 uint32_t umac_val; 15739 uint32_t bmac_addr; 15740 uint32_t bmac_val[2]; 15741}; 15742 15743static void 15744bxe_prev_unload_close_mac(struct bxe_softc *sc, 15745 struct bxe_mac_vals *vals) 15746{ 15747 uint32_t val, base_addr, offset, mask, reset_reg; 15748 uint8_t mac_stopped = FALSE; 15749 uint8_t port = SC_PORT(sc); 15750 uint32_t wb_data[2]; 15751 15752 /* reset addresses as they also mark which values were changed */ 15753 vals->bmac_addr = 0; 15754 vals->umac_addr = 0; 15755 vals->xmac_addr = 0; 15756 vals->emac_addr = 0; 15757 15758 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15759 15760 if (!CHIP_IS_E3(sc)) { 15761 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15762 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15763 if ((mask & reset_reg) && val) { 15764 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15765 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15766 : NIG_REG_INGRESS_BMAC0_MEM; 15767 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15768 : BIGMAC_REGISTER_BMAC_CONTROL; 15769 15770 /* 15771 * use rd/wr since we cannot use dmae. This is safe 15772 * since MCP won't access the bus due to the request 15773 * to unload, and no function on the path can be 15774 * loaded at this time. 
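 *
 * The code just below saves both 32-bit halves of the wide BMAC control
 * register in vals->bmac_val[] and then clears only the Rx-enable bit
 * before writing the values back. A minimal standalone sketch of that
 * save/clear/restore pattern follows; the example_* names and the
 * reg_rd32()/reg_wr32() helpers are hypothetical stand-ins for the
 * driver's REG_RD/REG_WR, not real driver interfaces.
 */
#if 0
#define EXAMPLE_RX_ENABLE 0x1 /* hypothetical Rx-enable bit position */

static void
example_close_mac_rx(uint32_t base, uint32_t saved[2])
{
    uint32_t lo = reg_rd32(base);       /* low 32 bits of the register */
    uint32_t hi = reg_rd32(base + 0x4); /* high 32 bits */

    saved[0] = lo; /* remember the originals so the caller can restore */
    saved[1] = hi;

    reg_wr32(base, lo & ~EXAMPLE_RX_ENABLE); /* stop Rx, leave the rest */
    reg_wr32(base + 0x4, hi);
}
#endif
/*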
15775 */ 15776 wb_data[0] = REG_RD(sc, base_addr + offset); 15777 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15778 vals->bmac_addr = base_addr + offset; 15779 vals->bmac_val[0] = wb_data[0]; 15780 vals->bmac_val[1] = wb_data[1]; 15781 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15782 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15783 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15784 } 15785 15786 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15787 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15788 vals->emac_val = REG_RD(sc, vals->emac_addr); 15789 REG_WR(sc, vals->emac_addr, 0); 15790 mac_stopped = TRUE; 15791 } else { 15792 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15793 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15794 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15795 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15796 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15797 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15798 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15799 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15800 REG_WR(sc, vals->xmac_addr, 0); 15801 mac_stopped = TRUE; 15802 } 15803 15804 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15805 if (mask & reset_reg) { 15806 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15807 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15808 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15809 vals->umac_val = REG_RD(sc, vals->umac_addr); 15810 REG_WR(sc, vals->umac_addr, 0); 15811 mac_stopped = TRUE; 15812 } 15813 } 15814 15815 if (mac_stopped) { 15816 DELAY(20000); 15817 } 15818} 15819 15820#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15821#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15822#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15823#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15824 15825static void 15826bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15827 uint8_t port, 15828 uint8_t inc) 15829{ 15830 uint16_t rcq, bd; 15831 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15832 15833 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15834 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15835 15836 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15837 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15838 15839 BLOGD(sc, DBG_LOAD, 15840 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15841 port, bd, rcq); 15842} 15843 15844static int 15845bxe_prev_unload_common(struct bxe_softc *sc) 15846{ 15847 uint32_t reset_reg, tmp_reg = 0, rc; 15848 uint8_t prev_undi = FALSE; 15849 struct bxe_mac_vals mac_vals; 15850 uint32_t timer_count = 1000; 15851 uint32_t prev_brb; 15852 15853 /* 15854 * It is possible a previous function received 'common' answer, 15855 * but hasn't loaded yet, therefore creating a scenario of 15856 * multiple functions receiving 'common' on the same path. 
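 *
 * The BXE_PREV_UNDI_* macros above pack both UNDI producers into a
 * single 32-bit word: RCQ in bits 15:0 and BD in bits 31:16. A
 * standalone sketch of the bump performed by bxe_prev_unload_undi_inc(),
 * using a hypothetical example_* name:
 */
#if 0
static uint32_t
example_undi_bump(uint32_t prod_word, uint8_t inc)
{
    uint16_t rcq = (uint16_t)((prod_word & 0xffff) + inc);         /* BXE_PREV_UNDI_RCQ */
    uint16_t bd  = (uint16_t)(((prod_word >> 16) & 0xffff) + inc); /* BXE_PREV_UNDI_BD  */

    /* e.g. 0x00050003 (bd = 5, rcq = 3) bumped by 1 yields 0x00060004 */
    return (((uint32_t)bd << 16) | rcq);                           /* BXE_PREV_UNDI_PROD */
}
#endif
/*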
15857 */ 15858 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15859 15860 memset(&mac_vals, 0, sizeof(mac_vals)); 15861 15862 if (bxe_prev_is_path_marked(sc)) { 15863 return (bxe_prev_mcp_done(sc)); 15864 } 15865 15866 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15867 15868 /* Reset should be performed after BRB is emptied */ 15869 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15870 /* Close the MAC Rx to prevent BRB from filling up */ 15871 bxe_prev_unload_close_mac(sc, &mac_vals); 15872 15873 /* close LLH filters towards the BRB */ 15874 elink_set_rx_filter(&sc->link_params, 0); 15875 15876 /* 15877 * Check if the UNDI driver was previously loaded. 15878 * UNDI driver initializes CID offset for normal bell to 0x7 15879 */ 15880 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15881 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15882 if (tmp_reg == 0x7) { 15883 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15884 prev_undi = TRUE; 15885 /* clear the UNDI indication */ 15886 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15887 /* clear possible idle check errors */ 15888 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15889 } 15890 } 15891 15892 /* wait until BRB is empty */ 15893 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15894 while (timer_count) { 15895 prev_brb = tmp_reg; 15896 15897 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15898 if (!tmp_reg) { 15899 break; 15900 } 15901 15902 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15903 15904 /* reset timer as long as BRB actually gets emptied */ 15905 if (prev_brb > tmp_reg) { 15906 timer_count = 1000; 15907 } else { 15908 timer_count--; 15909 } 15910 15911 /* If UNDI resides in memory, manually increment it */ 15912 if (prev_undi) { 15913 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15914 } 15915 15916 DELAY(10); 15917 } 15918 15919 if (!timer_count) { 15920 BLOGE(sc, "Failed to empty BRB\n"); 15921 } 15922 } 15923 15924 /* No packets are in the pipeline, path is ready for reset */ 15925 bxe_reset_common(sc); 15926 15927 if (mac_vals.xmac_addr) { 15928 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15929 } 15930 if (mac_vals.umac_addr) { 15931 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15932 } 15933 if (mac_vals.emac_addr) { 15934 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15935 } 15936 if (mac_vals.bmac_addr) { 15937 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15938 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15939 } 15940 15941 rc = bxe_prev_mark_path(sc, prev_undi); 15942 if (rc) { 15943 bxe_prev_mcp_done(sc); 15944 return (rc); 15945 } 15946 15947 return (bxe_prev_mcp_done(sc)); 15948} 15949 15950static int 15951bxe_prev_unload_uncommon(struct bxe_softc *sc) 15952{ 15953 int rc; 15954 15955 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15956 15957 /* Test if previous unload process was already finished for this path */ 15958 if (bxe_prev_is_path_marked(sc)) { 15959 return (bxe_prev_mcp_done(sc)); 15960 } 15961 15962 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15963 15964 /* 15965 * If function has FLR capabilities, and existing FW version matches 15966 * the one required, then FLR will be sufficient to clean any residue 15967 * left by previous driver 15968 */ 15969 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15970 if (!rc) { 15971 /* fw version is good */ 15972 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15973 rc = bxe_do_flr(sc); 15974 } 15975 15976 if (!rc) { 15977 /* FLR was performed */ 15978 BLOGD(sc, DBG_LOAD, 
"FLR successful\n"); 15979 return (0); 15980 } 15981 15982 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15983 15984 /* Close the MCP request, return failure*/ 15985 rc = bxe_prev_mcp_done(sc); 15986 if (!rc) { 15987 rc = BXE_PREV_WAIT_NEEDED; 15988 } 15989 15990 return (rc); 15991} 15992 15993static int 15994bxe_prev_unload(struct bxe_softc *sc) 15995{ 15996 int time_counter = 10; 15997 uint32_t fw, hw_lock_reg, hw_lock_val; 15998 uint32_t rc = 0; 15999 16000 /* 16001 * Clear HW from errors which may have resulted from an interrupted 16002 * DMAE transaction. 16003 */ 16004 bxe_prev_interrupted_dmae(sc); 16005 16006 /* Release previously held locks */ 16007 hw_lock_reg = 16008 (SC_FUNC(sc) <= 5) ? 16009 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 16010 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 16011 16012 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 16013 if (hw_lock_val) { 16014 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 16015 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 16016 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 16017 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 16018 } 16019 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 16020 REG_WR(sc, hw_lock_reg, 0xffffffff); 16021 } else { 16022 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 16023 } 16024 16025 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 16026 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 16027 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 16028 } 16029 16030 do { 16031 /* Lock MCP using an unload request */ 16032 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 16033 if (!fw) { 16034 BLOGE(sc, "MCP response failure, aborting\n"); 16035 rc = -1; 16036 break; 16037 } 16038 16039 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 16040 rc = bxe_prev_unload_common(sc); 16041 break; 16042 } 16043 16044 /* non-common reply from MCP night require looping */ 16045 rc = bxe_prev_unload_uncommon(sc); 16046 if (rc != BXE_PREV_WAIT_NEEDED) { 16047 break; 16048 } 16049 16050 DELAY(20000); 16051 } while (--time_counter); 16052 16053 if (!time_counter || rc) { 16054 BLOGE(sc, "Failed to unload previous driver!\n"); 16055 rc = -1; 16056 } 16057 16058 return (rc); 16059} 16060 16061void 16062bxe_dcbx_set_state(struct bxe_softc *sc, 16063 uint8_t dcb_on, 16064 uint32_t dcbx_enabled) 16065{ 16066 if (!CHIP_IS_E1x(sc)) { 16067 sc->dcb_state = dcb_on; 16068 sc->dcbx_enabled = dcbx_enabled; 16069 } else { 16070 sc->dcb_state = FALSE; 16071 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 16072 } 16073 BLOGD(sc, DBG_LOAD, 16074 "DCB state [%s:%s]\n", 16075 dcb_on ? "ON" : "OFF", 16076 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 16077 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 16078 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
16079 "on-chip with negotiation" : "invalid"); 16080} 16081 16082/* must be called after sriov-enable */ 16083static int 16084bxe_set_qm_cid_count(struct bxe_softc *sc) 16085{ 16086 int cid_count = BXE_L2_MAX_CID(sc); 16087 16088 if (IS_SRIOV(sc)) { 16089 cid_count += BXE_VF_CIDS; 16090 } 16091 16092 if (CNIC_SUPPORT(sc)) { 16093 cid_count += CNIC_CID_MAX; 16094 } 16095 16096 return (roundup(cid_count, QM_CID_ROUND)); 16097} 16098 16099static void 16100bxe_init_multi_cos(struct bxe_softc *sc) 16101{ 16102 int pri, cos; 16103 16104 uint32_t pri_map = 0; /* XXX change to user config */ 16105 16106 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16107 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16108 if (cos < sc->max_cos) { 16109 sc->prio_to_cos[pri] = cos; 16110 } else { 16111 BLOGW(sc, "Invalid COS %d for priority %d " 16112 "(max COS is %d), setting to 0\n", 16113 cos, pri, (sc->max_cos - 1)); 16114 sc->prio_to_cos[pri] = 0; 16115 } 16116 } 16117} 16118 16119static int 16120bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16121{ 16122 struct bxe_softc *sc; 16123 int error, result; 16124 16125 result = 0; 16126 error = sysctl_handle_int(oidp, &result, 0, req); 16127 16128 if (error || !req->newptr) { 16129 return (error); 16130 } 16131 16132 if (result == 1) { 16133 sc = (struct bxe_softc *)arg1; 16134 BLOGI(sc, "... dumping driver state ...\n"); 16135 /* XXX */ 16136 } 16137 16138 return (error); 16139} 16140 16141static int 16142bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16143{ 16144 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16145 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16146 uint32_t *offset; 16147 uint64_t value = 0; 16148 int index = (int)arg2; 16149 16150 if (index >= BXE_NUM_ETH_STATS) { 16151 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16152 return (-1); 16153 } 16154 16155 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16156 16157 switch (bxe_eth_stats_arr[index].size) { 16158 case 4: 16159 value = (uint64_t)*offset; 16160 break; 16161 case 8: 16162 value = HILO_U64(*offset, *(offset + 1)); 16163 break; 16164 default: 16165 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16166 index, bxe_eth_stats_arr[index].size); 16167 return (-1); 16168 } 16169 16170 return (sysctl_handle_64(oidp, &value, 0, req)); 16171} 16172 16173static int 16174bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16175{ 16176 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16177 uint32_t *eth_stats; 16178 uint32_t *offset; 16179 uint64_t value = 0; 16180 uint32_t q_stat = (uint32_t)arg2; 16181 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16182 uint32_t index = (q_stat & 0xffff); 16183 16184 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16185 16186 if (index >= BXE_NUM_ETH_Q_STATS) { 16187 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16188 return (-1); 16189 } 16190 16191 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16192 16193 switch (bxe_eth_q_stats_arr[index].size) { 16194 case 4: 16195 value = (uint64_t)*offset; 16196 break; 16197 case 8: 16198 value = HILO_U64(*offset, *(offset + 1)); 16199 break; 16200 default: 16201 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16202 index, bxe_eth_q_stats_arr[index].size); 16203 return (-1); 16204 } 16205 16206 return (sysctl_handle_64(oidp, &value, 0, req)); 16207} 16208 16209static void 16210bxe_add_sysctls(struct bxe_softc *sc) 16211{ 16212 struct sysctl_ctx_list *ctx; 16213 struct sysctl_oid_list *children; 16214 struct sysctl_oid *queue_top, *queue; 16215 struct 
sysctl_oid_list *queue_top_children, *queue_children; 16216 char queue_num_buf[32]; 16217 uint32_t q_stat; 16218 int i, j; 16219 16220 ctx = device_get_sysctl_ctx(sc->dev); 16221 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16222 16223 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16224 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16225 "version"); 16226 16227 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16228 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, 16229 "bootcode version"); 16230 16231 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16232 BCM_5710_FW_MAJOR_VERSION, 16233 BCM_5710_FW_MINOR_VERSION, 16234 BCM_5710_FW_REVISION_VERSION, 16235 BCM_5710_FW_ENGINEERING_VERSION); 16236 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16237 CTLFLAG_RD, &sc->fw_ver_str, 0, 16238 "firmware version"); 16239 16240 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16241 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16242 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16243 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16244 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16245 "Unknown")); 16246 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16247 CTLFLAG_RD, &sc->mf_mode_str, 0, 16248 "multifunction mode"); 16249 16250 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16251 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16252 "multifunction vnics per port"); 16253 16254 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16255 CTLFLAG_RD, &sc->mac_addr_str, 0, 16256 "mac address"); 16257 16258 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16259 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16260 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16261 (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : 16262 "???GT/s"), 16263 sc->devinfo.pcie_link_width); 16264 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16265 CTLFLAG_RD, &sc->pci_link_str, 0, 16266 "pci link status"); 16267 16268 sc->debug = bxe_debug; 16269 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", 16270 CTLFLAG_RW, &sc->debug, 0, 16271 "debug logging mode"); 16272 16273 sc->rx_budget = bxe_rx_budget; 16274 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16275 CTLFLAG_RW, &sc->rx_budget, 0, 16276 "rx processing budget"); 16277 16278 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16279 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16280 bxe_sysctl_state, "IU", "dump driver state"); 16281 16282 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16283 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16284 bxe_eth_stats_arr[i].string, 16285 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16286 bxe_sysctl_eth_stat, "LU", 16287 bxe_eth_stats_arr[i].string); 16288 } 16289 16290 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16291 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16292 CTLFLAG_RD, NULL, "queue"); 16293 queue_top_children = SYSCTL_CHILDREN(queue_top); 16294 16295 for (i = 0; i < sc->num_queues; i++) { 16296 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16297 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16298 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16299 queue_num_buf, CTLFLAG_RD, NULL, 16300 "single queue"); 16301 queue_children = SYSCTL_CHILDREN(queue); 16302 16303 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16304 q_stat = ((i << 16) | j); 16305 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16306 bxe_eth_q_stats_arr[j].string, 16307 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16308 bxe_sysctl_eth_q_stat, "LU", 16309 bxe_eth_q_stats_arr[j].string); 16310 } 16311 } 16312} 16313 16314/* 16315 * Device attach function. 16316 * 16317 * Allocates device resources, performs secondary chip identification, and 16318 * initializes driver instance variables. This function is called from driver 16319 * load after a successful probe. 
16320 * 16321 * Returns: 16322 * 0 = Success, >0 = Failure 16323 */ 16324static int 16325bxe_attach(device_t dev) 16326{ 16327 struct bxe_softc *sc; 16328 16329 sc = device_get_softc(dev); 16330 16331 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16332 16333 sc->state = BXE_STATE_CLOSED; 16334 16335 sc->dev = dev; 16336 sc->unit = device_get_unit(dev); 16337 16338 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16339 16340 sc->pcie_bus = pci_get_bus(dev); 16341 sc->pcie_device = pci_get_slot(dev); 16342 sc->pcie_func = pci_get_function(dev); 16343 16344 /* enable bus master capability */ 16345 pci_enable_busmaster(dev); 16346 16347 /* get the BARs */ 16348 if (bxe_allocate_bars(sc) != 0) { 16349 return (ENXIO); 16350 } 16351 16352 /* initialize the mutexes */ 16353 bxe_init_mutexes(sc); 16354 16355 /* prepare the periodic callout */ 16356 callout_init(&sc->periodic_callout, 0); 16357 16358 /* prepare the chip taskqueue */ 16359 sc->chip_tq_flags = CHIP_TQ_NONE; 16360 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16361 "bxe%d_chip_tq", sc->unit); 16362 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16363 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16364 taskqueue_thread_enqueue, 16365 &sc->chip_tq); 16366 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16367 "%s", sc->chip_tq_name); 16368 16369 /* get device info and set params */ 16370 if (bxe_get_device_info(sc) != 0) { 16371 BLOGE(sc, "getting device info\n"); 16372 bxe_deallocate_bars(sc); 16373 pci_disable_busmaster(dev); 16374 return (ENXIO); 16375 } 16376 16377 /* get final misc params */ 16378 bxe_get_params(sc); 16379 16380 /* set the default MTU (changed via ifconfig) */ 16381 sc->mtu = ETHERMTU; 16382 16383 bxe_set_modes_bitmap(sc); 16384 16385 /* XXX 16386 * If in AFEX mode and the function is configured for FCoE 16387 * then bail... no L2 allowed. 
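 *
 * Each allocation step below unwinds, by hand and in reverse order,
 * everything acquired before it when a failure occurs. A condensed,
 * hypothetical sketch of that shape (the example_* functions are
 * illustrative, not the driver's real steps):
 */
#if 0
static int
example_staged_attach(struct example_softc *sc)
{
    if (example_alloc_a(sc) != 0) {
        return (ENXIO);
    }

    if (example_alloc_b(sc) != 0) {
        example_free_a(sc); /* undo step A */
        return (ENXIO);
    }

    if (example_alloc_c(sc) != 0) {
        example_free_b(sc); /* undo step B first... */
        example_free_a(sc); /* ...then step A */
        return (ENXIO);
    }

    return (0);
}
#endif
/*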
16388 */ 16389 16390 /* get phy settings from shmem and 'and' against admin settings */ 16391 bxe_get_phy_info(sc); 16392 16393 /* initialize the FreeBSD ifnet interface */ 16394 if (bxe_init_ifnet(sc) != 0) { 16395 bxe_release_mutexes(sc); 16396 bxe_deallocate_bars(sc); 16397 pci_disable_busmaster(dev); 16398 return (ENXIO); 16399 } 16400 16401 /* allocate device interrupts */ 16402 if (bxe_interrupt_alloc(sc) != 0) { 16403 if (sc->ifnet != NULL) { 16404 ether_ifdetach(sc->ifnet); 16405 } 16406 ifmedia_removeall(&sc->ifmedia); 16407 bxe_release_mutexes(sc); 16408 bxe_deallocate_bars(sc); 16409 pci_disable_busmaster(dev); 16410 return (ENXIO); 16411 } 16412 16413 /* allocate ilt */ 16414 if (bxe_alloc_ilt_mem(sc) != 0) { 16415 bxe_interrupt_free(sc); 16416 if (sc->ifnet != NULL) { 16417 ether_ifdetach(sc->ifnet); 16418 } 16419 ifmedia_removeall(&sc->ifmedia); 16420 bxe_release_mutexes(sc); 16421 bxe_deallocate_bars(sc); 16422 pci_disable_busmaster(dev); 16423 return (ENXIO); 16424 } 16425 16426 /* allocate the host hardware/software hsi structures */ 16427 if (bxe_alloc_hsi_mem(sc) != 0) { 16428 bxe_free_ilt_mem(sc); 16429 bxe_interrupt_free(sc); 16430 if (sc->ifnet != NULL) { 16431 ether_ifdetach(sc->ifnet); 16432 } 16433 ifmedia_removeall(&sc->ifmedia); 16434 bxe_release_mutexes(sc); 16435 bxe_deallocate_bars(sc); 16436 pci_disable_busmaster(dev); 16437 return (ENXIO); 16438 } 16439 16440 /* need to reset chip if UNDI was active */ 16441 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16442 /* init fw_seq */ 16443 sc->fw_seq = 16444 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16445 DRV_MSG_SEQ_NUMBER_MASK); 16446 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16447 bxe_prev_unload(sc); 16448 } 16449 16450#if 1 16451 /* XXX */ 16452 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16453#else 16454 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16455 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16456 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16457 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16458 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16459 bxe_dcbx_init_params(sc); 16460 } else { 16461 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16462 } 16463#endif 16464 16465 /* calculate qm_cid_count */ 16466 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16467 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16468 16469 sc->max_cos = 1; 16470 bxe_init_multi_cos(sc); 16471 16472 bxe_add_sysctls(sc); 16473 16474 return (0); 16475} 16476 16477/* 16478 * Device detach function. 16479 * 16480 * Stops the controller, resets the controller, and releases resources. 
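 *
 * Teardown in this driver consistently NULLs each pointer right after
 * releasing it (see bxe_free_hsi_mem() earlier in this section), which
 * keeps a partial or repeated teardown harmless. A minimal sketch of
 * the idiom, assuming a hypothetical example_dma_free() helper:
 */
#if 0
static void
example_release_spq(struct example_softc *sc)
{
    if (sc->spq != NULL) {
        example_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL; /* a second call becomes a no-op */
    }
}
#endif
/*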
16481 * 16482 * Returns: 16483 * 0 = Success, >0 = Failure 16484 */ 16485static int 16486bxe_detach(device_t dev) 16487{ 16488 struct bxe_softc *sc; 16489 struct ifnet *ifp; 16490 16491 sc = device_get_softc(dev); 16492 16493 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16494 16495 ifp = sc->ifnet; 16496 if (ifp != NULL && ifp->if_vlantrunk != NULL) { 16497 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16498 return (EBUSY); 16499 } 16500 16501 /* stop the periodic callout */ 16502 bxe_periodic_stop(sc); 16503 16504 /* stop the chip taskqueue */ 16505 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16506 if (sc->chip_tq) { 16507 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16508 taskqueue_free(sc->chip_tq); 16509 sc->chip_tq = NULL; 16510 } 16511 16512 /* stop and reset the controller if it was open */ 16513 if (sc->state != BXE_STATE_CLOSED) { 16514 BXE_CORE_LOCK(sc); 16515 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16516 BXE_CORE_UNLOCK(sc); 16517 } 16518 16519 /* release the network interface */ 16520 if (ifp != NULL) { 16521 ether_ifdetach(ifp); 16522 } 16523 ifmedia_removeall(&sc->ifmedia); 16524 16525 /* XXX do the following based on driver state... */ 16526 16527 /* free the host hardware/software hsi structures */ 16528 bxe_free_hsi_mem(sc); 16529 16530 /* free ilt */ 16531 bxe_free_ilt_mem(sc); 16532 16533 /* release the interrupts */ 16534 bxe_interrupt_free(sc); 16535 16536 /* release the mutexes */ 16537 bxe_release_mutexes(sc); 16538 16539 /* release the PCIe BAR mapped memory */ 16540 bxe_deallocate_bars(sc); 16541 16542 /* release the FreeBSD interface */ 16543 if (sc->ifnet != NULL) { 16544 if_free(sc->ifnet); 16545 } 16546 16547 pci_disable_busmaster(dev); 16548 16549 return (0); 16550} 16551 16552/* 16553 * Device shutdown function. 16554 * 16555 * Stops and resets the controller. 16556 * 16557 * Returns: 16558 * 0 = Success 16559 */ 16560static int 16561bxe_shutdown(device_t dev) 16562{ 16563 struct bxe_softc *sc; 16564 16565 sc = device_get_softc(dev); 16566 16567 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16568 16569 /* stop the periodic callout */ 16570 bxe_periodic_stop(sc); 16571 16572 BXE_CORE_LOCK(sc); 16573 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16574 BXE_CORE_UNLOCK(sc); 16575 16576 return (0); 16577} 16578 16579void 16580bxe_igu_ack_sb(struct bxe_softc *sc, 16581 uint8_t igu_sb_id, 16582 uint8_t segment, 16583 uint16_t index, 16584 uint8_t op, 16585 uint8_t update) 16586{ 16587 uint32_t igu_addr = sc->igu_base_addr; 16588 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16589 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16590} 16591 16592static void 16593bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16594 uint8_t func, 16595 uint8_t idu_sb_id, 16596 uint8_t is_pf) 16597{ 16598 uint32_t data, ctl, cnt = 100; 16599 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16600 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16601 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16602 uint32_t sb_bit = 1 << (idu_sb_id%32); 16603 uint32_t func_encode = func | (is_pf ?
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16604 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16605 16606 /* Not supported in BC mode */ 16607 if (CHIP_INT_MODE_IS_BC(sc)) { 16608 return; 16609 } 16610 16611 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16612 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16613 IGU_REGULAR_CLEANUP_SET | 16614 IGU_REGULAR_BCLEANUP); 16615 16616 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16617 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16618 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16619 16620 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16621 data, igu_addr_data); 16622 REG_WR(sc, igu_addr_data, data); 16623 16624 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16625 BUS_SPACE_BARRIER_WRITE); 16626 mb(); 16627 16628 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16629 ctl, igu_addr_ctl); 16630 REG_WR(sc, igu_addr_ctl, ctl); 16631 16632 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16633 BUS_SPACE_BARRIER_WRITE); 16634 mb(); 16635 16636 /* wait for clean up to finish */ 16637 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16638 DELAY(20000); 16639 } 16640 16641 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16642 BLOGD(sc, DBG_LOAD, 16643 "Unable to finish IGU cleanup: " 16644 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16645 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16646 } 16647} 16648 16649static void 16650bxe_igu_clear_sb(struct bxe_softc *sc, 16651 uint8_t idu_sb_id) 16652{ 16653 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16654} 16655 16656 16657 16658 16659 16660 16661 16662/*******************/ 16663/* ECORE CALLBACKS */ 16664/*******************/ 16665 16666static void 16667bxe_reset_common(struct bxe_softc *sc) 16668{ 16669 uint32_t val = 0x1400; 16670 16671 /* reset_common */ 16672 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16673 16674 if (CHIP_IS_E3(sc)) { 16675 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16676 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16677 } 16678 16679 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16680} 16681 16682static void 16683bxe_common_init_phy(struct bxe_softc *sc) 16684{ 16685 uint32_t shmem_base[2]; 16686 uint32_t shmem2_base[2]; 16687 16688 /* Avoid common init in case MFW supports LFA */ 16689 if (SHMEM2_RD(sc, size) > 16690 (uint32_t)offsetof(struct shmem2_region, 16691 lfa_host_addr[SC_PORT(sc)])) { 16692 return; 16693 } 16694 16695 shmem_base[0] = sc->devinfo.shmem_base; 16696 shmem2_base[0] = sc->devinfo.shmem2_base; 16697 16698 if (!CHIP_IS_E1x(sc)) { 16699 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16700 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16701 } 16702 16703 BXE_PHY_LOCK(sc); 16704 elink_common_init_phy(sc, shmem_base, shmem2_base, 16705 sc->devinfo.chip_id, 0); 16706 BXE_PHY_UNLOCK(sc); 16707} 16708 16709static void 16710bxe_pf_disable(struct bxe_softc *sc) 16711{ 16712 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16713 16714 val &= ~IGU_PF_CONF_FUNC_EN; 16715 16716 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16717 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16718 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16719} 16720 16721static void 16722bxe_init_pxp(struct bxe_softc *sc) 16723{ 16724 uint16_t devctl; 16725 int r_order, w_order; 16726 16727 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16728 16729 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); 16730 
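
    /*
     * The field extractions below follow the standard PCIe Device
     * Control register layout: Max_Payload_Size occupies bits 7:5 and
     * Max_Read_Request_Size bits 14:12, each encoding a size of
     * 128 << field bytes. A standalone sketch of that decoding, with
     * illustrative example_* names:
     */
#if 0
    uint16_t example_devctl  = 0x2810;                                 /* sample value */
    unsigned example_payload = 128U << ((example_devctl >> 5) & 0x7);  /* -> 128 bytes */
    unsigned example_readrq  = 128U << ((example_devctl >> 12) & 0x7); /* -> 512 bytes */
#endif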
16731 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16732 16733 if (sc->mrrs == -1) { 16734 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16735 } else { 16736 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16737 r_order = sc->mrrs; 16738 } 16739 16740 ecore_init_pxp_arb(sc, r_order, w_order); 16741} 16742 16743static uint32_t 16744bxe_get_pretend_reg(struct bxe_softc *sc) 16745{ 16746 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16747 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16748 return (base + (SC_ABS_FUNC(sc)) * stride); 16749} 16750 16751/* 16752 * Called only on E1H or E2. 16753 * When pretending to be PF, the pretend value is the function number 0..7. 16754 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16755 * combination. 16756 */ 16757static int 16758bxe_pretend_func(struct bxe_softc *sc, 16759 uint16_t pretend_func_val) 16760{ 16761 uint32_t pretend_reg; 16762 16763 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16764 return (-1); 16765 } 16766 16767 /* get my own pretend register */ 16768 pretend_reg = bxe_get_pretend_reg(sc); 16769 REG_WR(sc, pretend_reg, pretend_func_val); 16770 REG_RD(sc, pretend_reg); 16771 return (0); 16772} 16773 16774static void 16775bxe_iov_init_dmae(struct bxe_softc *sc) 16776{ 16777 return; 16778#if 0 16779 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16780 16781 if (!IS_SRIOV(sc)) { 16782 return; 16783 } 16784 16785 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16786#endif 16787} 16788 16789#if 0 16790static int 16791bxe_iov_init_ilt(struct bxe_softc *sc, 16792 uint16_t line) 16793{ 16794 return (line); 16795#if 0 16796 int i; 16797 struct ecore_ilt* ilt = sc->ilt; 16798 16799 if (!IS_SRIOV(sc)) { 16800 return (line); 16801 } 16802 16803 /* set vfs ilt lines */ 16804 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16805 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16806 ilt->lines[line+i].page = hw_cxt->addr; 16807 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16808 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16809 } 16810 return (line+i); 16811#endif 16812} 16813#endif 16814 16815static void 16816bxe_iov_init_dq(struct bxe_softc *sc) 16817{ 16818 return; 16819#if 0 16820 if (!IS_SRIOV(sc)) { 16821 return; 16822 } 16823 16824 /* Set the DQ such that the CID reflect the abs_vfid */ 16825 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16826 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16827 16828 /* 16829 * Set VFs starting CID. If its > 0 the preceding CIDs are belong to 16830 * the PF L2 queues 16831 */ 16832 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16833 16834 /* The VF window size is the log2 of the max number of CIDs per VF */ 16835 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16836 16837 /* 16838 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16839 * the Pf doorbell size although the 2 are independent. 
16840 */ 16841 REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST, 16842 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); 16843 16844 /* 16845 * No security checks for now - 16846 * configure single rule (out of 16) mask = 0x1, value = 0x0, 16847 * CID range 0 - 0x1ffff 16848 */ 16849 REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1); 16850 REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0); 16851 REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 16852 REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 16853 16854 /* set the number of VF alllowed doorbells to the full DQ range */ 16855 REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 16856 16857 /* set the VF doorbell threshold */ 16858 REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 16859#endif 16860} 16861 16862/* send a NIG loopback debug packet */ 16863static void 16864bxe_lb_pckt(struct bxe_softc *sc) 16865{ 16866 uint32_t wb_write[3]; 16867 16868 /* Ethernet source and destination addresses */ 16869 wb_write[0] = 0x55555555; 16870 wb_write[1] = 0x55555555; 16871 wb_write[2] = 0x20; /* SOP */ 16872 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16873 16874 /* NON-IP protocol */ 16875 wb_write[0] = 0x09000000; 16876 wb_write[1] = 0x55555555; 16877 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16878 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16879} 16880 16881/* 16882 * Some of the internal memories are not directly readable from the driver. 16883 * To test them we send debug packets. 16884 */ 16885static int 16886bxe_int_mem_test(struct bxe_softc *sc) 16887{ 16888 int factor; 16889 int count, i; 16890 uint32_t val = 0; 16891 16892 if (CHIP_REV_IS_FPGA(sc)) { 16893 factor = 120; 16894 } else if (CHIP_REV_IS_EMUL(sc)) { 16895 factor = 200; 16896 } else { 16897 factor = 1; 16898 } 16899 16900 /* disable inputs of parser neighbor blocks */ 16901 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16902 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16903 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16904 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16905 16906 /* write 0 to parser credits for CFC search request */ 16907 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16908 16909 /* send Ethernet packet */ 16910 bxe_lb_pckt(sc); 16911 16912 /* TODO do i reset NIG statistic? 
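 *
 * Note that bxe_lb_pckt() above pushes each debug packet to the NIG as
 * three 32-bit words via REG_WR_DMAE(): two payload dwords plus a
 * control dword where, per the comments in the source, 0x20 flags
 * start-of-packet and 0x10 end-of-packet. A condensed restatement of
 * that shape, with a hypothetical example_* name:
 */
#if 0
static void
example_lb_burst(struct bxe_softc *sc, uint32_t d0, uint32_t d1, uint32_t ctrl)
{
    uint32_t wb_write[3];

    wb_write[0] = d0;
    wb_write[1] = d1;
    wb_write[2] = ctrl; /* 0x20 = SOP, 0x10 = EOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
#endif
/*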
*/ 16913 /* Wait until NIG register shows 1 packet of size 0x10 */ 16914 count = 1000 * factor; 16915 while (count) { 16916 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16917 val = *BXE_SP(sc, wb_data[0]); 16918 if (val == 0x10) { 16919 break; 16920 } 16921 16922 DELAY(10000); 16923 count--; 16924 } 16925 16926 if (val != 0x10) { 16927 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16928 return (-1); 16929 } 16930 16931 /* wait until PRS register shows 1 packet */ 16932 count = (1000 * factor); 16933 while (count) { 16934 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16935 if (val == 1) { 16936 break; 16937 } 16938 16939 DELAY(10000); 16940 count--; 16941 } 16942 16943 if (val != 0x1) { 16944 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16945 return (-2); 16946 } 16947 16948 /* Reset and init BRB, PRS */ 16949 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16950 DELAY(50000); 16951 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16952 DELAY(50000); 16953 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16954 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16955 16956 /* Disable inputs of parser neighbor blocks */ 16957 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16958 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16959 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16960 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16961 16962 /* Write 0 to parser credits for CFC search request */ 16963 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16964 16965 /* send 10 Ethernet packets */ 16966 for (i = 0; i < 10; i++) { 16967 bxe_lb_pckt(sc); 16968 } 16969 16970 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 16971 count = (1000 * factor); 16972 while (count) { 16973 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16974 val = *BXE_SP(sc, wb_data[0]); 16975 if (val == 0xb0) { 16976 break; 16977 } 16978 16979 DELAY(10000); 16980 count--; 16981 } 16982 16983 if (val != 0xb0) { 16984 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16985 return (-3); 16986 } 16987 16988 /* Wait until PRS register shows 2 packets */ 16989 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16990 if (val != 2) { 16991 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16992 } 16993 16994 /* Write 1 to parser credits for CFC search request */ 16995 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 16996 16997 /* Wait until PRS register shows 3 packets */ 16998 DELAY(10000 * factor); 16999 17000 /* Wait until NIG register shows 1 packet of size 0x10 */ 17001 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 17002 if (val != 3) { 17003 BLOGE(sc, "PRS timeout val=0x%x\n", val); 17004 } 17005 17006 /* clear NIG EOP FIFO */ 17007 for (i = 0; i < 11; i++) { 17008 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 17009 } 17010 17011 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 17012 if (val != 1) { 17013 BLOGE(sc, "clear of NIG failed\n"); 17014 return (-4); 17015 } 17016 17017 /* Reset and init BRB, PRS, NIG */ 17018 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 17019 DELAY(50000); 17020 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 17021 DELAY(50000); 17022 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17023 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17024 if (!CNIC_SUPPORT(sc)) { 17025 /* set NIC mode */ 17026 REG_WR(sc, PRS_REG_NIC_MODE, 1); 17027 } 17028 17029 /* Enable inputs of parser neighbor blocks */ 17030 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 17031 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 17032 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 17033 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 17034 17035 return (0); 
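
    /*
     * The wait loops in this self test all share one shape: poll a
     * register until it reaches an expected value, with a bounded retry
     * count and a fixed delay per attempt. A hypothetical helper
     * expressing that pattern (a sketch, not part of the driver; it
     * would live at file scope):
     */
#if 0
    static int
    example_poll_reg(struct bxe_softc *sc, uint32_t reg, uint32_t expect,
                     int tries, int delay_us)
    {
        while (tries-- > 0) {
            if (REG_RD(sc, reg) == expect) {
                return (0);  /* reached the expected value */
            }
            DELAY(delay_us);
        }

        return (-1);         /* timed out */
    }
#endif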
17036} 17037 17038static void 17039bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17040{ 17041 int is_required; 17042 uint32_t val; 17043 int port; 17044 17045 is_required = 0; 17046 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17047 SHARED_HW_CFG_FAN_FAILURE_MASK); 17048 17049 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17050 is_required = 1; 17051 } 17052 /* 17053 * The fan failure mechanism is usually related to the PHY type since 17054 * the power consumption of the board is affected by the PHY. Currently, 17055 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17056 */ 17057 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17058 for (port = PORT_0; port < PORT_MAX; port++) { 17059 is_required |= elink_fan_failure_det_req(sc, 17060 sc->devinfo.shmem_base, 17061 sc->devinfo.shmem2_base, 17062 port); 17063 } 17064 } 17065 17066 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17067 17068 if (is_required == 0) { 17069 return; 17070 } 17071 17072 /* Fan failure is indicated by SPIO 5 */ 17073 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17074 17075 /* set to active low mode */ 17076 val = REG_RD(sc, MISC_REG_SPIO_INT); 17077 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17078 REG_WR(sc, MISC_REG_SPIO_INT, val); 17079 17080 /* enable interrupt to signal the IGU */ 17081 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17082 val |= MISC_SPIO_SPIO5; 17083 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17084} 17085 17086static void 17087bxe_enable_blocks_attention(struct bxe_softc *sc) 17088{ 17089 uint32_t val; 17090 17091 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17092 if (!CHIP_IS_E1x(sc)) { 17093 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17094 } else { 17095 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17096 } 17097 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17098 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17099 /* 17100 * mask read length error interrupts in brb for parser 17101 * (parsing unit and 'checksum and crc' unit) 17102 * these errors are legal (PU reads fixed length and CAC can cause 17103 * read length error on truncated packets) 17104 */ 17105 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17106 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17107 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17108 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17109 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17110 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17111/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17112/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17113 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17114 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17115 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17116/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17117/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17118 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17119 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17120 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17121 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17122/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17123/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17124 17125 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17126 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17127 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17128 if (!CHIP_IS_E1x(sc)) { 17129 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17130 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17131 } 17132 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17133 17134 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17135 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17136 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 17137/* 
REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17138 17139 if (!CHIP_IS_E1x(sc)) { 17140 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17141 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17142 } 17143 17144 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17145 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17146/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17147 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17148} 17149 17150/** 17151 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17152 * 17153 * @sc: driver handle 17154 */ 17155static int 17156bxe_init_hw_common(struct bxe_softc *sc) 17157{ 17158 uint8_t abs_func_id; 17159 uint32_t val; 17160 17161 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17162 SC_ABS_FUNC(sc)); 17163 17164 /* 17165 * take the RESET lock to protect undi_unload flow from accessing 17166 * registers while we are resetting the chip 17167 */ 17168 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17169 17170 bxe_reset_common(sc); 17171 17172 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17173 17174 val = 0xfffc; 17175 if (CHIP_IS_E3(sc)) { 17176 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17177 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17178 } 17179 17180 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17181 17182 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17183 17184 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17185 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17186 17187 if (!CHIP_IS_E1x(sc)) { 17188 /* 17189 * 4-port mode or 2-port mode we need to turn off master-enable for 17190 * everyone. After that we turn it back on for self. So, we disregard 17191 * multi-function, and always disable all functions on the given path, 17192 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17193 */ 17194 for (abs_func_id = SC_PATH(sc); 17195 abs_func_id < (E2_FUNC_MAX * 2); 17196 abs_func_id += 2) { 17197 if (abs_func_id == SC_ABS_FUNC(sc)) { 17198 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17199 continue; 17200 } 17201 17202 bxe_pretend_func(sc, abs_func_id); 17203 17204 /* clear pf enable */ 17205 bxe_pf_disable(sc); 17206 17207 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17208 } 17209 } 17210 17211 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17212 17213 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17214 17215 if (CHIP_IS_E1(sc)) { 17216 /* 17217 * enable HW interrupt from PXP on USDM overflow 17218 * bit 16 on INT_MASK_0 17219 */ 17220 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17221 } 17222 17223 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17224 bxe_init_pxp(sc); 17225 17226#ifdef __BIG_ENDIAN 17227 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17228 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17229 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17230 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17231 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17232 /* make sure this value is 0 */ 17233 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17234 17235 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17236 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17237 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17238 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17239 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17240#endif 17241 17242 ecore_ilt_init_page_size(sc, INITOP_SET); 17243 17244 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17245 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17246 } 17247 17248 /* let the HW do it's magic... 
*/ 17249 DELAY(100000); 17250 17251 /* finish PXP init */ 17252 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 17253 if (val != 1) { 17254 BLOGE(sc, "PXP2 CFG failed\n"); 17255 return (-1); 17256 } 17257 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 17258 if (val != 1) { 17259 BLOGE(sc, "PXP2 RD_INIT failed\n"); 17260 return (-1); 17261 } 17262 17263 BLOGD(sc, DBG_LOAD, "after pxp init\n"); 17264 17265 /* 17266 * Timer bug workaround for E2 only. We need to set the entire ILT to have 17267 * entries with value "0" and valid bit on. This needs to be done by the 17268 * first PF that is loaded in a path (i.e. common phase) 17269 */ 17270 if (!CHIP_IS_E1x(sc)) { 17271/* 17272 * In E2 there is a bug in the timers block that can cause function 6 / 7 17273 * (i.e. vnic3) to start even if it is marked as "scan-off". 17274 * This occurs when a different function (func2,3) is being marked 17275 * as "scan-off". Real-life scenario for example: if a driver is being 17276 * load-unloaded while func6,7 are down. This will cause the timer to access 17277 * the ilt, translate to a logical address and send a request to read/write. 17278 * Since the ilt for the function that is down is not valid, this will cause 17279 * a translation error which is unrecoverable. 17280 * The workaround is intended to make sure that when this happens nothing 17281 * fatal will occur. The workaround: 17282 * 1. First PF driver which loads on a path will: 17283 * a. After taking the chip out of reset, by using pretend, 17284 * it will write "0" to the following registers of 17285 * the other vnics. 17286 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 17287 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 17288 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 17289 * And for itself it will write '1' to 17290 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 17291 * dmae-operations (writing to pram for example.) 17292 * note: can be done for only function 6,7 but cleaner this 17293 * way. 17294 * b. Write zero+valid to the entire ILT. 17295 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 17296 * VNIC3 (of that port). The range allocated will be the 17297 * entire ILT. This is needed to prevent ILT range error. 17298 * 2. Any PF driver load flow: 17299 * a. ILT update with the physical addresses of the allocated 17300 * logical pages. 17301 * b. Wait 20msec. - note that this timeout is needed to make 17302 * sure there are no requests in one of the PXP internal 17303 * queues with "old" ILT addresses. 17304 * c. PF enable in the PGLC. 17305 * d. Clear the was_error of the PF in the PGLC. (could have 17306 * occurred while the driver was down) 17307 * e. PF enable in the CFC (WEAK + STRONG) 17308 * f. Timers scan enable 17309 * 3. PF driver unload flow: 17310 * a. Clear the Timers scan_en. 17311 * b. Polling for scan_on=0 for that PF. 17312 * c. Clear the PF enable bit in the PXP. 17313 * d. Clear the PF enable in the CFC (WEAK + STRONG) 17314 * e. Write zero+valid to all ILT entries (The valid bit must 17315 * stay set) 17316 * f. If this is VNIC 3 of a port then also init 17317 * first_timers_ilt_entry to zero and last_timers_ilt_entry 17318 * to the last entry in the ILT. 17319 * 17320 * Notes: 17321 * Currently the PF error in the PGLC is non-recoverable. 17322 * In the future there will be a recovery routine for this error. 17323 * Currently attention is masked. 17324 * Having an MCP lock on the load/unload process does not guarantee that 17325 * there is no Timer disable during Func6/7 enable. This is because the 17326 * Timers scan is currently being cleared by the MCP on FLR. 17327 * Step 2.d can be done only for PF6/7 and the driver can also check if 17328 * there is an error before clearing it. But the flow above is simpler and 17329 * more general. 17330 * All ILT entries are written by zero+valid and not just PF6/7 17331 * ILT entries since in the future the ILT entries allocation for 17332 * PF-s might be dynamic. 17333 */ 17334 struct ilt_client_info ilt_cli; 17335 struct ecore_ilt ilt; 17336 17337 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17338 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17339 17340 /* initialize dummy TM client */ 17341 ilt_cli.start = 0; 17342 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17343 ilt_cli.client_num = ILT_CLIENT_TM; 17344 17345 /* 17346 * Step 1: set zeroes to all ilt page entries with valid bit on 17347 * Step 2: set the timers first/last ilt entry to point 17348 * to the entire range to prevent ILT range error for 3rd/4th 17349 * vnic (this code assumes existence of the vnic) 17350 * 17351 * both steps performed by call to ecore_ilt_client_init_op() 17352 * with dummy TM client 17353 * 17354 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17355 * and its counterpart are split registers 17356 */ 17357 17358 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17359 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17360 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17361 17362 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17363 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17364 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17365 } 17366 17367 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17368 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17369 17370 if (!CHIP_IS_E1x(sc)) { 17371 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17372 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17373 17374 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17375 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17376 17377 /* let the HW do its magic... 
*/ 17378 do { 17379 DELAY(200000); 17380 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17381 } while (factor-- && (val != 1)); 17382 17383 if (val != 1) { 17384 BLOGE(sc, "ATC_INIT failed\n"); 17385 return (-1); 17386 } 17387 } 17388 17389 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17390 17391 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17392 17393 bxe_iov_init_dmae(sc); 17394 17395 /* clean the DMAE memory */ 17396 sc->dmae_ready = 1; 17397 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17398 17399 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17400 17401 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17402 17403 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17404 17405 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17406 17407 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17408 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17409 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17410 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17411 17412 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17413 17414 /* QM queues pointers table */ 17415 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17416 17417 /* soft reset pulse */ 17418 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17419 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17420 17421 if (CNIC_SUPPORT(sc)) 17422 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17423 17424 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17425 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17426 if (!CHIP_REV_IS_SLOW(sc)) { 17427 /* enable hw interrupt from doorbell Q */ 17428 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17429 } 17430 17431 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17432 17433 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17434 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17435 17436 if (!CHIP_IS_E1(sc)) { 17437 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17438 } 17439 17440 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17441 if (IS_MF_AFEX(sc)) { 17442 /* 17443 * configure that AFEX and VLAN headers must be 17444 * received in AFEX mode 17445 */ 17446 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17447 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17448 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17449 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17450 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17451 } else { 17452 /* 17453 * Bit-map indicating which L2 hdrs may appear 17454 * after the basic Ethernet header 17455 */ 17456 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17457 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17458 } 17459 } 17460 17461 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17462 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17463 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17464 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17465 17466 if (!CHIP_IS_E1x(sc)) { 17467 /* reset VFC memories */ 17468 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17469 VFC_MEMORIES_RST_REG_CAM_RST | 17470 VFC_MEMORIES_RST_REG_RAM_RST); 17471 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17472 VFC_MEMORIES_RST_REG_CAM_RST | 17473 VFC_MEMORIES_RST_REG_RAM_RST); 17474 17475 DELAY(20000); 17476 } 17477 17478 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17479 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17480 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17481 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17482 17483 /* sync semi rtc */ 17484 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17485 0x80000000); 17486 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17487 0x80000000); 17488 17489 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17490 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17491 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17492 17493 if (!CHIP_IS_E1x(sc)) { 17494 if (IS_MF_AFEX(sc)) { 17495 /* 17496 * configure that AFEX and VLAN headers must be 17497 * sent in AFEX mode 17498 */ 17499 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17500 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17501 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17502 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17503 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17504 } else { 17505 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17506 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17507 } 17508 } 17509 17510 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17511 17512 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17513 17514 if (CNIC_SUPPORT(sc)) { 17515 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17516 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17517 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17518 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17519 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17520 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17521 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17522 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17523 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17524 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17525 } 17526 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17527 17528 if (sizeof(union cdu_context) != 1024) { 17529 /* we currently assume that a context is 1024 bytes */ 17530 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17531 (long)sizeof(union cdu_context)); 17532 } 17533 17534 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17535 val = (4 << 24) + (0 << 12) + 1024; 17536 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17537 17538 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17539 17540 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17541 /* enable context validation interrupt from CFC */ 17542 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17543 17544 /* set the thresholds to prevent CFC/CDU race */ 17545 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17546 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17547 17548 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17549 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17550 } 17551 17552 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17553 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17554 17555 /* Reset PCIE errors for debug */ 17556 REG_WR(sc, 0x2814, 0xffffffff); 17557 REG_WR(sc, 0x3820, 0xffffffff); 17558 17559 if (!CHIP_IS_E1x(sc)) { 
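
        /*
         * The 0x2814/0x3820 writes above and the PXPCS_TL_* writes
         * below appear to follow the common write-1-to-clear convention
         * for latched error/status bits: writing a 1 clears that bit,
         * writing a 0 leaves it untouched. A sketch of the idiom, under
         * that assumption and with a hypothetical EXAMPLE_STATUS_REG:
         */
#if 0
        uint32_t example_sts = REG_RD(sc, EXAMPLE_STATUS_REG);
        REG_WR(sc, EXAMPLE_STATUS_REG, example_sts); /* clear latched bits only */
#endif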
17560        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17561               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17562                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17563        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17564               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17565                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17566                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17567        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17568               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17569                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17570                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17571    }
17572
17573    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17574
17575    if (!CHIP_IS_E1(sc)) {
17576        /* in E3 this is done in the per-port section */
17577        if (!CHIP_IS_E3(sc))
17578            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17579    }
17580
17581    if (CHIP_IS_E1H(sc)) {
17582        /* not applicable for E2 (and above ...) */
17583        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17584    }
17585
17586    if (CHIP_REV_IS_SLOW(sc)) {
17587        DELAY(200000);
17588    }
17589
17590    /* finish CFC init */
17591    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17592    if (val != 1) {
17593        BLOGE(sc, "CFC LL_INIT failed\n");
17594        return (-1);
17595    }
17596    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17597    if (val != 1) {
17598        BLOGE(sc, "CFC AC_INIT failed\n");
17599        return (-1);
17600    }
17601    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17602    if (val != 1) {
17603        BLOGE(sc, "CFC CAM_INIT failed\n");
17604        return (-1);
17605    }
17606    REG_WR(sc, CFC_REG_DEBUG0, 0);
17607
17608    if (CHIP_IS_E1(sc)) {
17609        /* read a NIG statistic to see if this is our first load since power-up */
17610        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17611        val = *BXE_SP(sc, wb_data[0]);
17612
17613        /* do internal memory self test */
17614        if ((val == 0) && bxe_int_mem_test(sc)) {
17615            BLOGE(sc, "internal mem self test failed\n");
17616            return (-1);
17617        }
17618    }
17619
17620    bxe_setup_fan_failure_detection(sc);
17621
17622    /* clear PXP2 attentions */
17623    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17624
17625    bxe_enable_blocks_attention(sc);
17626
17627    if (!CHIP_REV_IS_SLOW(sc)) {
17628        ecore_enable_blocks_parity(sc);
17629    }
17630
17631    if (!BXE_NOMCP(sc)) {
17632        if (CHIP_IS_E1x(sc)) {
17633            bxe_common_init_phy(sc);
17634        }
17635    }
17636
17637    return (0);
17638}
17639
17640/**
17641 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17642 *
17643 * @sc: driver handle
17644 */
17645static int
17646bxe_init_hw_common_chip(struct bxe_softc *sc)
17647{
17648    int rc = bxe_init_hw_common(sc);
17649
17650    if (rc) {
17651        return (rc);
17652    }
17653
17654    /* In E2 2-PORT mode, the same ext phy is used for both paths */
17655    if (!BXE_NOMCP(sc)) {
17656        bxe_common_init_phy(sc);
17657    }
17658
17659    return (0);
17660}
17661
17662static int
17663bxe_init_hw_port(struct bxe_softc *sc)
17664{
17665    int port = SC_PORT(sc);
17666    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17667    uint32_t low, high;
17668    uint32_t val;
17669
17670    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17671
17672    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17673
17674    ecore_init_block(sc, BLOCK_MISC, init_phase);
17675    ecore_init_block(sc, BLOCK_PXP, init_phase);
17676    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17677
17678    /*
17679     * Timers bug workaround: the pf_master bit in pglue is disabled at the
17680     * common phase, so we need to enable it here before any DMAE accesses
17681     * are attempted. Therefore we manually added the enable-master to the
17682     * port phase (it also happens in the function phase).
17683     */
17684    if (!CHIP_IS_E1x(sc)) {
17685        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17686    }
17687
17688    ecore_init_block(sc, BLOCK_ATC, init_phase);
17689    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17690    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17691    ecore_init_block(sc, BLOCK_QM, init_phase);
17692
17693    ecore_init_block(sc, BLOCK_TCM, init_phase);
17694    ecore_init_block(sc, BLOCK_UCM, init_phase);
17695    ecore_init_block(sc, BLOCK_CCM, init_phase);
17696    ecore_init_block(sc, BLOCK_XCM, init_phase);
17697
17698    /* QM cid (connection) count */
17699    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17700
17701    if (CNIC_SUPPORT(sc)) {
17702        ecore_init_block(sc, BLOCK_TM, init_phase);
17703        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17704        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17705    }
17706
17707    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17708
17709    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17710
17711    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17712        if (IS_MF(sc)) {
17713            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17714        } else if (sc->mtu > 4096) {
17715            if (BXE_ONE_PORT(sc)) {
17716                low = 160;
17717            } else {
17718                val = sc->mtu;
17719                /* (24*1024 + val*4)/256 */
17720                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17721            }
17722        } else {
17723            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17724        }
17725        high = (low + 56); /* 14*1024/256 */
17726        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17727        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17728    }
17729
17730    if (CHIP_IS_MODE_4_PORT(sc)) {
17731        REG_WR(sc, SC_PORT(sc) ?
17732               BRB1_REG_MAC_GUARANTIED_1 :
17733               BRB1_REG_MAC_GUARANTIED_0, 40);
17734    }
17735
17736    ecore_init_block(sc, BLOCK_PRS, init_phase);
17737    if (CHIP_IS_E3B0(sc)) {
17738        if (IS_MF_AFEX(sc)) {
17739            /* configure headers for AFEX mode */
17740            REG_WR(sc, SC_PORT(sc) ?
17741                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17742                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17743            REG_WR(sc, SC_PORT(sc) ?
17744                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17745                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17746            REG_WR(sc, SC_PORT(sc) ?
17747                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17748                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17749        } else {
17750            /* Ovlan exists only if we are in multi-function +
17751             * switch-dependent mode; in switch-independent mode
17752             * there are no ovlan headers
17753             */
17754            REG_WR(sc, SC_PORT(sc) ?
17755                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17756                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17757                   (sc->devinfo.mf_info.path_has_ovlan ?
7 : 6)); 17758 } 17759 } 17760 17761 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17762 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17763 ecore_init_block(sc, BLOCK_USDM, init_phase); 17764 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17765 17766 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17767 ecore_init_block(sc, BLOCK_USEM, init_phase); 17768 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17769 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17770 17771 ecore_init_block(sc, BLOCK_UPB, init_phase); 17772 ecore_init_block(sc, BLOCK_XPB, init_phase); 17773 17774 ecore_init_block(sc, BLOCK_PBF, init_phase); 17775 17776 if (CHIP_IS_E1x(sc)) { 17777 /* configure PBF to work without PAUSE mtu 9000 */ 17778 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17779 17780 /* update threshold */ 17781 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17782 /* update init credit */ 17783 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17784 17785 /* probe changes */ 17786 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17787 DELAY(50); 17788 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17789 } 17790 17791 if (CNIC_SUPPORT(sc)) { 17792 ecore_init_block(sc, BLOCK_SRC, init_phase); 17793 } 17794 17795 ecore_init_block(sc, BLOCK_CDU, init_phase); 17796 ecore_init_block(sc, BLOCK_CFC, init_phase); 17797 17798 if (CHIP_IS_E1(sc)) { 17799 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17800 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17801 } 17802 ecore_init_block(sc, BLOCK_HC, init_phase); 17803 17804 ecore_init_block(sc, BLOCK_IGU, init_phase); 17805 17806 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17807 /* init aeu_mask_attn_func_0/1: 17808 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17809 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17810 * bits 4-7 are used for "per vn group attention" */ 17811 val = IS_MF(sc) ? 0xF7 : 0x7; 17812 /* Enable DCBX attention for all but E1 */ 17813 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17814 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17815 17816 ecore_init_block(sc, BLOCK_NIG, init_phase); 17817 17818 if (!CHIP_IS_E1x(sc)) { 17819 /* Bit-map indicating which L2 hdrs may appear after the 17820 * basic Ethernet header 17821 */ 17822 if (IS_MF_AFEX(sc)) { 17823 REG_WR(sc, SC_PORT(sc) ? 17824 NIG_REG_P1_HDRS_AFTER_BASIC : 17825 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17826 } else { 17827 REG_WR(sc, SC_PORT(sc) ? 17828 NIG_REG_P1_HDRS_AFTER_BASIC : 17829 NIG_REG_P0_HDRS_AFTER_BASIC, 17830 IS_MF_SD(sc) ? 7 : 6); 17831 } 17832 17833 if (CHIP_IS_E3(sc)) { 17834 REG_WR(sc, SC_PORT(sc) ? 17835 NIG_REG_LLH1_MF_MODE : 17836 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17837 } 17838 } 17839 if (!CHIP_IS_E3(sc)) { 17840 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17841 } 17842 17843 if (!CHIP_IS_E1(sc)) { 17844 /* 0x2 disable mf_ov, 0x1 enable */ 17845 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17846 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17847 17848 if (!CHIP_IS_E1x(sc)) { 17849 val = 0; 17850 switch (sc->devinfo.mf_info.mf_mode) { 17851 case MULTI_FUNCTION_SD: 17852 val = 1; 17853 break; 17854 case MULTI_FUNCTION_SI: 17855 case MULTI_FUNCTION_AFEX: 17856 val = 2; 17857 break; 17858 } 17859 17860 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE :
17861                        NIG_REG_LLH0_CLS_TYPE), val);
17862        }
17863        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17864        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17865        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17866    }
17867
17868    /* If SPIO5 is set to generate interrupts, enable it for this port */
17869    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17870    if (val & MISC_SPIO_SPIO5) {
17871        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17872                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17873        val = REG_RD(sc, reg_addr);
17874        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17875        REG_WR(sc, reg_addr, val);
17876    }
17877
17878    return (0);
17879}
17880
17881static uint32_t
17882bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17883                       uint32_t reg,
17884                       uint32_t expected,
17885                       uint32_t poll_count)
17886{
17887    uint32_t cur_cnt = poll_count;
17888    uint32_t val;
17889
17890    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17891        DELAY(FLR_WAIT_INTERVAL);
17892    }
17893
17894    return (val);
17895}
17896
17897static int
17898bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17899                              uint32_t reg,
17900                              char *msg,
17901                              uint32_t poll_cnt)
17902{
17903    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17904
17905    if (val != 0) {
17906        BLOGE(sc, "%s usage count=%d\n", msg, val);
17907        return (1);
17908    }
17909
17910    return (0);
17911}
17912
17913/* Common routines with VF FLR cleanup */
17914static uint32_t
17915bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17916{
17917    /* adjust polling timeout */
17918    if (CHIP_REV_IS_EMUL(sc)) {
17919        return (FLR_POLL_CNT * 2000);
17920    }
17921
17922    if (CHIP_REV_IS_FPGA(sc)) {
17923        return (FLR_POLL_CNT * 120);
17924    }
17925
17926    return (FLR_POLL_CNT);
17927}
17928
17929static int
17930bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17931                           uint32_t poll_cnt)
17932{
17933    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17934    if (bxe_flr_clnup_poll_hw_counter(sc,
17935                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17936                                      "CFC PF usage counter timed out",
17937                                      poll_cnt)) {
17938        return (1);
17939    }
17940
17941    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17942    if (bxe_flr_clnup_poll_hw_counter(sc,
17943                                      DORQ_REG_PF_USAGE_CNT,
17944                                      "DQ PF usage counter timed out",
17945                                      poll_cnt)) {
17946        return (1);
17947    }
17948
17949    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17950    if (bxe_flr_clnup_poll_hw_counter(sc,
17951                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17952                                      "QM PF usage counter timed out",
17953                                      poll_cnt)) {
17954        return (1);
17955    }
17956
17957    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17958    if (bxe_flr_clnup_poll_hw_counter(sc,
17959                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17960                                      "Timers VNIC usage counter timed out",
17961                                      poll_cnt)) {
17962        return (1);
17963    }
17964
17965    if (bxe_flr_clnup_poll_hw_counter(sc,
17966                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17967                                      "Timers NUM_SCANS usage counter timed out",
17968                                      poll_cnt)) {
17969        return (1);
17970    }
17971
17972    /* Wait for DMAE PF usage counter to zero */
17973    if (bxe_flr_clnup_poll_hw_counter(sc,
17974                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17975                                      "DMAE command register timed out",
17976                                      poll_cnt)) {
17977        return (1);
17978    }
17979
17980    return (0);
17981}
17982
17983#define OP_GEN_PARAM(param) \
17984    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17985#define OP_GEN_TYPE(type) \
17986    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17987#define OP_GEN_AGG_VECT(index) \
17988    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17989
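/*
 * Taken together, these macros build the aggregated-interrupt command word
 * that bxe_send_final_clnup() below writes to XSDM_REG_OPERATION_GEN; a
 * sketch, using only names defined above and below:
 *
 *   op_gen = OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *            OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *            OP_GEN_AGG_VECT(clnup_func) |
 *            (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT);
 *
 * Completion is then polled at CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)
 * until the firmware writes the value 1 there.
 */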
17990static int
17991bxe_send_final_clnup(struct bxe_softc *sc,
17992                     uint8_t clnup_func,
17993                     uint32_t poll_cnt)
17994{
17995    uint32_t op_gen_command = 0;
17996    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17997                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17998    int ret = 0;
17999
18000    if (REG_RD(sc, comp_addr)) {
18001        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
18002        return (1);
18003    }
18004
18005    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
18006    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
18007    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
18008    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
18009
18010    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
18011    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
18012
18013    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
18014        BLOGE(sc, "FW final cleanup did not succeed\n");
18015        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
18016              (REG_RD(sc, comp_addr)));
18017        bxe_panic(sc, ("FLR cleanup failed\n"));
18018        return (1);
18019    }
18020
18021    /* Zero completion for next FLR */
18022    REG_WR(sc, comp_addr, 0);
18023
18024    return (ret);
18025}
18026
18027static void
18028bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
18029                       struct pbf_pN_buf_regs *regs,
18030                       uint32_t poll_count)
18031{
18032    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
18033    uint32_t cur_cnt = poll_count;
18034
18035    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
18036    crd = crd_start = REG_RD(sc, regs->crd);
18037    init_crd = REG_RD(sc, regs->init_crd);
18038
18039    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
18040    BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
18041    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
18042
18043    while ((crd != init_crd) &&
18044           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
18045            (init_crd - crd_start))) {
18046        if (cur_cnt--) {
18047            DELAY(FLR_WAIT_INTERVAL);
18048            crd = REG_RD(sc, regs->crd);
18049            crd_freed = REG_RD(sc, regs->crd_freed);
18050        } else {
18051            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
18052            BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
18053            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
18054            break;
18055        }
18056    }
18057
18058    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
18059          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18060}
18061
18062static void
18063bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
18064                       struct pbf_pN_cmd_regs *regs,
18065                       uint32_t poll_count)
18066{
18067    uint32_t occup, to_free, freed, freed_start;
18068    uint32_t cur_cnt = poll_count;
18069
18070    occup = to_free = REG_RD(sc, regs->lines_occup);
18071    freed = freed_start = REG_RD(sc, regs->lines_freed);
18072
18073    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
18074    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18075
18076    while (occup &&
18077           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
18078        if (cur_cnt--) {
18079            DELAY(FLR_WAIT_INTERVAL);
18080            occup = REG_RD(sc, regs->lines_occup);
18081            freed = REG_RD(sc, regs->lines_freed);
18082        } else {
18083            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
18084            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
18085            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n",
regs->pN, freed); 18086 break; 18087 } 18088 } 18089 18090 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18091 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18092} 18093 18094static void 18095bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18096{ 18097 struct pbf_pN_cmd_regs cmd_regs[] = { 18098 {0, (CHIP_IS_E3B0(sc)) ? 18099 PBF_REG_TQ_OCCUPANCY_Q0 : 18100 PBF_REG_P0_TQ_OCCUPANCY, 18101 (CHIP_IS_E3B0(sc)) ? 18102 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18103 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18104 {1, (CHIP_IS_E3B0(sc)) ? 18105 PBF_REG_TQ_OCCUPANCY_Q1 : 18106 PBF_REG_P1_TQ_OCCUPANCY, 18107 (CHIP_IS_E3B0(sc)) ? 18108 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18109 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18110 {4, (CHIP_IS_E3B0(sc)) ? 18111 PBF_REG_TQ_OCCUPANCY_LB_Q : 18112 PBF_REG_P4_TQ_OCCUPANCY, 18113 (CHIP_IS_E3B0(sc)) ? 18114 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18115 PBF_REG_P4_TQ_LINES_FREED_CNT} 18116 }; 18117 18118 struct pbf_pN_buf_regs buf_regs[] = { 18119 {0, (CHIP_IS_E3B0(sc)) ? 18120 PBF_REG_INIT_CRD_Q0 : 18121 PBF_REG_P0_INIT_CRD , 18122 (CHIP_IS_E3B0(sc)) ? 18123 PBF_REG_CREDIT_Q0 : 18124 PBF_REG_P0_CREDIT, 18125 (CHIP_IS_E3B0(sc)) ? 18126 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18127 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18128 {1, (CHIP_IS_E3B0(sc)) ? 18129 PBF_REG_INIT_CRD_Q1 : 18130 PBF_REG_P1_INIT_CRD, 18131 (CHIP_IS_E3B0(sc)) ? 18132 PBF_REG_CREDIT_Q1 : 18133 PBF_REG_P1_CREDIT, 18134 (CHIP_IS_E3B0(sc)) ? 18135 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18136 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18137 {4, (CHIP_IS_E3B0(sc)) ? 18138 PBF_REG_INIT_CRD_LB_Q : 18139 PBF_REG_P4_INIT_CRD, 18140 (CHIP_IS_E3B0(sc)) ? 18141 PBF_REG_CREDIT_LB_Q : 18142 PBF_REG_P4_CREDIT, 18143 (CHIP_IS_E3B0(sc)) ? 18144 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18145 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18146 }; 18147 18148 int i; 18149 18150 /* Verify the command queues are flushed P0, P1, P4 */ 18151 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18152 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18153 } 18154 18155 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18156 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18157 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18158 } 18159} 18160 18161static void 18162bxe_hw_enable_status(struct bxe_softc *sc) 18163{ 18164 uint32_t val; 18165 18166 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18167 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18168 18169 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18170 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18171 18172 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18173 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18174 18175 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18176 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18177 18178 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18179 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18180 18181 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18182 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18183 18184 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18185 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18186 18187 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18188 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18189} 18190 18191static int 18192bxe_pf_flr_clnup(struct bxe_softc *sc) 18193{ 18194 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18195 18196 BLOGD(sc, DBG_LOAD, 
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18197 18198 /* Re-enable PF target read access */ 18199 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18200 18201 /* Poll HW usage counters */ 18202 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18203 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18204 return (-1); 18205 } 18206 18207 /* Zero the igu 'trailing edge' and 'leading edge' */ 18208 18209 /* Send the FW cleanup command */ 18210 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18211 return (-1); 18212 } 18213 18214 /* ATC cleanup */ 18215 18216 /* Verify TX hw is flushed */ 18217 bxe_tx_hw_flushed(sc, poll_cnt); 18218 18219 /* Wait 100ms (not adjusted according to platform) */ 18220 DELAY(100000); 18221 18222 /* Verify no pending pci transactions */ 18223 if (bxe_is_pcie_pending(sc)) { 18224 BLOGE(sc, "PCIE Transactions still pending\n"); 18225 } 18226 18227 /* Debug */ 18228 bxe_hw_enable_status(sc); 18229 18230 /* 18231 * Master enable - Due to WB DMAE writes performed before this 18232 * register is re-initialized as part of the regular function init 18233 */ 18234 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18235 18236 return (0); 18237} 18238 18239#if 0 18240static void 18241bxe_init_searcher(struct bxe_softc *sc) 18242{ 18243 int port = SC_PORT(sc); 18244 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18245 /* T1 hash bits value determines the T1 number of entries */ 18246 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18247} 18248#endif 18249 18250static int 18251bxe_init_hw_func(struct bxe_softc *sc) 18252{ 18253 int port = SC_PORT(sc); 18254 int func = SC_FUNC(sc); 18255 int init_phase = PHASE_PF0 + func; 18256 struct ecore_ilt *ilt = sc->ilt; 18257 uint16_t cdu_ilt_start; 18258 uint32_t addr, val; 18259 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18260 int i, main_mem_width, rc; 18261 18262 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18263 18264 /* FLR cleanup */ 18265 if (!CHIP_IS_E1x(sc)) { 18266 rc = bxe_pf_flr_clnup(sc); 18267 if (rc) { 18268 BLOGE(sc, "FLR cleanup failed!\n"); 18269 // XXX bxe_fw_dump(sc); 18270 // XXX bxe_idle_chk(sc); 18271 return (rc); 18272 } 18273 } 18274 18275 /* set MSI reconfigure capability */ 18276 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18277 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18278 val = REG_RD(sc, addr); 18279 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18280 REG_WR(sc, addr, val); 18281 } 18282 18283 ecore_init_block(sc, BLOCK_PXP, init_phase); 18284 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18285 18286 ilt = sc->ilt; 18287 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18288 18289#if 0 18290 if (IS_SRIOV(sc)) { 18291 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; 18292 } 18293 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); 18294 18295#if (BXE_FIRST_VF_CID > 0) 18296 /* 18297 * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes 18298 * those of the VFs, so start line should be reset 18299 */ 18300 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18301#endif 18302#endif 18303 18304 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18305 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18306 ilt->lines[cdu_ilt_start + i].page_mapping = 18307 sc->context[i].vcxt_dma.paddr; 18308 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18309 } 18310 ecore_ilt_init_op(sc, INITOP_SET); 18311 18312#if 0 18313 if (!CONFIGURE_NIC_MODE(sc)) { 18314 bxe_init_searcher(sc); 18315 REG_WR(sc, PRS_REG_NIC_MODE, 0); 18316 BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); 18317 } else 18318#endif 18319 { 18320 /* Set NIC mode */ 18321 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18322 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18323 } 18324 18325 if (!CHIP_IS_E1x(sc)) { 18326 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18327 18328 /* Turn on a single ISR mode in IGU if driver is going to use 18329 * INT#x or MSI 18330 */ 18331 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18332 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18333 } 18334 18335 /* 18336 * Timers workaround bug: function init part. 18337 * Need to wait 20msec after initializing ILT, 18338 * needed to make sure there are no requests in 18339 * one of the PXP internal queues with "old" ILT addresses 18340 */ 18341 DELAY(20000); 18342 18343 /* 18344 * Master enable - Due to WB DMAE writes performed before this 18345 * register is re-initialized as part of the regular function 18346 * init 18347 */ 18348 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18349 /* Enable the function in IGU */ 18350 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18351 } 18352 18353 sc->dmae_ready = 1; 18354 18355 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18356 18357 if (!CHIP_IS_E1x(sc)) 18358 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18359 18360 ecore_init_block(sc, BLOCK_ATC, init_phase); 18361 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18362 ecore_init_block(sc, BLOCK_NIG, init_phase); 18363 ecore_init_block(sc, BLOCK_SRC, init_phase); 18364 ecore_init_block(sc, BLOCK_MISC, init_phase); 18365 ecore_init_block(sc, BLOCK_TCM, init_phase); 18366 ecore_init_block(sc, BLOCK_UCM, init_phase); 18367 ecore_init_block(sc, BLOCK_CCM, init_phase); 18368 ecore_init_block(sc, BLOCK_XCM, init_phase); 18369 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18370 ecore_init_block(sc, BLOCK_USEM, init_phase); 18371 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18372 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18373 18374 if (!CHIP_IS_E1x(sc)) 18375 REG_WR(sc, QM_REG_PF_EN, 1); 18376 18377 if (!CHIP_IS_E1x(sc)) { 18378 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18379 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18380 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18381 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18382 } 18383 
ecore_init_block(sc, BLOCK_QM, init_phase); 18384 18385 ecore_init_block(sc, BLOCK_TM, init_phase); 18386 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18387 18388 bxe_iov_init_dq(sc); 18389 18390 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18391 ecore_init_block(sc, BLOCK_PRS, init_phase); 18392 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18393 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18394 ecore_init_block(sc, BLOCK_USDM, init_phase); 18395 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18396 ecore_init_block(sc, BLOCK_UPB, init_phase); 18397 ecore_init_block(sc, BLOCK_XPB, init_phase); 18398 ecore_init_block(sc, BLOCK_PBF, init_phase); 18399 if (!CHIP_IS_E1x(sc)) 18400 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18401 18402 ecore_init_block(sc, BLOCK_CDU, init_phase); 18403 18404 ecore_init_block(sc, BLOCK_CFC, init_phase); 18405 18406 if (!CHIP_IS_E1x(sc)) 18407 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18408 18409 if (IS_MF(sc)) { 18410 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18411 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18412 } 18413 18414 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18415 18416 /* HC init per function */ 18417 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18418 if (CHIP_IS_E1H(sc)) { 18419 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18420 18421 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18422 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18423 } 18424 ecore_init_block(sc, BLOCK_HC, init_phase); 18425 18426 } else { 18427 int num_segs, sb_idx, prod_offset; 18428 18429 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18430 18431 if (!CHIP_IS_E1x(sc)) { 18432 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18433 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18434 } 18435 18436 ecore_init_block(sc, BLOCK_IGU, init_phase); 18437 18438 if (!CHIP_IS_E1x(sc)) { 18439 int dsb_idx = 0; 18440 /** 18441 * Producer memory: 18442 * E2 mode: address 0-135 match to the mapping memory; 18443 * 136 - PF0 default prod; 137 - PF1 default prod; 18444 * 138 - PF2 default prod; 139 - PF3 default prod; 18445 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18446 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18447 * 144-147 reserved. 18448 * 18449 * E1.5 mode - In backward compatible mode; 18450 * for non default SB; each even line in the memory 18451 * holds the U producer and each odd line hold 18452 * the C producer. The first 128 producers are for 18453 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18454 * producers are for the DSB for each PF. 18455 * Each PF has five segments: (the order inside each 18456 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18457 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18458 * 144-147 attn prods; 18459 */ 18460 /* non-default-status-blocks */ 18461 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18462 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18463 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18464 prod_offset = (sc->igu_base_sb + sb_idx) * 18465 num_segs; 18466 18467 for (i = 0; i < num_segs; i++) { 18468 addr = IGU_REG_PROD_CONS_MEMORY + 18469 (prod_offset + i) * 4; 18470 REG_WR(sc, addr, 0); 18471 } 18472 /* send consumer update with value 0 */ 18473 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18474 USTORM_ID, 0, IGU_INT_NOP, 1); 18475 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18476 } 18477 18478 /* default-status-blocks */ 18479 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 
18480                           IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18481
18482            if (CHIP_IS_MODE_4_PORT(sc))
18483                dsb_idx = SC_FUNC(sc);
18484            else
18485                dsb_idx = SC_VN(sc);
18486
18487            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18488                           IGU_BC_BASE_DSB_PROD + dsb_idx :
18489                           IGU_NORM_BASE_DSB_PROD + dsb_idx);
18490
18491            /*
18492             * igu prods come in chunks of E1HVN_MAX (4);
18493             * it does not matter what the current chip mode is
18494             */
18495            for (i = 0; i < (num_segs * E1HVN_MAX);
18496                 i += E1HVN_MAX) {
18497                addr = IGU_REG_PROD_CONS_MEMORY +
18498                       (prod_offset + i)*4;
18499                REG_WR(sc, addr, 0);
18500            }
18501            /* send consumer update with 0 */
18502            if (CHIP_INT_MODE_IS_BC(sc)) {
18503                bxe_ack_sb(sc, sc->igu_dsb_id,
18504                           USTORM_ID, 0, IGU_INT_NOP, 1);
18505                bxe_ack_sb(sc, sc->igu_dsb_id,
18506                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18507                bxe_ack_sb(sc, sc->igu_dsb_id,
18508                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18509                bxe_ack_sb(sc, sc->igu_dsb_id,
18510                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18511                bxe_ack_sb(sc, sc->igu_dsb_id,
18512                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18513            } else {
18514                bxe_ack_sb(sc, sc->igu_dsb_id,
18515                           USTORM_ID, 0, IGU_INT_NOP, 1);
18516                bxe_ack_sb(sc, sc->igu_dsb_id,
18517                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18518            }
18519            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18520
18521            /* !!! these should become driver const once
18522                   rf-tool supports split-68 const */
18523            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18524            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18525            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18526            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18527            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18528            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18529        }
18530    }
18531
18532    /* Reset PCIE errors for debug */
18533    REG_WR(sc, 0x2114, 0xffffffff);
18534    REG_WR(sc, 0x2120, 0xffffffff);
18535
18536    if (CHIP_IS_E1x(sc)) {
18537        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18538        main_mem_base = HC_REG_MAIN_MEMORY +
18539                        SC_PORT(sc) * (main_mem_size * 4);
18540        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18541        main_mem_width = 8;
18542
18543        val = REG_RD(sc, main_mem_prty_clr);
18544        if (val) {
18545            BLOGD(sc, DBG_LOAD,
18546                  "Parity errors in HC block during function init (0x%x)!\n",
18547                  val);
18548        }
18549
18550        /* Clear "false" parity errors in MSI-X table */
18551        for (i = main_mem_base;
18552             i < main_mem_base + main_mem_size * 4;
18553             i += main_mem_width) {
18554            bxe_read_dmae(sc, i, main_mem_width / 4);
18555            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18556                           i, main_mem_width / 4);
18557        }
18558        /* Clear HC parity attention */
18559        REG_RD(sc, main_mem_prty_clr);
18560    }
18561
18562#if 1
18563    /* Enable STORMs SP logging */
18564    REG_WR8(sc, BAR_USTRORM_INTMEM +
18565            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18566    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18567            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18568    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18569            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18570    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18571            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18572#endif
18573
18574    elink_phy_probe(&sc->link_params);
18575
18576    return (0);
18577}
18578
18579static void
18580bxe_link_reset(struct bxe_softc *sc)
18581{
18582    if (!BXE_NOMCP(sc)) {
18583        BXE_PHY_LOCK(sc);
18584        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18585        BXE_PHY_UNLOCK(sc);
18586    } else {
18587        if (!CHIP_REV_IS_SLOW(sc)) {
18588            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18589        }
18590    }
18591}
18592
18593static void
18594bxe_reset_port(struct bxe_softc *sc)
18595{
18596    int port =
SC_PORT(sc); 18597 uint32_t val; 18598 18599 /* reset physical Link */ 18600 bxe_link_reset(sc); 18601 18602 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18603 18604 /* Do not rcv packets to BRB */ 18605 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18606 /* Do not direct rcv packets that are not for MCP to the BRB */ 18607 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18608 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18609 18610 /* Configure AEU */ 18611 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18612 18613 DELAY(100000); 18614 18615 /* Check for BRB port occupancy */ 18616 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18617 if (val) { 18618 BLOGD(sc, DBG_LOAD, 18619 "BRB1 is not empty, %d blocks are occupied\n", val); 18620 } 18621 18622 /* TODO: Close Doorbell port? */ 18623} 18624 18625static void 18626bxe_ilt_wr(struct bxe_softc *sc, 18627 uint32_t index, 18628 bus_addr_t addr) 18629{ 18630 int reg; 18631 uint32_t wb_write[2]; 18632 18633 if (CHIP_IS_E1(sc)) { 18634 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18635 } else { 18636 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18637 } 18638 18639 wb_write[0] = ONCHIP_ADDR1(addr); 18640 wb_write[1] = ONCHIP_ADDR2(addr); 18641 REG_WR_DMAE(sc, reg, wb_write, 2); 18642} 18643 18644static void 18645bxe_clear_func_ilt(struct bxe_softc *sc, 18646 uint32_t func) 18647{ 18648 uint32_t i, base = FUNC_ILT_BASE(func); 18649 for (i = base; i < base + ILT_PER_FUNC; i++) { 18650 bxe_ilt_wr(sc, i, 0); 18651 } 18652} 18653 18654static void 18655bxe_reset_func(struct bxe_softc *sc) 18656{ 18657 struct bxe_fastpath *fp; 18658 int port = SC_PORT(sc); 18659 int func = SC_FUNC(sc); 18660 int i; 18661 18662 /* Disable the function in the FW */ 18663 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18664 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18665 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18666 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18667 18668 /* FP SBs */ 18669 FOR_EACH_ETH_QUEUE(sc, i) { 18670 fp = &sc->fp[i]; 18671 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18672 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18673 SB_DISABLED); 18674 } 18675 18676#if 0 18677 if (CNIC_LOADED(sc)) { 18678 /* CNIC SB */ 18679 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18680 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 18681 (bxe_cnic_fw_sb_id(sc)), SB_DISABLED); 18682 } 18683#endif 18684 18685 /* SP SB */ 18686 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18687 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18688 SB_DISABLED); 18689 18690 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18691 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18692 } 18693 18694 /* Configure IGU */ 18695 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18696 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18697 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18698 } else { 18699 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18700 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18701 } 18702 18703 if (CNIC_LOADED(sc)) { 18704 /* Disable Timer scan */ 18705 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18706 /* 18707 * Wait for at least 10ms and up to 2 second for the timers 18708 * scan to complete 18709 */ 18710 for (i = 0; i < 200; i++) { 18711 DELAY(10000); 18712 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18713 break; 18714 } 18715 } 18716 18717 /* Clear ILT */ 18718 bxe_clear_func_ilt(sc, func); 18719 18720 /* 18721 * Timers workaround bug for E2: if this is vnic-3, 18722 * we need to set 
the entire ilt range for these timers.
18723     */
18724    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18725        struct ilt_client_info ilt_cli;
18726        /* use dummy TM client */
18727        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18728        ilt_cli.start = 0;
18729        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18730        ilt_cli.client_num = ILT_CLIENT_TM;
18731
18732        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18733    }
18734
18735    /* this assumes that reset_port() was called before reset_func() */
18736    if (!CHIP_IS_E1x(sc)) {
18737        bxe_pf_disable(sc);
18738    }
18739
18740    sc->dmae_ready = 0;
18741}
18742
18743static int
18744bxe_gunzip_init(struct bxe_softc *sc)
18745{
18746    return (0);
18747}
18748
18749static void
18750bxe_gunzip_end(struct bxe_softc *sc)
18751{
18752    return;
18753}
18754
18755static int
18756bxe_init_firmware(struct bxe_softc *sc)
18757{
18758    if (CHIP_IS_E1(sc)) {
18759        ecore_init_e1_firmware(sc);
18760        sc->iro_array = e1_iro_arr;
18761    } else if (CHIP_IS_E1H(sc)) {
18762        ecore_init_e1h_firmware(sc);
18763        sc->iro_array = e1h_iro_arr;
18764    } else if (!CHIP_IS_E1x(sc)) {
18765        ecore_init_e2_firmware(sc);
18766        sc->iro_array = e2_iro_arr;
18767    } else {
18768        BLOGE(sc, "Unsupported chip revision\n");
18769        return (-1);
18770    }
18771
18772    return (0);
18773}
18774
18775static void
18776bxe_release_firmware(struct bxe_softc *sc)
18777{
18778    /* Do nothing */
18779    return;
18780}
18781
18782static int
18783ecore_gunzip(struct bxe_softc *sc,
18784             const uint8_t *zbuf,
18785             int len)
18786{
18787    /* XXX : Implement... */
18788    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18789    return (FALSE);
18790}
18791
18792static void
18793ecore_reg_wr_ind(struct bxe_softc *sc,
18794                 uint32_t addr,
18795                 uint32_t val)
18796{
18797    bxe_reg_wr_ind(sc, addr, val);
18798}
18799
18800static void
18801ecore_write_dmae_phys_len(struct bxe_softc *sc,
18802                          bus_addr_t phys_addr,
18803                          uint32_t addr,
18804                          uint32_t len)
18805{
18806    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18807}
18808
18809void
18810ecore_storm_memset_struct(struct bxe_softc *sc,
18811                          uint32_t addr,
18812                          size_t size,
18813                          uint32_t *data)
18814{
18815    uint8_t i;
18816    for (i = 0; i < size/4; i++) {
18817        REG_WR(sc, addr + (i * 4), data[i]);
18818    }
18819}
18820
18821
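/*
 * A minimal usage sketch for ecore_storm_memset_struct(): it copies a
 * dword-aligned structure into STORM internal memory one 32-bit word at a
 * time. 'some_fw_offset' below is a hypothetical placeholder; a real caller
 * must use a firmware-defined offset:
 *
 *   uint32_t zero[2] = { 0, 0 };
 *   ecore_storm_memset_struct(sc, BAR_XSTRORM_INTMEM + some_fw_offset,
 *                             sizeof(zero), zero);
 */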