1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30#define BXE_DRIVER_VERSION "1.78.91"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define them here
 * as zero (0) for older kernels that don't include this changeset, thereby
 * masking the functionality.
 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60#define BXE_DEF_SB_ATT_IDX 0x0001
61#define BXE_DEF_SB_IDX     0x0002
62
/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the
 * per-function HW initialization.
 */
67#define FLR_WAIT_USEC     10000 /* 10 msecs */
68#define FLR_WAIT_INTERVAL 50    /* usecs */
69#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
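
/*
 * Illustrative sketch only (not the driver's actual FLR code): the three
 * constants above bound a polling loop that re-reads a register every
 * FLR_WAIT_INTERVAL usecs, up to FLR_POLL_CNT (200) iterations, i.e.
 * FLR_WAIT_USEC (10 msecs) total. The register and expected value below
 * are placeholders:
 *
 *     uint32_t poll;
 *
 *     for (poll = 0; poll < FLR_POLL_CNT; poll++) {
 *         if (REG_RD(sc, some_flr_credit_reg) == expected_value)
 *             break;
 *         DELAY(FLR_WAIT_INTERVAL);
 *     }
 *     if (poll == FLR_POLL_CNT)
 *         BLOGE(sc, "FLR cleanup poll timed out\n");
 */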
70
71struct pbf_pN_buf_regs {
72    int pN;
73    uint32_t init_crd;
74    uint32_t crd;
75    uint32_t crd_freed;
76};
77
78struct pbf_pN_cmd_regs {
79    int pN;
80    uint32_t lines_occup;
81    uint32_t lines_freed;
82};
83
84/*
85 * PCI Device ID Table used by bxe_probe().
86 */
87#define BXE_DEVDESC_MAX 64
88static struct bxe_device_type bxe_devs[] = {
89    {
90        BRCM_VENDORID,
91        CHIP_NUM_57710,
92        PCI_ANY_ID, PCI_ANY_ID,
93        "QLogic NetXtreme II BCM57710 10GbE"
94    },
95    {
96        BRCM_VENDORID,
97        CHIP_NUM_57711,
98        PCI_ANY_ID, PCI_ANY_ID,
99        "QLogic NetXtreme II BCM57711 10GbE"
100    },
101    {
102        BRCM_VENDORID,
103        CHIP_NUM_57711E,
104        PCI_ANY_ID, PCI_ANY_ID,
105        "QLogic NetXtreme II BCM57711E 10GbE"
106    },
107    {
108        BRCM_VENDORID,
109        CHIP_NUM_57712,
110        PCI_ANY_ID, PCI_ANY_ID,
111        "QLogic NetXtreme II BCM57712 10GbE"
112    },
113    {
114        BRCM_VENDORID,
115        CHIP_NUM_57712_MF,
116        PCI_ANY_ID, PCI_ANY_ID,
117        "QLogic NetXtreme II BCM57712 MF 10GbE"
118    },
119    {
120        BRCM_VENDORID,
121        CHIP_NUM_57800,
122        PCI_ANY_ID, PCI_ANY_ID,
123        "QLogic NetXtreme II BCM57800 10GbE"
124    },
125    {
126        BRCM_VENDORID,
127        CHIP_NUM_57800_MF,
128        PCI_ANY_ID, PCI_ANY_ID,
129        "QLogic NetXtreme II BCM57800 MF 10GbE"
130    },
131    {
132        BRCM_VENDORID,
133        CHIP_NUM_57810,
134        PCI_ANY_ID, PCI_ANY_ID,
135        "QLogic NetXtreme II BCM57810 10GbE"
136    },
137    {
138        BRCM_VENDORID,
139        CHIP_NUM_57810_MF,
140        PCI_ANY_ID, PCI_ANY_ID,
141        "QLogic NetXtreme II BCM57810 MF 10GbE"
142    },
143    {
144        BRCM_VENDORID,
145        CHIP_NUM_57811,
146        PCI_ANY_ID, PCI_ANY_ID,
147        "QLogic NetXtreme II BCM57811 10GbE"
148    },
149    {
150        BRCM_VENDORID,
151        CHIP_NUM_57811_MF,
152        PCI_ANY_ID, PCI_ANY_ID,
153        "QLogic NetXtreme II BCM57811 MF 10GbE"
154    },
155    {
156        BRCM_VENDORID,
157        CHIP_NUM_57840_4_10,
158        PCI_ANY_ID, PCI_ANY_ID,
159        "QLogic NetXtreme II BCM57840 4x10GbE"
160    },
161    {
162        QLOGIC_VENDORID,
163        CHIP_NUM_57840_4_10,
164        PCI_ANY_ID, PCI_ANY_ID,
165        "QLogic NetXtreme II BCM57840 4x10GbE"
166    },
167    {
168        BRCM_VENDORID,
169        CHIP_NUM_57840_2_20,
170        PCI_ANY_ID, PCI_ANY_ID,
171        "QLogic NetXtreme II BCM57840 2x20GbE"
172    },
173    {
174        BRCM_VENDORID,
175        CHIP_NUM_57840_MF,
176        PCI_ANY_ID, PCI_ANY_ID,
177        "QLogic NetXtreme II BCM57840 MF 10GbE"
178    },
179    {
180        0, 0, 0, 0, NULL
181    }
182};
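
/*
 * Illustrative sketch of how a probe routine typically walks the table
 * above (bxe_probe() itself is declared below and defined later in this
 * file; this is not its verbatim body, and the bxe_vid/bxe_did/bxe_name
 * field names are assumptions):
 *
 *     struct bxe_device_type *t = bxe_devs;
 *     uint16_t vid = pci_get_vendor(dev);
 *     uint16_t did = pci_get_device(dev);
 *
 *     while (t->bxe_name != NULL) {
 *         if ((vid == t->bxe_vid) && (did == t->bxe_did)) {
 *             device_set_desc(dev, t->bxe_name);
 *             return (BUS_PROBE_DEFAULT);
 *         }
 *         t++;
 *     }
 *
 *     return (ENXIO);
 */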
183
184MALLOC_DECLARE(M_BXE_ILT);
185MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
186
187/*
188 * FreeBSD device entry points.
189 */
190static int bxe_probe(device_t);
191static int bxe_attach(device_t);
192static int bxe_detach(device_t);
193static int bxe_shutdown(device_t);
194
195
/*
 * FreeBSD KLD module/device interface event handler methods.
 */
199static device_method_t bxe_methods[] = {
200    /* Device interface (device_if.h) */
201    DEVMETHOD(device_probe,     bxe_probe),
202    DEVMETHOD(device_attach,    bxe_attach),
203    DEVMETHOD(device_detach,    bxe_detach),
204    DEVMETHOD(device_shutdown,  bxe_shutdown),
205    /* Bus interface (bus_if.h) */
206    DEVMETHOD(bus_print_child,  bus_generic_print_child),
207    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
208    KOBJMETHOD_END
209};
210
211/*
212 * FreeBSD KLD Module data declaration
213 */
214static driver_t bxe_driver = {
215    "bxe",                   /* module name */
216    bxe_methods,             /* event handler */
217    sizeof(struct bxe_softc) /* extra data */
218};
219
220MODULE_DEPEND(bxe, pci, 1, 1, 1);
221MODULE_DEPEND(bxe, ether, 1, 1, 1);
222DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0);
223
224DEBUGNET_DEFINE(bxe);
225
226/* resources needed for unloading a previously loaded device */
227
228#define BXE_PREV_WAIT_NEEDED 1
229struct mtx bxe_prev_mtx;
230MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
231struct bxe_prev_list_node {
232    LIST_ENTRY(bxe_prev_list_node) node;
233    uint8_t bus;
234    uint8_t slot;
235    uint8_t path;
236    uint8_t aer; /* XXX automatic error recovery */
237    uint8_t undi;
238};
239static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
240
241static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
242
243/* Tunable device values... */
244
245SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
246    "bxe driver parameters");
247
248/* Debug */
249unsigned long bxe_debug = 0;
250SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
251             &bxe_debug, 0, "Debug logging mode");
252
253/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
254static int bxe_interrupt_mode = INTR_MODE_MSIX;
255SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
256           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
257
258/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
259static int bxe_queue_count = 4;
260SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
261           &bxe_queue_count, 0, "Multi-Queue queue count");
262
263/* max number of buffers per queue (default RX_BD_USABLE) */
264static int bxe_max_rx_bufs = 0;
265SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
266           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
267
268/* Host interrupt coalescing RX tick timer (usecs) */
269static int bxe_hc_rx_ticks = 25;
270SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
271           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
272
273/* Host interrupt coalescing TX tick timer (usecs) */
274static int bxe_hc_tx_ticks = 50;
275SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
276           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
277
278/* Maximum number of Rx packets to process at a time */
279static int bxe_rx_budget = 0xffffffff;
280SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_RDTUN,
281           &bxe_rx_budget, 0, "Rx processing budget");
282
283/* Maximum LRO aggregation size */
284static int bxe_max_aggregation_size = 0;
285SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_RDTUN,
286           &bxe_max_aggregation_size, 0, "max aggregation size");
287
288/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
289static int bxe_mrrs = -1;
290SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
291           &bxe_mrrs, 0, "PCIe maximum read request size");
292
293/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
294static int bxe_autogreeen = 0;
295SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
296           &bxe_autogreeen, 0, "AutoGrEEEn support");
297
298/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
299static int bxe_udp_rss = 0;
300SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
301           &bxe_udp_rss, 0, "UDP RSS support");
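
/*
 * All of the hw.bxe.* OIDs above are CTLFLAG_RDTUN, so they may be set as
 * loader tunables before the driver attaches, e.g. in /boot/loader.conf
 * (the values below are only examples):
 *
 *     hw.bxe.interrupt_mode="2"
 *     hw.bxe.queue_count="8"
 *     hw.bxe.hc_rx_ticks="25"
 *     hw.bxe.udp_rss="1"
 */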
302
303
304#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
305
306#define STATS_OFFSET32(stat_name)                   \
307    (offsetof(struct bxe_eth_stats, stat_name) / 4)
308
309#define Q_STATS_OFFSET32(stat_name)                   \
310    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
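
/*
 * Worked example: STATS_OFFSET32() turns a byte offset within
 * struct bxe_eth_stats into a 32-bit word index. If, say,
 * total_bytes_received_hi lived at byte offset 0x10 (an assumed value for
 * illustration only), STATS_OFFSET32(total_bytes_received_hi) would be
 * 0x10 / 4 = 4, i.e. the fifth dword of the structure. The stats tables
 * below store these dword indices together with the counter width in
 * bytes (4 or 8).
 */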
311
312static const struct {
313    uint32_t offset;
314    uint32_t size;
315    uint32_t flags;
316#define STATS_FLAGS_PORT  1
317#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
318#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
319    char string[STAT_NAME_LEN];
320} bxe_eth_stats_arr[] = {
321    { STATS_OFFSET32(total_bytes_received_hi),
322                8, STATS_FLAGS_BOTH, "rx_bytes" },
323    { STATS_OFFSET32(error_bytes_received_hi),
324                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
325    { STATS_OFFSET32(total_unicast_packets_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
327    { STATS_OFFSET32(total_multicast_packets_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
329    { STATS_OFFSET32(total_broadcast_packets_received_hi),
330                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
331    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
332                8, STATS_FLAGS_PORT, "rx_crc_errors" },
333    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
334                8, STATS_FLAGS_PORT, "rx_align_errors" },
335    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
336                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
337    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
338                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
339    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
340                8, STATS_FLAGS_PORT, "rx_fragments" },
341    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
342                8, STATS_FLAGS_PORT, "rx_jabbers" },
343    { STATS_OFFSET32(no_buff_discard_hi),
344                8, STATS_FLAGS_BOTH, "rx_discards" },
345    { STATS_OFFSET32(mac_filter_discard),
346                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
347    { STATS_OFFSET32(mf_tag_discard),
348                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
349    { STATS_OFFSET32(pfc_frames_received_hi),
350                8, STATS_FLAGS_PORT, "pfc_frames_received" },
351    { STATS_OFFSET32(pfc_frames_sent_hi),
352                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
353    { STATS_OFFSET32(brb_drop_hi),
354                8, STATS_FLAGS_PORT, "rx_brb_discard" },
355    { STATS_OFFSET32(brb_truncate_hi),
356                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
357    { STATS_OFFSET32(pause_frames_received_hi),
358                8, STATS_FLAGS_PORT, "rx_pause_frames" },
359    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
360                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
361    { STATS_OFFSET32(nig_timer_max),
362                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
363    { STATS_OFFSET32(total_bytes_transmitted_hi),
364                8, STATS_FLAGS_BOTH, "tx_bytes" },
365    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
366                8, STATS_FLAGS_PORT, "tx_error_bytes" },
367    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
369    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
370                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
371    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
372                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
373    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
374                8, STATS_FLAGS_PORT, "tx_mac_errors" },
375    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
376                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
377    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
378                8, STATS_FLAGS_PORT, "tx_single_collisions" },
379    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
380                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
381    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
382                8, STATS_FLAGS_PORT, "tx_deferred" },
383    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
384                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
385    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
386                8, STATS_FLAGS_PORT, "tx_late_collisions" },
387    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
388                8, STATS_FLAGS_PORT, "tx_total_collisions" },
389    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
390                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
391    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
392                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
394                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
396                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
397    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
398                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
399    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
400                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
401    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
402                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
403    { STATS_OFFSET32(pause_frames_sent_hi),
404                8, STATS_FLAGS_PORT, "tx_pause_frames" },
405    { STATS_OFFSET32(total_tpa_aggregations_hi),
406                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
407    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
408                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
409    { STATS_OFFSET32(total_tpa_bytes_hi),
410                8, STATS_FLAGS_FUNC, "tpa_bytes"},
411    { STATS_OFFSET32(eee_tx_lpi),
412                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
413    { STATS_OFFSET32(rx_calls),
414                4, STATS_FLAGS_FUNC, "rx_calls"},
415    { STATS_OFFSET32(rx_pkts),
416                4, STATS_FLAGS_FUNC, "rx_pkts"},
417    { STATS_OFFSET32(rx_tpa_pkts),
418                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
419    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
420                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
421    { STATS_OFFSET32(rx_bxe_service_rxsgl),
422                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
423    { STATS_OFFSET32(rx_jumbo_sge_pkts),
424                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
425    { STATS_OFFSET32(rx_soft_errors),
426                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
427    { STATS_OFFSET32(rx_hw_csum_errors),
428                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
429    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
430                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
431    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
432                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
433    { STATS_OFFSET32(rx_budget_reached),
434                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
435    { STATS_OFFSET32(tx_pkts),
436                4, STATS_FLAGS_FUNC, "tx_pkts"},
437    { STATS_OFFSET32(tx_soft_errors),
438                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
439    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
440                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
441    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
442                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
443    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
445    { STATS_OFFSET32(tx_ofld_frames_lso),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
447    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
448                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
449    { STATS_OFFSET32(tx_encap_failures),
450                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
451    { STATS_OFFSET32(tx_hw_queue_full),
452                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
453    { STATS_OFFSET32(tx_hw_max_queue_depth),
454                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
455    { STATS_OFFSET32(tx_dma_mapping_failure),
456                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
457    { STATS_OFFSET32(tx_max_drbr_queue_depth),
458                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
459    { STATS_OFFSET32(tx_window_violation_std),
460                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
461    { STATS_OFFSET32(tx_window_violation_tso),
462                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
463    { STATS_OFFSET32(tx_chain_lost_mbuf),
464                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
465    { STATS_OFFSET32(tx_frames_deferred),
466                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
467    { STATS_OFFSET32(tx_queue_xoff),
468                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
469    { STATS_OFFSET32(mbuf_defrag_attempts),
470                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
471    { STATS_OFFSET32(mbuf_defrag_failures),
472                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
473    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
474                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
475    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
476                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
477    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
479    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
481    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
483    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
485    { STATS_OFFSET32(mbuf_alloc_tx),
486                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
487    { STATS_OFFSET32(mbuf_alloc_rx),
488                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
489    { STATS_OFFSET32(mbuf_alloc_sge),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
491    { STATS_OFFSET32(mbuf_alloc_tpa),
492                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
493    { STATS_OFFSET32(tx_queue_full_return),
494                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
495    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
496                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
497    { STATS_OFFSET32(tx_request_link_down_failures),
498                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
499    { STATS_OFFSET32(bd_avail_too_less_failures),
500                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
501    { STATS_OFFSET32(tx_mq_not_empty),
502                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
503    { STATS_OFFSET32(nsegs_path1_errors),
504                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
505    { STATS_OFFSET32(nsegs_path2_errors),
506                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
509};
510
511static const struct {
512    uint32_t offset;
513    uint32_t size;
514    char string[STAT_NAME_LEN];
515} bxe_eth_q_stats_arr[] = {
516    { Q_STATS_OFFSET32(total_bytes_received_hi),
517                8, "rx_bytes" },
518    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
519                8, "rx_ucast_packets" },
520    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
521                8, "rx_mcast_packets" },
522    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
523                8, "rx_bcast_packets" },
524    { Q_STATS_OFFSET32(no_buff_discard_hi),
525                8, "rx_discards" },
526    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
527                8, "tx_bytes" },
528    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
529                8, "tx_ucast_packets" },
530    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
531                8, "tx_mcast_packets" },
532    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
533                8, "tx_bcast_packets" },
534    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
535                8, "tpa_aggregations" },
536    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
537                8, "tpa_aggregated_frames"},
538    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
539                8, "tpa_bytes"},
540    { Q_STATS_OFFSET32(rx_calls),
541                4, "rx_calls"},
542    { Q_STATS_OFFSET32(rx_pkts),
543                4, "rx_pkts"},
544    { Q_STATS_OFFSET32(rx_tpa_pkts),
545                4, "rx_tpa_pkts"},
546    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
547                4, "rx_erroneous_jumbo_sge_pkts"},
548    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
549                4, "rx_bxe_service_rxsgl"},
550    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
551                4, "rx_jumbo_sge_pkts"},
552    { Q_STATS_OFFSET32(rx_soft_errors),
553                4, "rx_soft_errors"},
554    { Q_STATS_OFFSET32(rx_hw_csum_errors),
555                4, "rx_hw_csum_errors"},
556    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
557                4, "rx_ofld_frames_csum_ip"},
558    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
559                4, "rx_ofld_frames_csum_tcp_udp"},
560    { Q_STATS_OFFSET32(rx_budget_reached),
561                4, "rx_budget_reached"},
562    { Q_STATS_OFFSET32(tx_pkts),
563                4, "tx_pkts"},
564    { Q_STATS_OFFSET32(tx_soft_errors),
565                4, "tx_soft_errors"},
566    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
567                4, "tx_ofld_frames_csum_ip"},
568    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
569                4, "tx_ofld_frames_csum_tcp"},
570    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
571                4, "tx_ofld_frames_csum_udp"},
572    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
573                4, "tx_ofld_frames_lso"},
574    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
575                4, "tx_ofld_frames_lso_hdr_splits"},
576    { Q_STATS_OFFSET32(tx_encap_failures),
577                4, "tx_encap_failures"},
578    { Q_STATS_OFFSET32(tx_hw_queue_full),
579                4, "tx_hw_queue_full"},
580    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
581                4, "tx_hw_max_queue_depth"},
582    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
583                4, "tx_dma_mapping_failure"},
584    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
585                4, "tx_max_drbr_queue_depth"},
586    { Q_STATS_OFFSET32(tx_window_violation_std),
587                4, "tx_window_violation_std"},
588    { Q_STATS_OFFSET32(tx_window_violation_tso),
589                4, "tx_window_violation_tso"},
590    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
591                4, "tx_chain_lost_mbuf"},
592    { Q_STATS_OFFSET32(tx_frames_deferred),
593                4, "tx_frames_deferred"},
594    { Q_STATS_OFFSET32(tx_queue_xoff),
595                4, "tx_queue_xoff"},
596    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
597                4, "mbuf_defrag_attempts"},
598    { Q_STATS_OFFSET32(mbuf_defrag_failures),
599                4, "mbuf_defrag_failures"},
600    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
601                4, "mbuf_rx_bd_alloc_failed"},
602    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
603                4, "mbuf_rx_bd_mapping_failed"},
604    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
605                4, "mbuf_rx_tpa_alloc_failed"},
606    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
607                4, "mbuf_rx_tpa_mapping_failed"},
608    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
609                4, "mbuf_rx_sge_alloc_failed"},
610    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
611                4, "mbuf_rx_sge_mapping_failed"},
612    { Q_STATS_OFFSET32(mbuf_alloc_tx),
613                4, "mbuf_alloc_tx"},
614    { Q_STATS_OFFSET32(mbuf_alloc_rx),
615                4, "mbuf_alloc_rx"},
616    { Q_STATS_OFFSET32(mbuf_alloc_sge),
617                4, "mbuf_alloc_sge"},
618    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
619                4, "mbuf_alloc_tpa"},
620    { Q_STATS_OFFSET32(tx_queue_full_return),
621                4, "tx_queue_full_return"},
622    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
623                4, "bxe_tx_mq_sc_state_failures"},
624    { Q_STATS_OFFSET32(tx_request_link_down_failures),
625                4, "tx_request_link_down_failures"},
626    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
627                4, "bd_avail_too_less_failures"},
628    { Q_STATS_OFFSET32(tx_mq_not_empty),
629                4, "tx_mq_not_empty"},
630    { Q_STATS_OFFSET32(nsegs_path1_errors),
631                4, "nsegs_path1_errors"},
632    { Q_STATS_OFFSET32(nsegs_path2_errors),
633                4, "nsegs_path2_errors"}
636};
637
638#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
639#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
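
/*
 * Illustrative sketch of how the tables above are typically consumed when
 * exporting statistics (the real sysctl plumbing lives later in this file;
 * 'sc->eth_stats' and the HILO_U64() combining macro are assumptions here):
 *
 *     int i;
 *     uint32_t *stats = (uint32_t *)&sc->eth_stats;
 *     uint64_t val;
 *
 *     for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
 *         if (bxe_eth_stats_arr[i].size == 8) {
 *             // 64-bit counters are kept as a hi/lo dword pair
 *             val = HILO_U64(stats[bxe_eth_stats_arr[i].offset],
 *                            stats[bxe_eth_stats_arr[i].offset + 1]);
 *         } else {
 *             val = stats[bxe_eth_stats_arr[i].offset];
 *         }
 *         // ... export 'val' under the name bxe_eth_stats_arr[i].string ...
 *     }
 */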
640
641
642static void    bxe_cmng_fns_init(struct bxe_softc *sc,
643                                 uint8_t          read_cfg,
644                                 uint8_t          cmng_type);
645static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
646static void    storm_memset_cmng(struct bxe_softc *sc,
647                                 struct cmng_init *cmng,
648                                 uint8_t          port);
649static void    bxe_set_reset_global(struct bxe_softc *sc);
650static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
651static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
652                                 int              engine);
653static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
654static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
655                                   uint8_t          *global,
656                                   uint8_t          print);
657static void    bxe_int_disable(struct bxe_softc *sc);
658static int     bxe_release_leader_lock(struct bxe_softc *sc);
659static void    bxe_pf_disable(struct bxe_softc *sc);
660static void    bxe_free_fp_buffers(struct bxe_softc *sc);
661static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
662                                      struct bxe_fastpath *fp,
663                                      uint16_t            rx_bd_prod,
664                                      uint16_t            rx_cq_prod,
665                                      uint16_t            rx_sge_prod);
666static void    bxe_link_report_locked(struct bxe_softc *sc);
667static void    bxe_link_report(struct bxe_softc *sc);
668static void    bxe_link_status_update(struct bxe_softc *sc);
669static void    bxe_periodic_callout_func(void *xsc);
670static void    bxe_periodic_start(struct bxe_softc *sc);
671static void    bxe_periodic_stop(struct bxe_softc *sc);
672static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
673                                    uint16_t prev_index,
674                                    uint16_t index);
675static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
676                                     int                 queue);
677static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
678                                     uint16_t            index);
679static uint8_t bxe_txeof(struct bxe_softc *sc,
680                         struct bxe_fastpath *fp);
681static void    bxe_task_fp(struct bxe_fastpath *fp);
682static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
683                                     struct mbuf      *m,
684                                     uint8_t          contents);
685static int     bxe_alloc_mem(struct bxe_softc *sc);
686static void    bxe_free_mem(struct bxe_softc *sc);
687static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
688static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
689static int     bxe_interrupt_attach(struct bxe_softc *sc);
690static void    bxe_interrupt_detach(struct bxe_softc *sc);
691static void    bxe_set_rx_mode(struct bxe_softc *sc);
692static int     bxe_init_locked(struct bxe_softc *sc);
693static int     bxe_stop_locked(struct bxe_softc *sc);
694static void    bxe_sp_err_timeout_task(void *arg, int pending);
695void           bxe_parity_recover(struct bxe_softc *sc);
696void           bxe_handle_error(struct bxe_softc *sc);
697static __noinline int bxe_nic_load(struct bxe_softc *sc,
698                                   int              load_mode);
699static __noinline int bxe_nic_unload(struct bxe_softc *sc,
700                                     uint32_t         unload_mode,
701                                     uint8_t          keep_link);
702
703static void bxe_handle_sp_tq(void *context, int pending);
704static void bxe_handle_fp_tq(void *context, int pending);
705
706static int bxe_add_cdev(struct bxe_softc *sc);
707static void bxe_del_cdev(struct bxe_softc *sc);
708int bxe_grc_dump(struct bxe_softc *sc);
709static int bxe_alloc_buf_rings(struct bxe_softc *sc);
710static void bxe_free_buf_rings(struct bxe_softc *sc);
711
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be a multiple of 8) */
713uint32_t
714calc_crc32(uint8_t  *crc32_packet,
715           uint32_t crc32_length,
716           uint32_t crc32_seed,
717           uint8_t  complement)
718{
    uint32_t byte         = 0;
    uint32_t bit          = 0;
    uint8_t  msb          = 0;
    uint32_t temp         = 0;
    uint32_t shft         = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }
734
735    for (byte = 0; byte < crc32_length; byte = byte + 1)
736    {
737        current_byte = crc32_packet[byte];
738        for (bit = 0; bit < 8; bit = bit + 1)
739        {
740            /* msb = crc32_result[31]; */
741            msb = (uint8_t)(crc32_result >> 31);
742
743            crc32_result = crc32_result << 1;
744
            /* if (msb != current_byte[bit]) */
746            if (msb != (0x1 & (current_byte >> bit)))
747            {
748                crc32_result = crc32_result ^ CRC32_POLY;
749                /* crc32_result[0] = 1 */
750                crc32_result |= 1;
751            }
752        }
753    }
754
755    /* Last step is to:
756     * 1. "mirror" every bit
757     * 2. swap the 4 bytes
758     * 3. complement each bit
759     */
760
761    /* Mirror */
762    temp = crc32_result;
763    shft = sizeof(crc32_result) * 8 - 1;
764
765    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
766    {
767        temp <<= 1;
768        temp |= crc32_result & 1;
        shft--;
770    }
771
772    /* temp[31-bit] = crc32_result[bit] */
773    temp <<= shft;
774
775    /* Swap */
776    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
777    {
778        uint32_t t0, t1, t2, t3;
779        t0 = (0x000000ff & (temp >> 24));
780        t1 = (0x0000ff00 & (temp >> 8));
781        t2 = (0x00ff0000 & (temp << 8));
782        t3 = (0xff000000 & (temp << 24));
783        crc32_result = t0 | t1 | t2 | t3;
784    }
785
786    /* Complement */
787    if (complement)
788    {
789        crc32_result = ~crc32_result;
790    }
791
792    return (crc32_result);
793}
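
/*
 * Example use of calc_crc32() (illustrative; the buffer contents and seed
 * are placeholders). The length must be a multiple of 8 bytes, and the
 * customary CRC32-C usage is an all-ones seed with the final complement:
 *
 *     uint8_t  buf[64];
 *     uint32_t crc;
 *
 *     memset(buf, 0, sizeof(buf));
 *     crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */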
794
795int
796bxe_test_bit(int                    nr,
797             volatile unsigned long *addr)
798{
799    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
800}
801
802void
803bxe_set_bit(unsigned int           nr,
804            volatile unsigned long *addr)
805{
806    atomic_set_acq_long(addr, (1 << nr));
807}
808
809void
810bxe_clear_bit(int                    nr,
811              volatile unsigned long *addr)
812{
813    atomic_clear_acq_long(addr, (1 << nr));
814}
815
816int
817bxe_test_and_set_bit(int                    nr,
818                       volatile unsigned long *addr)
819{
820    unsigned long x;
821    nr = (1 << nr);
822    do {
823        x = *addr;
824    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
825    // if (x & nr) bit_was_set; else bit_was_not_set;
826    return (x & nr);
827}
828
829int
830bxe_test_and_clear_bit(int                    nr,
831                       volatile unsigned long *addr)
832{
833    unsigned long x;
834    nr = (1 << nr);
835    do {
836        x = *addr;
837    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
838    // if (x & nr) bit_was_set; else bit_was_not_set;
839    return (x & nr);
840}
841
842int
843bxe_cmpxchg(volatile int *addr,
844            int          old,
845            int          new)
846{
847    int x;
848    do {
849        x = *addr;
850    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
851    return (x);
852}
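
/*
 * These helpers provide Linux-style atomic bit operations on an unsigned
 * long flag word. Illustrative usage against a hypothetical flag (the bit
 * name below is an assumption, not one of the driver's real state bits):
 *
 *     volatile unsigned long flags = 0;
 *     #define EXAMPLE_BIT_BUSY 3
 *
 *     bxe_set_bit(EXAMPLE_BIT_BUSY, &flags);
 *     if (bxe_test_and_clear_bit(EXAMPLE_BIT_BUSY, &flags)) {
 *         // the bit was set and has now been cleared atomically
 *     }
 */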
853
854/*
855 * Get DMA memory from the OS.
856 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load() call and saves the physical address of those buffers.
 * When this callback is used, bus_dmamap_load() itself returns 0, so any
 * mapping failure is reported back to the caller by zeroing the saved
 * physical address and segment count in the bxe_dma argument.
862 *
863 * Returns:
864 *   Nothing.
865 */
866static void
867bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
868{
869    struct bxe_dma *dma = arg;
870
871    if (error) {
872        dma->paddr = 0;
873        dma->nseg  = 0;
874        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
875    } else {
876        dma->paddr = segs->ds_addr;
877        dma->nseg  = nseg;
878    }
879}
880
881/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; if all of the required resources cannot be acquired, any
 * that were acquired are released.
885 *
886 * Returns:
887 *   0 = Success, !0 = Failure
888 */
889int
890bxe_dma_alloc(struct bxe_softc *sc,
891              bus_size_t       size,
892              struct bxe_dma   *dma,
893              const char       *msg)
894{
895    int rc;
896
897    if (dma->size > 0) {
898        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
899              (unsigned long)dma->size);
900        return (1);
901    }
902
903    memset(dma, 0, sizeof(*dma)); /* sanity */
904    dma->sc   = sc;
905    dma->size = size;
906    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
907
908    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
909                            BCM_PAGE_SIZE,      /* alignment */
910                            0,                  /* boundary limit */
911                            BUS_SPACE_MAXADDR,  /* restricted low */
912                            BUS_SPACE_MAXADDR,  /* restricted hi */
913                            NULL,               /* addr filter() */
914                            NULL,               /* addr filter() arg */
915                            size,               /* max map size */
916                            1,                  /* num discontinuous */
917                            size,               /* max seg size */
918                            BUS_DMA_ALLOCNOW,   /* flags */
919                            NULL,               /* lock() */
920                            NULL,               /* lock() arg */
921                            &dma->tag);         /* returned dma tag */
922    if (rc != 0) {
923        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
924        memset(dma, 0, sizeof(*dma));
925        return (1);
926    }
927
928    rc = bus_dmamem_alloc(dma->tag,
929                          (void **)&dma->vaddr,
930                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
931                          &dma->map);
932    if (rc != 0) {
933        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
934        bus_dma_tag_destroy(dma->tag);
935        memset(dma, 0, sizeof(*dma));
936        return (1);
937    }
938
939    rc = bus_dmamap_load(dma->tag,
940                         dma->map,
941                         dma->vaddr,
942                         size,
943                         bxe_dma_map_addr, /* BLOGD in here */
944                         dma,
945                         BUS_DMA_NOWAIT);
946    if (rc != 0) {
947        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
948        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
949        bus_dma_tag_destroy(dma->tag);
950        memset(dma, 0, sizeof(*dma));
951        return (1);
952    }
953
954    return (0);
955}
956
957void
958bxe_dma_free(struct bxe_softc *sc,
959             struct bxe_dma   *dma)
960{
961    if (dma->size > 0) {
962        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
963
964        bus_dmamap_sync(dma->tag, dma->map,
965                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
966        bus_dmamap_unload(dma->tag, dma->map);
967        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
968        bus_dma_tag_destroy(dma->tag);
969    }
970
971    memset(dma, 0, sizeof(*dma));
972}
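
/*
 * Typical usage of the DMA helpers above (illustrative; the size and
 * message string are stand-ins for whichever block is being allocated).
 * A block is allocated once, its bus address taken from dma.paddr, and
 * the same struct bxe_dma handed back to bxe_dma_free() on teardown:
 *
 *     struct bxe_dma dma;
 *
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
 *         return (ENOMEM);
 *     }
 *     // dma.vaddr = kernel virtual address, dma.paddr = bus address
 *
 *     bxe_dma_free(sc, &dma);
 */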
973
974/*
 * These indirect read and write routines are only used during init.
976 * The locking is handled by the MCP.
977 */
978
979void
980bxe_reg_wr_ind(struct bxe_softc *sc,
981               uint32_t         addr,
982               uint32_t         val)
983{
984    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
985    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
986    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
987}
988
989uint32_t
990bxe_reg_rd_ind(struct bxe_softc *sc,
991               uint32_t         addr)
992{
993    uint32_t val;
994
995    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
996    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
997    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
998
999    return (val);
1000}
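
/*
 * Example of the indirect access pattern above (the GRC offset is only a
 * placeholder): each call tunnels one 32-bit GRC access through the PCI
 * config space window, so these routines are only suitable for the init
 * path noted above:
 *
 *     uint32_t val;
 *
 *     bxe_reg_wr_ind(sc, 0x2000, 0x1);
 *     val = bxe_reg_rd_ind(sc, 0x2000);
 */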
1001
1002static int
1003bxe_acquire_hw_lock(struct bxe_softc *sc,
1004                    uint32_t         resource)
1005{
1006    uint32_t lock_status;
1007    uint32_t resource_bit = (1 << resource);
1008    int func = SC_FUNC(sc);
1009    uint32_t hw_lock_control_reg;
1010    int cnt;
1011
1012    /* validate the resource is within range */
1013    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1014        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1015            " resource_bit 0x%x\n", resource, resource_bit);
1016        return (-1);
1017    }
1018
1019    if (func <= 5) {
1020        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1021    } else {
1022        hw_lock_control_reg =
1023                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1024    }
1025
1026    /* validate the resource is not already taken */
1027    lock_status = REG_RD(sc, hw_lock_control_reg);
1028    if (lock_status & resource_bit) {
1029        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1030              resource, lock_status, resource_bit);
1031        return (-1);
1032    }
1033
1034    /* try every 5ms for 5 seconds */
1035    for (cnt = 0; cnt < 1000; cnt++) {
1036        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1037        lock_status = REG_RD(sc, hw_lock_control_reg);
1038        if (lock_status & resource_bit) {
1039            return (0);
1040        }
1041        DELAY(5000);
1042    }
1043
1044    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1045        resource, resource_bit);
1046    return (-1);
1047}
1048
1049static int
1050bxe_release_hw_lock(struct bxe_softc *sc,
1051                    uint32_t         resource)
1052{
1053    uint32_t lock_status;
1054    uint32_t resource_bit = (1 << resource);
1055    int func = SC_FUNC(sc);
1056    uint32_t hw_lock_control_reg;
1057
1058    /* validate the resource is within range */
1059    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1060        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1061            " resource_bit 0x%x\n", resource, resource_bit);
1062        return (-1);
1063    }
1064
1065    if (func <= 5) {
1066        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1067    } else {
1068        hw_lock_control_reg =
1069                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1070    }
1071
1072    /* validate the resource is currently taken */
1073    lock_status = REG_RD(sc, hw_lock_control_reg);
1074    if (!(lock_status & resource_bit)) {
1075        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1076              resource, lock_status, resource_bit);
1077        return (-1);
1078    }
1079
1080    REG_WR(sc, hw_lock_control_reg, resource_bit);
1081    return (0);
1082}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}

/*
 * The per-pf misc lock must be acquired before the per-port mcp lock.
 * Otherwise, had we done things the other way around and two pfs from the
 * same port attempted to access nvram at the same time, we could run into
 * a scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per-pf misc lock and performs the eeprom access.
 * pf A finishes and unlocks the per-pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per-port lock while pf B is still working (!).
 * mcp takes the per-port lock and corrupts pf B's access (and/or has its
 * own access corrupted by pf B).
 */
1108static int
1109bxe_acquire_nvram_lock(struct bxe_softc *sc)
1110{
1111    int port = SC_PORT(sc);
1112    int count, i;
1113    uint32_t val = 0;
1114
1115    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1116    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1117
1118    /* adjust timeout for emulation/FPGA */
1119    count = NVRAM_TIMEOUT_COUNT;
1120    if (CHIP_REV_IS_SLOW(sc)) {
1121        count *= 100;
1122    }
1123
1124    /* request access to nvram interface */
1125    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1126           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1127
1128    for (i = 0; i < count*10; i++) {
1129        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1130        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1131            break;
1132        }
1133
1134        DELAY(5);
1135    }
1136
1137    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1138        BLOGE(sc, "Cannot get access to nvram interface "
1139            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1140            port, val);
1141        return (-1);
1142    }
1143
1144    return (0);
1145}
1146
1147static int
1148bxe_release_nvram_lock(struct bxe_softc *sc)
1149{
1150    int port = SC_PORT(sc);
1151    int count, i;
1152    uint32_t val = 0;
1153
1154    /* adjust timeout for emulation/FPGA */
1155    count = NVRAM_TIMEOUT_COUNT;
1156    if (CHIP_REV_IS_SLOW(sc)) {
1157        count *= 100;
1158    }
1159
1160    /* relinquish nvram interface */
1161    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1162           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1163
1164    for (i = 0; i < count*10; i++) {
1165        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1166        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1167            break;
1168        }
1169
1170        DELAY(5);
1171    }
1172
1173    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1174        BLOGE(sc, "Cannot free access to nvram interface "
1175            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1176            port, val);
1177        return (-1);
1178    }
1179
1180    /* release HW lock: protect against other PFs in PF Direct Assignment */
1181    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1182
1183    return (0);
1184}
1185
1186static void
1187bxe_enable_nvram_access(struct bxe_softc *sc)
1188{
1189    uint32_t val;
1190
1191    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1192
1193    /* enable both bits, even on read */
1194    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1195           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1196}
1197
1198static void
1199bxe_disable_nvram_access(struct bxe_softc *sc)
1200{
1201    uint32_t val;
1202
1203    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1204
1205    /* disable both bits, even after read */
1206    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1207           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1208                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1209}
1210
1211static int
1212bxe_nvram_read_dword(struct bxe_softc *sc,
1213                     uint32_t         offset,
1214                     uint32_t         *ret_val,
1215                     uint32_t         cmd_flags)
1216{
1217    int count, i, rc;
1218    uint32_t val;
1219
1220    /* build the command word */
1221    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1222
1223    /* need to clear DONE bit separately */
1224    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1225
1226    /* address of the NVRAM to read from */
1227    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1228           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1229
1230    /* issue a read command */
1231    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1232
1233    /* adjust timeout for emulation/FPGA */
1234    count = NVRAM_TIMEOUT_COUNT;
1235    if (CHIP_REV_IS_SLOW(sc)) {
1236        count *= 100;
1237    }
1238
1239    /* wait for completion */
1240    *ret_val = 0;
1241    rc = -1;
1242    for (i = 0; i < count; i++) {
1243        DELAY(5);
1244        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1245
1246        if (val & MCPR_NVM_COMMAND_DONE) {
1247            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /*
             * We read nvram data in cpu order, but the caller expects an
             * array of bytes; converting to big-endian does the work.
             */
1252            *ret_val = htobe32(val);
1253            rc = 0;
1254            break;
1255        }
1256    }
1257
1258    if (rc == -1) {
1259        BLOGE(sc, "nvram read timeout expired "
1260            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1261            offset, cmd_flags, val);
1262    }
1263
1264    return (rc);
1265}
1266
1267static int
1268bxe_nvram_read(struct bxe_softc *sc,
1269               uint32_t         offset,
1270               uint8_t          *ret_buf,
1271               int              buf_size)
1272{
1273    uint32_t cmd_flags;
1274    uint32_t val;
1275    int rc;
1276
1277    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1278        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1279              offset, buf_size);
1280        return (-1);
1281    }
1282
1283    if ((offset + buf_size) > sc->devinfo.flash_size) {
1284        BLOGE(sc, "Invalid parameter, "
1285                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1286              offset, buf_size, sc->devinfo.flash_size);
1287        return (-1);
1288    }
1289
1290    /* request access to nvram interface */
1291    rc = bxe_acquire_nvram_lock(sc);
1292    if (rc) {
1293        return (rc);
1294    }
1295
1296    /* enable access to nvram interface */
1297    bxe_enable_nvram_access(sc);
1298
1299    /* read the first word(s) */
1300    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1301    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1302        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1303        memcpy(ret_buf, &val, 4);
1304
1305        /* advance to the next dword */
1306        offset += sizeof(uint32_t);
1307        ret_buf += sizeof(uint32_t);
1308        buf_size -= sizeof(uint32_t);
1309        cmd_flags = 0;
1310    }
1311
1312    if (rc == 0) {
1313        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1314        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1315        memcpy(ret_buf, &val, 4);
1316    }
1317
1318    /* disable access to nvram interface */
1319    bxe_disable_nvram_access(sc);
1320    bxe_release_nvram_lock(sc);
1321
1322    return (rc);
1323}
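
/*
 * Example call into bxe_nvram_read() (illustrative only; the offset is a
 * placeholder). The offset and length must be 4-byte aligned and the range
 * must lie within sc->devinfo.flash_size:
 *
 *     uint8_t buf[16];
 *
 *     if (bxe_nvram_read(sc, 0x100, buf, sizeof(buf)) != 0) {
 *         BLOGE(sc, "nvram read failed\n");
 *     }
 */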
1324
1325static int
1326bxe_nvram_write_dword(struct bxe_softc *sc,
1327                      uint32_t         offset,
1328                      uint32_t         val,
1329                      uint32_t         cmd_flags)
1330{
1331    int count, i, rc;
1332
1333    /* build the command word */
1334    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1335
1336    /* need to clear DONE bit separately */
1337    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1338
1339    /* write the data */
1340    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1341
1342    /* address of the NVRAM to write to */
1343    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1344           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1345
1346    /* issue the write command */
1347    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1348
1349    /* adjust timeout for emulation/FPGA */
1350    count = NVRAM_TIMEOUT_COUNT;
1351    if (CHIP_REV_IS_SLOW(sc)) {
1352        count *= 100;
1353    }
1354
1355    /* wait for completion */
1356    rc = -1;
1357    for (i = 0; i < count; i++) {
1358        DELAY(5);
1359        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1360        if (val & MCPR_NVM_COMMAND_DONE) {
1361            rc = 0;
1362            break;
1363        }
1364    }
1365
1366    if (rc == -1) {
1367        BLOGE(sc, "nvram write timeout expired "
1368            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1369            offset, cmd_flags, val);
1370    }
1371
1372    return (rc);
1373}
1374
1375#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
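
/*
 * Worked example: BYTE_OFFSET() gives the bit position of a byte within its
 * containing dword. For offset 0x7, (0x7 & 0x03) == 3, so BYTE_OFFSET(0x7)
 * == 24, and bxe_nvram_write1() below masks and inserts the single byte at
 * bits 31:24 of the aligned dword before writing it back.
 */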
1376
1377static int
1378bxe_nvram_write1(struct bxe_softc *sc,
1379                 uint32_t         offset,
1380                 uint8_t          *data_buf,
1381                 int              buf_size)
1382{
1383    uint32_t cmd_flags;
1384    uint32_t align_offset;
1385    uint32_t val;
1386    int rc;
1387
1388    if ((offset + buf_size) > sc->devinfo.flash_size) {
1389        BLOGE(sc, "Invalid parameter, "
1390                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1391              offset, buf_size, sc->devinfo.flash_size);
1392        return (-1);
1393    }
1394
1395    /* request access to nvram interface */
1396    rc = bxe_acquire_nvram_lock(sc);
1397    if (rc) {
1398        return (rc);
1399    }
1400
1401    /* enable access to nvram interface */
1402    bxe_enable_nvram_access(sc);
1403
1404    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1405    align_offset = (offset & ~0x03);
1406    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1407
1408    if (rc == 0) {
1409        val &= ~(0xff << BYTE_OFFSET(offset));
1410        val |= (*data_buf << BYTE_OFFSET(offset));
1411
        /*
         * nvram data is returned as an array of bytes;
         * convert it back to cpu order.
         */
1415        val = be32toh(val);
1416
1417        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1418    }
1419
1420    /* disable access to nvram interface */
1421    bxe_disable_nvram_access(sc);
1422    bxe_release_nvram_lock(sc);
1423
1424    return (rc);
1425}
1426
1427static int
1428bxe_nvram_write(struct bxe_softc *sc,
1429                uint32_t         offset,
1430                uint8_t          *data_buf,
1431                int              buf_size)
1432{
1433    uint32_t cmd_flags;
1434    uint32_t val;
1435    uint32_t written_so_far;
1436    int rc;
1437
1438    if (buf_size == 1) {
1439        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1440    }
1441
1442    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1443        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1444              offset, buf_size);
1445        return (-1);
1446    }
1447
1448    if (buf_size == 0) {
1449        return (0); /* nothing to do */
1450    }
1451
1452    if ((offset + buf_size) > sc->devinfo.flash_size) {
1453        BLOGE(sc, "Invalid parameter, "
1454                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1455              offset, buf_size, sc->devinfo.flash_size);
1456        return (-1);
1457    }
1458
1459    /* request access to nvram interface */
1460    rc = bxe_acquire_nvram_lock(sc);
1461    if (rc) {
1462        return (rc);
1463    }
1464
1465    /* enable access to nvram interface */
1466    bxe_enable_nvram_access(sc);
1467
1468    written_so_far = 0;
1469    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1470    while ((written_so_far < buf_size) && (rc == 0)) {
1471        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1472            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1473        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1474            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1475        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1476            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1477        }
1478
1479        memcpy(&val, data_buf, 4);
1480
1481        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1482
1483        /* advance to the next dword */
1484        offset += sizeof(uint32_t);
1485        data_buf += sizeof(uint32_t);
1486        written_so_far += sizeof(uint32_t);
1487        cmd_flags = 0;
1488    }
1489
1490    /* disable access to nvram interface */
1491    bxe_disable_nvram_access(sc);
1492    bxe_release_nvram_lock(sc);
1493
1494    return (rc);
1495}
1496
1497/* copy command into DMAE command memory and set DMAE command Go */
1498void
1499bxe_post_dmae(struct bxe_softc    *sc,
1500              struct dmae_cmd *dmae,
1501              int                 idx)
1502{
1503    uint32_t cmd_offset;
1504    int i;
1505
1506    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1507    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1508        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1509    }
1510
1511    REG_WR(sc, dmae_reg_go_c[idx], 1);
1512}
1513
1514uint32_t
1515bxe_dmae_opcode_add_comp(uint32_t opcode,
1516                         uint8_t  comp_type)
1517{
1518    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1519                      DMAE_CMD_C_TYPE_ENABLE));
1520}
1521
1522uint32_t
1523bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1524{
1525    return (opcode & ~DMAE_CMD_SRC_RESET);
1526}
1527
1528uint32_t
1529bxe_dmae_opcode(struct bxe_softc *sc,
1530                uint8_t          src_type,
1531                uint8_t          dst_type,
1532                uint8_t          with_comp,
1533                uint8_t          comp_type)
1534{
1535    uint32_t opcode = 0;
1536
1537    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1538               (dst_type << DMAE_CMD_DST_SHIFT));
1539
1540    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1541
1542    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1543
1544    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1545               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1546
1547    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1548
1549#ifdef __BIG_ENDIAN
1550    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1551#else
1552    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1553#endif
1554
1555    if (with_comp) {
1556        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1557    }
1558
1559    return (opcode);
1560}
1561
1562static void
1563bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1564                        struct dmae_cmd *dmae,
1565                        uint8_t             src_type,
1566                        uint8_t             dst_type)
1567{
1568    memset(dmae, 0, sizeof(struct dmae_cmd));
1569
1570    /* set the opcode */
1571    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1572                                   TRUE, DMAE_COMP_PCI);
1573
1574    /* fill in the completion parameters */
1575    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1576    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1577    dmae->comp_val     = DMAE_COMP_VAL;
1578}
1579
1580/* issue a DMAE command over the init channel and wait for completion */
1581static int
1582bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1583                         struct dmae_cmd *dmae)
1584{
1585    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1586    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1587
1588    BXE_DMAE_LOCK(sc);
1589
1590    /* reset completion */
1591    *wb_comp = 0;
1592
1593    /* post the command on the channel used for initializations */
1594    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1595
1596    /* wait for completion */
1597    DELAY(5);
1598
1599    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1600        if (!timeout ||
1601            (sc->recovery_state != BXE_RECOVERY_DONE &&
1602             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1603            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1604                *wb_comp, sc->recovery_state);
1605            BXE_DMAE_UNLOCK(sc);
1606            return (DMAE_TIMEOUT);
1607        }
1608
1609        timeout--;
1610        DELAY(50);
1611    }
1612
1613    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1614        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1615                *wb_comp, sc->recovery_state);
1616        BXE_DMAE_UNLOCK(sc);
1617        return (DMAE_PCI_ERROR);
1618    }
1619
1620    BXE_DMAE_UNLOCK(sc);
1621    return (0);
1622}
1623
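/*
 * Read 'len32' dwords from GRC address 'src_addr' into the slowpath
 * wb_data buffer using DMAE, falling back to direct (or indirect on E1)
 * register reads while DMAE is not yet ready.
 */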
1624void
1625bxe_read_dmae(struct bxe_softc *sc,
1626              uint32_t         src_addr,
1627              uint32_t         len32)
1628{
1629    struct dmae_cmd dmae;
1630    uint32_t *data;
1631    int i, rc;
1632
1633    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1634
1635    if (!sc->dmae_ready) {
1636        data = BXE_SP(sc, wb_data[0]);
1637
1638        for (i = 0; i < len32; i++) {
1639            data[i] = (CHIP_IS_E1(sc)) ?
1640                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1641                          REG_RD(sc, (src_addr + (i * 4)));
1642        }
1643
1644        return;
1645    }
1646
1647    /* set opcode and fixed command fields */
1648    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1649
1650    /* fill in addresses and len */
1651    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1652    dmae.src_addr_hi = 0;
1653    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1654    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1655    dmae.len         = len32;
1656
1657    /* issue the command and wait for completion */
1658    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1659        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1660    }
1661}
1662
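/*
 * DMA 'len32' dwords from host memory at 'dma_addr' to GRC address
 * 'dst_addr', falling back to the ecore string/indirect write helpers
 * while DMAE is not yet ready.
 */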
1663void
1664bxe_write_dmae(struct bxe_softc *sc,
1665               bus_addr_t       dma_addr,
1666               uint32_t         dst_addr,
1667               uint32_t         len32)
1668{
1669    struct dmae_cmd dmae;
1670    int rc;
1671
1672    if (!sc->dmae_ready) {
1673        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1674
1675        if (CHIP_IS_E1(sc)) {
1676            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1677        } else {
1678            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1679        }
1680
1681        return;
1682    }
1683
1684    /* set opcode and fixed command fields */
1685    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1686
1687    /* fill in addresses and len */
1688    dmae.src_addr_lo = U64_LO(dma_addr);
1689    dmae.src_addr_hi = U64_HI(dma_addr);
1690    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1691    dmae.dst_addr_hi = 0;
1692    dmae.len         = len32;
1693
1694    /* issue the command and wait for completion */
1695    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1696        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1697    }
1698}
1699
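/*
 * Write a buffer that may exceed the per-command DMAE length limit by
 * splitting it into DMAE_LEN32_WR_MAX-sized chunks. Note that 'len' is
 * counted in dwords while 'offset' advances in bytes (hence the "* 4").
 */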
1700void
1701bxe_write_dmae_phys_len(struct bxe_softc *sc,
1702                        bus_addr_t       phys_addr,
1703                        uint32_t         addr,
1704                        uint32_t         len)
1705{
1706    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1707    int offset = 0;
1708
1709    while (len > dmae_wr_max) {
1710        bxe_write_dmae(sc,
1711                       (phys_addr + offset), /* src DMA address */
1712                       (addr + offset),      /* dst GRC address */
1713                       dmae_wr_max);
1714        offset += (dmae_wr_max * 4);
1715        len -= dmae_wr_max;
1716    }
1717
1718    bxe_write_dmae(sc,
1719                   (phys_addr + offset), /* src DMA address */
1720                   (addr + offset),      /* dst GRC address */
1721                   len);
1722}
1723
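/*
 * Program the CDU validation (reserved) words of an eth context so the
 * hardware can validate the context of the given connection ID.
 */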
1724void
1725bxe_set_ctx_validation(struct bxe_softc   *sc,
1726                       struct eth_context *cxt,
1727                       uint32_t           cid)
1728{
1729    /* ustorm cxt validation */
1730    cxt->ustorm_ag_context.cdu_usage =
1731        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1732            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1733    /* xcontext validation */
1734    cxt->xstorm_ag_context.cdu_reserved =
1735        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1736            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1737}
1738
1739static void
1740bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1741                            uint8_t          port,
1742                            uint8_t          fw_sb_id,
1743                            uint8_t          sb_index,
1744                            uint8_t          ticks)
1745{
1746    uint32_t addr =
1747        (BAR_CSTRORM_INTMEM +
1748         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1749
1750    REG_WR8(sc, addr, ticks);
1751
1752    BLOGD(sc, DBG_LOAD,
1753          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1754          port, fw_sb_id, sb_index, ticks);
1755}
1756
1757static void
1758bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1759                            uint8_t          port,
1760                            uint16_t         fw_sb_id,
1761                            uint8_t          sb_index,
1762                            uint8_t          disable)
1763{
1764    uint32_t enable_flag =
1765        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1766    uint32_t addr =
1767        (BAR_CSTRORM_INTMEM +
1768         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1769    uint8_t flags;
1770
1771    /* clear and set */
1772    flags = REG_RD8(sc, addr);
1773    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1774    flags |= enable_flag;
1775    REG_WR8(sc, addr, flags);
1776
1777    BLOGD(sc, DBG_LOAD,
1778          "port %d fw_sb_id %d sb_index %d disable %d\n",
1779          port, fw_sb_id, sb_index, disable);
1780}
1781
1782void
1783bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1784                             uint8_t          fw_sb_id,
1785                             uint8_t          sb_index,
1786                             uint8_t          disable,
1787                             uint16_t         usec)
1788{
1789    int port = SC_PORT(sc);
1790    uint8_t ticks = (usec / 4); /* XXX assumes a 4 usec HC timer resolution */
1791
1792    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1793
1794    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1795    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1796}
1797
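/*
 * The elink_cb_* routines below are callbacks used by the shared elink PHY
 * code; they map elink's OS abstraction hooks onto driver primitives such
 * as delays, register access, wide-bus (DMAE) access and event logging.
 */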
1798void
1799elink_cb_udelay(struct bxe_softc *sc,
1800                uint32_t         usecs)
1801{
1802    DELAY(usecs);
1803}
1804
1805uint32_t
1806elink_cb_reg_read(struct bxe_softc *sc,
1807                  uint32_t         reg_addr)
1808{
1809    return (REG_RD(sc, reg_addr));
1810}
1811
1812void
1813elink_cb_reg_write(struct bxe_softc *sc,
1814                   uint32_t         reg_addr,
1815                   uint32_t         val)
1816{
1817    REG_WR(sc, reg_addr, val);
1818}
1819
1820void
1821elink_cb_reg_wb_write(struct bxe_softc *sc,
1822                      uint32_t         offset,
1823                      uint32_t         *wb_write,
1824                      uint16_t         len)
1825{
1826    REG_WR_DMAE(sc, offset, wb_write, len);
1827}
1828
1829void
1830elink_cb_reg_wb_read(struct bxe_softc *sc,
1831                     uint32_t         offset,
1832                     uint32_t         *wb_write,
1833                     uint16_t         len)
1834{
1835    REG_RD_DMAE(sc, offset, wb_write, len);
1836}
1837
1838uint8_t
1839elink_cb_path_id(struct bxe_softc *sc)
1840{
1841    return (SC_PATH(sc));
1842}
1843
1844void
1845elink_cb_event_log(struct bxe_softc     *sc,
1846                   const elink_log_id_t elink_log_id,
1847                   ...)
1848{
1849    /* XXX */
1850    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1851}
1852
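/*
 * Drive one of the two configurable SPIO pins low, high, or float (input)
 * while holding the SPIO hardware lock.
 */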
1853static int
1854bxe_set_spio(struct bxe_softc *sc,
1855             int              spio,
1856             uint32_t         mode)
1857{
1858    uint32_t spio_reg;
1859
1860    /* Only 2 SPIOs are configurable */
1861    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1862        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1863        return (-1);
1864    }
1865
1866    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1867
1868    /* read SPIO and mask except the float bits */
1869    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1870
1871    switch (mode) {
1872    case MISC_SPIO_OUTPUT_LOW:
1873        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1874        /* clear FLOAT and set CLR */
1875        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1876        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1877        break;
1878
1879    case MISC_SPIO_OUTPUT_HIGH:
1880        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1881        /* clear FLOAT and set SET */
1882        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1884        break;
1885
1886    case MISC_SPIO_INPUT_HI_Z:
1887        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1888        /* set FLOAT */
1889        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1890        break;
1891
1892    default:
1893        break;
1894    }
1895
1896    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1897    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1898
1899    return (0);
1900}
1901
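/* Return the current value (0 or 1) of a GPIO pin, accounting for port swap. */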
1902static int
1903bxe_gpio_read(struct bxe_softc *sc,
1904              int              gpio_num,
1905              uint8_t          port)
1906{
1907    /* The GPIO should be swapped if swap register is set and active */
1908    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1909                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1910    int gpio_shift = (gpio_num +
1911                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1912    uint32_t gpio_mask = (1 << gpio_shift);
1913    uint32_t gpio_reg;
1914
1915    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1916        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1917            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1918            gpio_mask);
1919        return (-1);
1920    }
1921
1922    /* read GPIO value */
1923    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1924
1925    /* get the requested pin value */
1926    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1927}
1928
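/*
 * Drive a GPIO pin low, high, or float (input), accounting for port swap
 * and while holding the GPIO hardware lock.
 */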
1929static int
1930bxe_gpio_write(struct bxe_softc *sc,
1931               int              gpio_num,
1932               uint32_t         mode,
1933               uint8_t          port)
1934{
1935    /* The GPIO should be swapped if swap register is set and active */
1936    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1937                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1938    int gpio_shift = (gpio_num +
1939                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1940    uint32_t gpio_mask = (1 << gpio_shift);
1941    uint32_t gpio_reg;
1942
1943    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1944        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1945            " gpio_shift %d gpio_mask 0x%x\n",
1946            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1947        return (-1);
1948    }
1949
1950    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1951
1952    /* read GPIO and mask except the float bits */
1953    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1954
1955    switch (mode) {
1956    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1957        BLOGD(sc, DBG_PHY,
1958              "Set GPIO %d (shift %d) -> output low\n",
1959              gpio_num, gpio_shift);
1960        /* clear FLOAT and set CLR */
1961        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1962        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1963        break;
1964
1965    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1966        BLOGD(sc, DBG_PHY,
1967              "Set GPIO %d (shift %d) -> output high\n",
1968              gpio_num, gpio_shift);
1969        /* clear FLOAT and set SET */
1970        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1971        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1972        break;
1973
1974    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1975        BLOGD(sc, DBG_PHY,
1976              "Set GPIO %d (shift %d) -> input\n",
1977              gpio_num, gpio_shift);
1978        /* set FLOAT */
1979        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980        break;
1981
1982    default:
1983        break;
1984    }
1985
1986    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1987    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1988
1989    return (0);
1990}
1991
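/*
 * Apply the same output/float mode to a mask of GPIO pins at once. Any
 * port swapping must be handled by the caller.
 */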
1992static int
1993bxe_gpio_mult_write(struct bxe_softc *sc,
1994                    uint8_t          pins,
1995                    uint32_t         mode)
1996{
1997    uint32_t gpio_reg;
1998
1999    /* any port swapping should be handled by caller */
2000
2001    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2002
2003    /* read GPIO and mask except the float bits */
2004    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2005    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2006    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2007    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2008
2009    switch (mode) {
2010    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2011        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2012        /* set CLR */
2013        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2014        break;
2015
2016    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2018        /* set SET */
2019        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2020        break;
2021
2022    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2023        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2024        /* set FLOAT */
2025        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2026        break;
2027
2028    default:
2029        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2030            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2031        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2032        return (-1);
2033    }
2034
2035    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2036    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2037
2038    return (0);
2039}
2040
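/*
 * Set or clear the interrupt (INT) state of a GPIO pin, accounting for
 * port swap and while holding the GPIO hardware lock.
 */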
2041static int
2042bxe_gpio_int_write(struct bxe_softc *sc,
2043                   int              gpio_num,
2044                   uint32_t         mode,
2045                   uint8_t          port)
2046{
2047    /* The GPIO should be swapped if swap register is set and active */
2048    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2049                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2050    int gpio_shift = (gpio_num +
2051                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2052    uint32_t gpio_mask = (1 << gpio_shift);
2053    uint32_t gpio_reg;
2054
2055    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2056        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2057            " gpio_shift %d gpio_mask 0x%x\n",
2058            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2059        return (-1);
2060    }
2061
2062    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2063
2064    /* read GPIO int */
2065    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2066
2067    switch (mode) {
2068    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2069        BLOGD(sc, DBG_PHY,
2070              "Clear GPIO INT %d (shift %d) -> output low\n",
2071              gpio_num, gpio_shift);
2072        /* clear SET and set CLR */
2073        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2075        break;
2076
2077    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2078        BLOGD(sc, DBG_PHY,
2079              "Set GPIO INT %d (shift %d) -> output high\n",
2080              gpio_num, gpio_shift);
2081        /* clear CLR and set SET */
2082        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2084        break;
2085
2086    default:
2087        break;
2088    }
2089
2090    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2091    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2092
2093    return (0);
2094}
2095
2096uint32_t
2097elink_cb_gpio_read(struct bxe_softc *sc,
2098                   uint16_t         gpio_num,
2099                   uint8_t          port)
2100{
2101    return (bxe_gpio_read(sc, gpio_num, port));
2102}
2103
2104uint8_t
2105elink_cb_gpio_write(struct bxe_softc *sc,
2106                    uint16_t         gpio_num,
2107                    uint8_t          mode, /* 0=low 1=high */
2108                    uint8_t          port)
2109{
2110    return (bxe_gpio_write(sc, gpio_num, mode, port));
2111}
2112
2113uint8_t
2114elink_cb_gpio_mult_write(struct bxe_softc *sc,
2115                         uint8_t          pins,
2116                         uint8_t          mode) /* 0=low 1=high */
2117{
2118    return (bxe_gpio_mult_write(sc, pins, mode));
2119}
2120
2121uint8_t
2122elink_cb_gpio_int_write(struct bxe_softc *sc,
2123                        uint16_t         gpio_num,
2124                        uint8_t          mode, /* 0=low 1=high */
2125                        uint8_t          port)
2126{
2127    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2128}
2129
2130void
2131elink_cb_notify_link_changed(struct bxe_softc *sc)
2132{
2133    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2134                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2135}
2136
2137/* send the MCP a request, block until there is a reply */
2138uint32_t
2139elink_cb_fw_command(struct bxe_softc *sc,
2140                    uint32_t         command,
2141                    uint32_t         param)
2142{
2143    int mb_idx = SC_FW_MB_IDX(sc);
2144    uint32_t seq;
2145    uint32_t rc = 0;
2146    uint32_t cnt = 1;
2147    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2148
2149    BXE_FWMB_LOCK(sc);
2150
2151    seq = ++sc->fw_seq;
2152    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2153    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2154
2155    BLOGD(sc, DBG_PHY,
2156          "wrote command 0x%08x to FW MB param 0x%08x\n",
2157          (command | seq), param);
2158
2159    /* Let the FW do its magic. Give it up to 5 seconds... */
2160    do {
2161        DELAY(delay * 1000);
2162        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2163    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2164
2165    BLOGD(sc, DBG_PHY,
2166          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2167          cnt*delay, rc, seq);
2168
2169    /* is this a reply to our command? */
2170    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2171        rc &= FW_MSG_CODE_MASK;
2172    } else {
2173        /* Ruh-roh! */
2174        BLOGE(sc, "FW failed to respond!\n");
2175        // XXX bxe_fw_dump(sc);
2176        rc = 0;
2177    }
2178
2179    BXE_FWMB_UNLOCK(sc);
2180    return (rc);
2181}
2182
2183static uint32_t
2184bxe_fw_command(struct bxe_softc *sc,
2185               uint32_t         command,
2186               uint32_t         param)
2187{
2188    return (elink_cb_fw_command(sc, command, param));
2189}
2190
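/*
 * Write a 64-bit DMA address into two consecutive 32-bit registers
 * (low dword first).
 */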
2191static void
2192__storm_memset_dma_mapping(struct bxe_softc *sc,
2193                           uint32_t         addr,
2194                           bus_addr_t       mapping)
2195{
2196    REG_WR(sc, addr, U64_LO(mapping));
2197    REG_WR(sc, (addr + 4), U64_HI(mapping));
2198}
2199
2200static void
2201storm_memset_spq_addr(struct bxe_softc *sc,
2202                      bus_addr_t       mapping,
2203                      uint16_t         abs_fid)
2204{
2205    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2206                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2207    __storm_memset_dma_mapping(sc, addr, mapping);
2208}
2209
2210static void
2211storm_memset_vf_to_pf(struct bxe_softc *sc,
2212                      uint16_t         abs_fid,
2213                      uint16_t         pf_id)
2214{
2215    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2216    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2217    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219}
2220
2221static void
2222storm_memset_func_en(struct bxe_softc *sc,
2223                     uint16_t         abs_fid,
2224                     uint8_t          enable)
2225{
2226    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2227    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2228    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230}
2231
2232static void
2233storm_memset_eq_data(struct bxe_softc       *sc,
2234                     struct event_ring_data *eq_data,
2235                     uint16_t               pfid)
2236{
2237    uint32_t addr;
2238    size_t size;
2239
2240    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2241    size = sizeof(struct event_ring_data);
2242    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2243}
2244
2245static void
2246storm_memset_eq_prod(struct bxe_softc *sc,
2247                     uint16_t         eq_prod,
2248                     uint16_t         pfid)
2249{
2250    uint32_t addr = (BAR_CSTRORM_INTMEM +
2251                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2252    REG_WR16(sc, addr, eq_prod);
2253}
2254
2255/*
2256 * Post a slowpath command.
2257 *
2258 * A slowpath command is used to propagate a configuration change through
2259 * the controller in a controlled manner, allowing each STORM processor and
2260 * other H/W blocks to phase in the change.  The commands sent on the
2261 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2262 * completion of the ramrod will occur in different ways.  Here's a
2263 * breakdown of ramrods and how they complete:
2264 *
2265 * RAMROD_CMD_ID_ETH_PORT_SETUP
2266 *   Used to setup the leading connection on a port.  Completes on the
2267 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2268 *
2269 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2270 *   Used to setup an additional connection on a port.  Completes on the
2271 *   RCQ of the multi-queue/RSS connection being initialized.
2272 *
2273 * RAMROD_CMD_ID_ETH_STAT_QUERY
2274 *   Used to force the storm processors to update the statistics database
2275 *   in host memory.  This ramrod is send on the leading connection CID and
2276 *   completes as an index increment of the CSTORM on the default status
2277 *   block.
2278 *
2279 * RAMROD_CMD_ID_ETH_UPDATE
2280 *   Used to update the state of the leading connection, usually to update
2281 *   the RSS indirection table.  Completes on the RCQ of the leading
2282 *   connection. (Not currently used under FreeBSD until OS support becomes
2283 *   available.)
2284 *
2285 * RAMROD_CMD_ID_ETH_HALT
2286 *   Used when tearing down a connection prior to driver unload.  Completes
2287 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2288 *   use this on the leading connection.
2289 *
2290 * RAMROD_CMD_ID_ETH_SET_MAC
2291 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2292 *   the RCQ of the leading connection.
2293 *
2294 * RAMROD_CMD_ID_ETH_CFC_DEL
2295 *   Used when tearing down a connection prior to driver unload.  Completes
2296 *   on the RCQ of the leading connection (since the current connection
2297 *   has been completely removed from controller memory).
2298 *
2299 * RAMROD_CMD_ID_ETH_PORT_DEL
2300 *   Used to tear down the leading connection prior to driver unload,
2301 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2302 *   default status block.
2303 *
2304 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2305 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2306 *   RSS connection that is being offloaded.  (Not currently used under
2307 *   FreeBSD.)
2308 *
2309 * There can only be one command pending per function.
2310 *
2311 * Returns:
2312 *   0 = Success, !0 = Failure.
2313 */
2314
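/*
 * As an illustrative sketch only (parameters vary per ramrod), a
 * contextless ramrod such as RAMROD_CMD_ID_ETH_SET_MAC would be posted
 * roughly as:
 *
 *     bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, cid,
 *                 U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                 ETH_CONNECTION_TYPE);
 *
 * where 'rdata_mapping' is the bus address of the ramrod's data buffer.
 */
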
2315/* must be called under the spq lock */
2316static inline
2317struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2318{
2319    struct eth_spe *next_spe = sc->spq_prod_bd;
2320
2321    if (sc->spq_prod_bd == sc->spq_last_bd) {
2322        /* wrap back to the first eth_spq */
2323        sc->spq_prod_bd = sc->spq;
2324        sc->spq_prod_idx = 0;
2325    } else {
2326        sc->spq_prod_bd++;
2327        sc->spq_prod_idx++;
2328    }
2329
2330    return (next_spe);
2331}
2332
2333/* must be called under the spq lock */
2334static inline
2335void bxe_sp_prod_update(struct bxe_softc *sc)
2336{
2337    int func = SC_FUNC(sc);
2338
2339    /*
2340     * Make sure that BD data is updated before writing the producer.
2341     * BD data is written to the memory, the producer is read from the
2342     * memory, thus we need a full memory barrier to ensure the ordering.
2343     */
2344    mb();
2345
2346    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2347             sc->spq_prod_idx);
2348
2349    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2350                      BUS_SPACE_BARRIER_WRITE);
2351}
2352
2353/**
2354 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2355 *
2356 * @cmd:      command to check
2357 * @cmd_type: command type
2358 */
2359static inline
2360int bxe_is_contextless_ramrod(int cmd,
2361                              int cmd_type)
2362{
2363    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2364        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2365        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2366        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2367        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2368        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2369        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2370        return (TRUE);
2371    } else {
2372        return (FALSE);
2373    }
2374}
2375
2376/**
2377 * bxe_sp_post - place a single command on an SP ring
2378 *
2379 * @sc:         driver handle
2380 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2381 * @cid:        SW CID the command is related to
2382 * @data_hi:    command private data address (high 32 bits)
2383 * @data_lo:    command private data address (low 32 bits)
2384 * @cmd_type:   command type (e.g. NONE, ETH)
2385 *
2386 * SP data is handled as if it's always an address pair, thus data fields are
2387 * not swapped to little endian in upper functions. Instead this function swaps
2388 * data as if it's two uint32 fields.
2389 */
2390int
2391bxe_sp_post(struct bxe_softc *sc,
2392            int              command,
2393            int              cid,
2394            uint32_t         data_hi,
2395            uint32_t         data_lo,
2396            int              cmd_type)
2397{
2398    struct eth_spe *spe;
2399    uint16_t type;
2400    int common;
2401
2402    common = bxe_is_contextless_ramrod(command, cmd_type);
2403
2404    BXE_SP_LOCK(sc);
2405
2406    if (common) {
2407        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2408            BLOGE(sc, "EQ ring is full!\n");
2409            BXE_SP_UNLOCK(sc);
2410            return (-1);
2411        }
2412    } else {
2413        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2414            BLOGE(sc, "SPQ ring is full!\n");
2415            BXE_SP_UNLOCK(sc);
2416            return (-1);
2417        }
2418    }
2419
2420    spe = bxe_sp_get_next(sc);
2421
2422    /* CID needs the port number to be encoded in it */
2423    spe->hdr.conn_and_cmd_data =
2424        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2425
2426    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2427
2428    /* TBD: Check if it works for VFs */
2429    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2430             SPE_HDR_T_FUNCTION_ID);
2431
2432    spe->hdr.type = htole16(type);
2433
2434    spe->data.update_data_addr.hi = htole32(data_hi);
2435    spe->data.update_data_addr.lo = htole32(data_lo);
2436
2437    /*
2438     * It's ok if the actual decrement is issued towards the memory
2439     * somewhere between the lock and unlock. Thus no more explicit
2440     * memory barrier is needed.
2441     */
2442    if (common) {
2443        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2444    } else {
2445        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2446    }
2447
2448    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2449    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2450          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2451    BLOGD(sc, DBG_SP,
2452          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2453          sc->spq_prod_idx,
2454          (uint32_t)U64_HI(sc->spq_dma.paddr),
2455          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2456          command,
2457          common,
2458          HW_CID(sc, cid),
2459          data_hi,
2460          data_lo,
2461          type,
2462          atomic_load_acq_long(&sc->cq_spq_left),
2463          atomic_load_acq_long(&sc->eq_spq_left));
2464
2465    bxe_sp_prod_update(sc);
2466
2467    BXE_SP_UNLOCK(sc);
2468    return (0);
2469}
2470
2471/**
2472 * bxe_debug_print_ind_table - prints the indirection table configuration.
2473 *
2474 * @sc: driver handle
2475 * @p:  pointer to rss configuration
2476 */
2477
2478/*
2479 * FreeBSD Device probe function.
2480 *
2481 * Compares the device found to the driver's list of supported devices and
2482 * reports back to the kernel whether this is the right driver for the device.
2483 * This is the driver's probe entry point, invoked when the module is loaded (e.g. via "kldload").
2484 *
2485 * Returns:
2486 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2487 */
2488static int
2489bxe_probe(device_t dev)
2490{
2491    struct bxe_device_type *t;
2492    uint16_t did, sdid, svid, vid;
2493
2494    /* Find our device structure */
2495    t = bxe_devs;
2496
2497    /* Get the data for the device to be probed. */
2498    vid  = pci_get_vendor(dev);
2499    did  = pci_get_device(dev);
2500    svid = pci_get_subvendor(dev);
2501    sdid = pci_get_subdevice(dev);
2502
2503    /* Look through the list of known devices for a match. */
2504    while (t->bxe_name != NULL) {
2505        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2506            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2507            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2508            device_set_descf(dev,
2509                     "%s (%c%d) BXE v:%s", t->bxe_name,
2510                     (((pci_read_config(dev, PCIR_REVID, 4) &
2511                        0xf0) >> 4) + 'A'),
2512                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2513                     BXE_DRIVER_VERSION);
2514            return (BUS_PROBE_DEFAULT);
2515        }
2516        t++;
2517    }
2518
2519    return (ENXIO);
2520}
2521
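/*
 * Create the per-instance locks (core, slowpath, DMAE, PHY, FW mailbox,
 * print, stats and mcast) used throughout the driver.
 */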
2522static void
2523bxe_init_mutexes(struct bxe_softc *sc)
2524{
2525#ifdef BXE_CORE_LOCK_SX
2526    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2527             "bxe%d_core_lock", sc->unit);
2528    sx_init(&sc->core_sx, sc->core_sx_name);
2529#else
2530    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2531             "bxe%d_core_lock", sc->unit);
2532    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2533#endif
2534
2535    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2536             "bxe%d_sp_lock", sc->unit);
2537    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2538
2539    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2540             "bxe%d_dmae_lock", sc->unit);
2541    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2542
2543    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2544             "bxe%d_phy_lock", sc->unit);
2545    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2546
2547    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2548             "bxe%d_fwmb_lock", sc->unit);
2549    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2550
2551    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2552             "bxe%d_print_lock", sc->unit);
2553    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2554
2555    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2556             "bxe%d_stats_lock", sc->unit);
2557    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2558
2559    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2560             "bxe%d_mcast_lock", sc->unit);
2561    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2562}
2563
2564static void
2565bxe_release_mutexes(struct bxe_softc *sc)
2566{
2567#ifdef BXE_CORE_LOCK_SX
2568    sx_destroy(&sc->core_sx);
2569#else
2570    if (mtx_initialized(&sc->core_mtx)) {
2571        mtx_destroy(&sc->core_mtx);
2572    }
2573#endif
2574
2575    if (mtx_initialized(&sc->sp_mtx)) {
2576        mtx_destroy(&sc->sp_mtx);
2577    }
2578
2579    if (mtx_initialized(&sc->dmae_mtx)) {
2580        mtx_destroy(&sc->dmae_mtx);
2581    }
2582
2583    if (mtx_initialized(&sc->port.phy_mtx)) {
2584        mtx_destroy(&sc->port.phy_mtx);
2585    }
2586
2587    if (mtx_initialized(&sc->fwmb_mtx)) {
2588        mtx_destroy(&sc->fwmb_mtx);
2589    }
2590
2591    if (mtx_initialized(&sc->print_mtx)) {
2592        mtx_destroy(&sc->print_mtx);
2593    }
2594
2595    if (mtx_initialized(&sc->stats_mtx)) {
2596        mtx_destroy(&sc->stats_mtx);
2597    }
2598
2599    if (mtx_initialized(&sc->mcast_mtx)) {
2600        mtx_destroy(&sc->mcast_mtx);
2601    }
2602}
2603
2604static void
2605bxe_tx_disable(struct bxe_softc *sc)
2606{
2607    if_t ifp = sc->ifp;
2608
2609    /* tell the stack the driver is stopped and TX queue is full */
2610    if (ifp != NULL) {
2611        if_setdrvflags(ifp, 0);
2612    }
2613}
2614
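/*
 * Write the driver pulse sequence to the shmem mailbox so the management
 * firmware can tell that the driver is still alive.
 */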
2615static void
2616bxe_drv_pulse(struct bxe_softc *sc)
2617{
2618    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2619             sc->fw_drv_pulse_wr_seq);
2620}
2621
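/* Return the number of free TX buffer descriptors in the fastpath TX chain. */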
2622static inline uint16_t
2623bxe_tx_avail(struct bxe_softc *sc,
2624             struct bxe_fastpath *fp)
2625{
2626    int16_t  used;
2627    uint16_t prod;
2628    uint16_t cons;
2629
2630    prod = fp->tx_bd_prod;
2631    cons = fp->tx_bd_cons;
2632
2633    used = SUB_S16(prod, cons);
2634
2635    return (int16_t)(sc->tx_ring_size) - used;
2636}
2637
2638static inline int
2639bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2640{
2641    uint16_t hw_cons;
2642
2643    mb(); /* status block fields can change */
2644    hw_cons = le16toh(*fp->tx_cons_sb);
2645    return (hw_cons != fp->tx_pkt_cons);
2646}
2647
2648static inline uint8_t
2649bxe_has_tx_work(struct bxe_fastpath *fp)
2650{
2651    /* expand this for multi-cos if ever supported */
2652    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2653}
2654
2655static inline int
2656bxe_has_rx_work(struct bxe_fastpath *fp)
2657{
2658    uint16_t rx_cq_cons_sb;
2659
2660    mb(); /* status block fields can change */
2661    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2662    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2663        rx_cq_cons_sb++;
2664    return (fp->rx_cq_cons != rx_cq_cons_sb);
2665}
2666
2667static void
2668bxe_sp_event(struct bxe_softc    *sc,
2669             struct bxe_fastpath *fp,
2670             union eth_rx_cqe    *rr_cqe)
2671{
2672    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2673    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2674    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2675    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2676
2677    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2678          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2679
2680    switch (command) {
2681    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2682        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2683        drv_cmd = ECORE_Q_CMD_UPDATE;
2684        break;
2685
2686    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2687        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2688        drv_cmd = ECORE_Q_CMD_SETUP;
2689        break;
2690
2691    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2692        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2693        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2694        break;
2695
2696    case (RAMROD_CMD_ID_ETH_HALT):
2697        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2698        drv_cmd = ECORE_Q_CMD_HALT;
2699        break;
2700
2701    case (RAMROD_CMD_ID_ETH_TERMINATE):
2702        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2703        drv_cmd = ECORE_Q_CMD_TERMINATE;
2704        break;
2705
2706    case (RAMROD_CMD_ID_ETH_EMPTY):
2707        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2708        drv_cmd = ECORE_Q_CMD_EMPTY;
2709        break;
2710
2711    default:
2712        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2713              command, fp->index);
2714        return;
2715    }
2716
2717    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2718        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2719        /*
2720         * q_obj->complete_cmd() failure means that this was
2721         * an unexpected completion.
2722         *
2723         * In this case we don't want to increase the sc->spq_left
2724         * because apparently we haven't sent this command the first
2725         * place.
2726         */
2727        // bxe_panic(sc, ("Unexpected SP completion\n"));
2728        return;
2729    }
2730
2731    atomic_add_acq_long(&sc->cq_spq_left, 1);
2732
2733    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2734          atomic_load_acq_long(&sc->cq_spq_left));
2735}
2736
2737/*
2738 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2739 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2740 * the current aggregation queue as in-progress.
2741 */
2742static void
2743bxe_tpa_start(struct bxe_softc            *sc,
2744              struct bxe_fastpath         *fp,
2745              uint16_t                    queue,
2746              uint16_t                    cons,
2747              uint16_t                    prod,
2748              struct eth_fast_path_rx_cqe *cqe)
2749{
2750    struct bxe_sw_rx_bd tmp_bd;
2751    struct bxe_sw_rx_bd *rx_buf;
2752    struct eth_rx_bd *rx_bd;
2753    int max_agg_queues __diagused;
2754    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2755    uint16_t index;
2756
2757    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2758                       "cons=%d prod=%d\n",
2759          fp->index, queue, cons, prod);
2760
2761    max_agg_queues = MAX_AGG_QS(sc);
2762
2763    KASSERT((queue < max_agg_queues),
2764            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2765             fp->index, queue, max_agg_queues));
2766
2767    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2768            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2769             fp->index, queue));
2770
2771    /* copy the existing mbuf and mapping from the TPA pool */
2772    tmp_bd = tpa_info->bd;
2773
2774    if (tmp_bd.m == NULL) {
2775        uint32_t *tmp;
2776
2777        tmp = (uint32_t *)cqe;
2778
2779        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2780              fp->index, queue, cons, prod);
2781        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2782            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2783
2784        /* XXX Error handling? */
2785        return;
2786    }
2787
2788    /* change the TPA queue to the start state */
2789    tpa_info->state            = BXE_TPA_STATE_START;
2790    tpa_info->placement_offset = cqe->placement_offset;
2791    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2792    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2793    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2794
2795    fp->rx_tpa_queue_used |= (1 << queue);
2796
2797    /*
2798     * If all the buffer descriptors are filled with mbufs then fill in
2799     * the current consumer index with a new BD. Else if a maximum Rx
2800     * buffer limit is imposed then fill in the next producer index.
2801     */
2802    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2803                prod : cons;
2804
2805    /* move the received mbuf and mapping to TPA pool */
2806    tpa_info->bd = fp->rx_mbuf_chain[cons];
2807
2808    /* release any existing RX BD mbuf mappings */
2809    if (cons != index) {
2810        rx_buf = &fp->rx_mbuf_chain[cons];
2811
2812        if (rx_buf->m_map != NULL) {
2813            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2814                            BUS_DMASYNC_POSTREAD);
2815            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2816        }
2817
2818        /*
2819         * We get here when the maximum number of rx buffers is less than
2820         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2821         * it out here without concern of a memory leak.
2822         */
2823        fp->rx_mbuf_chain[cons].m = NULL;
2824    }
2825
2826    /* update the Rx SW BD with the mbuf info from the TPA pool */
2827    fp->rx_mbuf_chain[index] = tmp_bd;
2828
2829    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2830    rx_bd = &fp->rx_chain[index];
2831    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2832    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2833}
2834
2835/*
2836 * When a TPA aggregation is completed, loop through the individual mbufs
2837 * of the aggregation, combining them into a single mbuf which will be sent
2838 * up the stack. Refill all freed SGEs with mbufs as we go along.
2839 */
2840static int
2841bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2842                   struct bxe_fastpath       *fp,
2843                   struct bxe_sw_tpa_info    *tpa_info,
2844                   uint16_t                  queue,
2845                   uint16_t                  pages,
2846                   struct mbuf               *m,
2847                   struct eth_end_agg_rx_cqe *cqe,
2848                   uint16_t                  cqe_idx)
2849{
2850    struct mbuf *m_frag;
2851    uint32_t frag_len, frag_size, i;
2852    uint16_t sge_idx;
2853    int rc = 0;
2854    int j;
2855
2856    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2857
2858    BLOGD(sc, DBG_LRO,
2859          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2860          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2861
2862    /* make sure the aggregated frame is not too big to handle */
2863    if (pages > 8 * PAGES_PER_SGE) {
2864
2865        uint32_t *tmp = (uint32_t *)cqe;
2866
2867        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2868                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2869              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2870              tpa_info->len_on_bd, frag_size);
2871
2872        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2873            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2874
2875        bxe_panic(sc, ("sge page count error\n"));
2876        return (EINVAL);
2877    }
2878
2879    /*
2880     * Scan through the scatter gather list pulling individual mbufs into a
2881     * single mbuf for the host stack.
2882     */
2883    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2884        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2885
2886        /*
2887         * Firmware gives the indices of the SGE as if the ring is an array
2888         * (meaning that the "next" element will consume 2 indices).
2889         */
2890        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2891
2892        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2893                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2894              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2895
2896        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2897
2898        /* allocate a new mbuf for the SGE */
2899        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2900        if (rc) {
2901            /* Leave all remaining SGEs in the ring! */
2902            return (rc);
2903        }
2904
2905        /* update the fragment length */
2906        m_frag->m_len = frag_len;
2907
2908        /* concatenate the fragment to the head mbuf */
2909        m_cat(m, m_frag);
2910        fp->eth_q_stats.mbuf_alloc_sge--;
2911
2912        /* update the TPA mbuf size and remaining fragment size */
2913        m->m_pkthdr.len += frag_len;
2914        frag_size -= frag_len;
2915    }
2916
2917    BLOGD(sc, DBG_LRO,
2918          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2919          fp->index, queue, frag_size);
2920
2921    return (rc);
2922}
2923
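/*
 * Clear the SGE mask bits that correspond to the two "next page" entries
 * at the end of each SGE page; these entries never carry buffers and must
 * not be counted when advancing the producer.
 */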
2924static inline void
2925bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2926{
2927    int i, j;
2928
2929    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2930        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2931
2932        for (j = 0; j < 2; j++) {
2933            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2934            idx--;
2935        }
2936    }
2937}
2938
2939static inline void
2940bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2941{
2942    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2943    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2944
2945    /*
2946     * Clear the two last indices in the page to 1. These are the indices that
2947     * correspond to the "next" element, hence will never be indicated and
2948     * should be removed from the calculations.
2949     */
2950    bxe_clear_sge_mask_next_elems(fp);
2951}
2952
2953static inline void
2954bxe_update_last_max_sge(struct bxe_fastpath *fp,
2955                        uint16_t            idx)
2956{
2957    uint16_t last_max = fp->last_max_sge;
2958
2959    if (SUB_S16(idx, last_max) > 0) {
2960        fp->last_max_sge = idx;
2961    }
2962}
2963
2964static inline void
2965bxe_update_sge_prod(struct bxe_softc          *sc,
2966                    struct bxe_fastpath       *fp,
2967                    uint16_t                  sge_len,
2968                    union eth_sgl_or_raw_data *cqe)
2969{
2970    uint16_t last_max, last_elem, first_elem;
2971    uint16_t delta = 0;
2972    uint16_t i;
2973
2974    if (!sge_len) {
2975        return;
2976    }
2977
2978    /* first mark all used pages */
2979    for (i = 0; i < sge_len; i++) {
2980        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2981                            RX_SGE(le16toh(cqe->sgl[i])));
2982    }
2983
2984    BLOGD(sc, DBG_LRO,
2985          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2986          fp->index, sge_len - 1,
2987          le16toh(cqe->sgl[sge_len - 1]));
2988
2989    /* assume that the last SGE index is the biggest */
2990    bxe_update_last_max_sge(fp,
2991                            le16toh(cqe->sgl[sge_len - 1]));
2992
2993    last_max = RX_SGE(fp->last_max_sge);
2994    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2995    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2996
2997    /* if ring is not full */
2998    if (last_elem + 1 != first_elem) {
2999        last_elem++;
3000    }
3001
3002    /* now update the prod */
3003    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3004        if (__predict_true(fp->sge_mask[i])) {
3005            break;
3006        }
3007
3008        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3009        delta += BIT_VEC64_ELEM_SZ;
3010    }
3011
3012    if (delta > 0) {
3013        fp->rx_sge_prod += delta;
3014        /* clear page-end entries */
3015        bxe_clear_sge_mask_next_elems(fp);
3016    }
3017
3018    BLOGD(sc, DBG_LRO,
3019          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3020          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3021}
3022
3023/*
3024 * The aggregation on the current TPA queue has completed. Pull the individual
3025 * mbuf fragments together into a single mbuf, perform all necessary checksum
3026 * calculations, and send the resulting mbuf to the stack.
3027 */
3028static void
3029bxe_tpa_stop(struct bxe_softc          *sc,
3030             struct bxe_fastpath       *fp,
3031             struct bxe_sw_tpa_info    *tpa_info,
3032             uint16_t                  queue,
3033             uint16_t                  pages,
3034             struct eth_end_agg_rx_cqe *cqe,
3035             uint16_t                  cqe_idx)
3036{
3037    if_t ifp = sc->ifp;
3038    struct mbuf *m;
3039    int rc = 0;
3040
3041    BLOGD(sc, DBG_LRO,
3042          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3043          fp->index, queue, tpa_info->placement_offset,
3044          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3045
3046    m = tpa_info->bd.m;
3047
3048    /* allocate a replacement before modifying existing mbuf */
3049    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3050    if (rc) {
3051        /* drop the frame and log an error */
3052        fp->eth_q_stats.rx_soft_errors++;
3053        goto bxe_tpa_stop_exit;
3054    }
3055
3056    /* we have a replacement, fixup the current mbuf */
3057    m_adj(m, tpa_info->placement_offset);
3058    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3059
3060    /* mark the checksums valid (taken care of by the firmware) */
3061    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3062    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3063    m->m_pkthdr.csum_data = 0xffff;
3064    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3065                               CSUM_IP_VALID   |
3066                               CSUM_DATA_VALID |
3067                               CSUM_PSEUDO_HDR);
3068
3069    /* aggregate all of the SGEs into a single mbuf */
3070    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3071    if (rc) {
3072        /* drop the packet and log an error */
3073        fp->eth_q_stats.rx_soft_errors++;
3074        m_freem(m);
3075    } else {
3076        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3077            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3078            m->m_flags |= M_VLANTAG;
3079        }
3080
3081        /* assign the packet to this interface */
3082        if_setrcvif(m, ifp);
3083
3084        /* specify what RSS queue was used for this flow */
3085        m->m_pkthdr.flowid = fp->index;
3086        BXE_SET_FLOWID(m);
3087
3088        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3089        fp->eth_q_stats.rx_tpa_pkts++;
3090
3091        /* pass the frame to the stack */
3092        if_input(ifp, m);
3093    }
3094
3095    /* we passed an mbuf up the stack or dropped the frame */
3096    fp->eth_q_stats.mbuf_alloc_tpa--;
3097
3098bxe_tpa_stop_exit:
3099
3100    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3101    fp->rx_tpa_queue_used &= ~(1 << queue);
3102}
3103
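/*
 * A non-TPA frame can span the linear buffer plus one or more SGE pages.
 * Chain the SGE mbufs onto the head mbuf, replace each consumed SGE with
 * a freshly allocated mbuf, and advance the SGE producer.
 */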
3104static uint8_t
3105bxe_service_rxsgl(struct bxe_fastpath         *fp,
3106                  uint16_t                     len,
3107                  uint16_t                     lenonbd,
3108                  struct mbuf                  *m,
3109                  struct eth_fast_path_rx_cqe *cqe_fp)
3111{
3112    struct mbuf *m_frag;
3113    uint16_t frags, frag_len;
3114    uint16_t sge_idx = 0;
3115    uint16_t j;
3116    uint8_t i, rc = 0;
3117    uint32_t frag_size;
3118
3119    /* adjust the mbuf */
3120    m->m_len = lenonbd;
3121
3122    frag_size = len - lenonbd;
3123    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3124
3125    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3126        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3127
3128        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3129        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3130        m_frag->m_len = frag_len;
3131
3132        /* allocate a new mbuf for the SGE */
3133        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3134        if (rc) {
3135            /* Leave all remaining SGEs in the ring! */
3136            return (rc);
3137        }
3138        fp->eth_q_stats.mbuf_alloc_sge--;
3139
3140        /* concatenate the fragment to the head mbuf */
3141        m_cat(m, m_frag);
3142
3143        frag_size -= frag_len;
3144    }
3145
3146    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3147
3148    return (rc);
3149}
3150
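/*
 * Process the completions pending on a fastpath RX completion queue:
 * dispatch slowpath events, handle TPA start/stop aggregation CQEs, and
 * pass completed frames up to the network stack.
 */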
3151static uint8_t
3152bxe_rxeof(struct bxe_softc    *sc,
3153          struct bxe_fastpath *fp)
3154{
3155    if_t ifp = sc->ifp;
3156    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3157    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3158    int rx_pkts = 0;
3159    int rc = 0;
3160
3161    BXE_FP_RX_LOCK(fp);
3162
3163    /* CQ "next element" is of the size of the regular element */
3164    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3165    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3166        hw_cq_cons++;
3167    }
3168
3169    bd_cons = fp->rx_bd_cons;
3170    bd_prod = fp->rx_bd_prod;
3171    bd_prod_fw = bd_prod;
3172    sw_cq_cons = fp->rx_cq_cons;
3173    sw_cq_prod = fp->rx_cq_prod;
3174
3175    /*
3176     * Memory barrier necessary as speculative reads of the rx
3177     * buffer can be ahead of the index in the status block
3178     */
3179    rmb();
3180
3181    BLOGD(sc, DBG_RX,
3182          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3183          fp->index, hw_cq_cons, sw_cq_cons);
3184
3185    while (sw_cq_cons != hw_cq_cons) {
3186        struct bxe_sw_rx_bd *rx_buf = NULL;
3187        union eth_rx_cqe *cqe;
3188        struct eth_fast_path_rx_cqe *cqe_fp;
3189        uint8_t cqe_fp_flags;
3190        enum eth_rx_cqe_type cqe_fp_type;
3191        uint16_t len, lenonbd,  pad;
3192        struct mbuf *m = NULL;
3193
3194        comp_ring_cons = RCQ(sw_cq_cons);
3195        bd_prod = RX_BD(bd_prod);
3196        bd_cons = RX_BD(bd_cons);
3197
3198        cqe          = &fp->rcq_chain[comp_ring_cons];
3199        cqe_fp       = &cqe->fast_path_cqe;
3200        cqe_fp_flags = cqe_fp->type_error_flags;
3201        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3202
3203        BLOGD(sc, DBG_RX,
3204              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3205              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3206              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3207              fp->index,
3208              hw_cq_cons,
3209              sw_cq_cons,
3210              bd_prod,
3211              bd_cons,
3212              CQE_TYPE(cqe_fp_flags),
3213              cqe_fp_flags,
3214              cqe_fp->status_flags,
3215              le32toh(cqe_fp->rss_hash_result),
3216              le16toh(cqe_fp->vlan_tag),
3217              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3218              le16toh(cqe_fp->len_on_bd));
3219
3220        /* is this a slowpath msg? */
3221        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3222            bxe_sp_event(sc, fp, cqe);
3223            goto next_cqe;
3224        }
3225
3226        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3227
3228        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3229            struct bxe_sw_tpa_info *tpa_info;
3230            uint16_t frag_size, pages;
3231            uint8_t queue;
3232
3233            if (CQE_TYPE_START(cqe_fp_type)) {
3234                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3235                              bd_cons, bd_prod, cqe_fp);
3236                m = NULL; /* packet not ready yet */
3237                goto next_rx;
3238            }
3239
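            /*
             * Otherwise this must be a TPA STOP completion: the aggregation
             * is finished, so assemble the full frame from the SGE pages and
             * hand it to the stack via bxe_tpa_stop().
             */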
3240            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3241                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3242
3243            queue = cqe->end_agg_cqe.queue_index;
3244            tpa_info = &fp->rx_tpa_info[queue];
3245
3246            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3247                  fp->index, queue);
3248
3249            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3250                         tpa_info->len_on_bd);
3251            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3252
3253            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3254                         &cqe->end_agg_cqe, comp_ring_cons);
3255
3256            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3257
3258            goto next_cqe;
3259        }
3260
3261        /* non TPA */
3262
3263        /* is this an error packet? */
3264        if (__predict_false(cqe_fp_flags &
3265                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3266            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3267            fp->eth_q_stats.rx_soft_errors++;
3268            goto next_rx;
3269        }
3270
3271        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3272        lenonbd = le16toh(cqe_fp->len_on_bd);
3273        pad = cqe_fp->placement_offset;
3274
3275        m = rx_buf->m;
3276
3277        if (__predict_false(m == NULL)) {
3278            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3279                  bd_cons, fp->index);
3280            goto next_rx;
3281        }
3282
3283        /* XXX double copy if packet length under a threshold */
3284
3285        /*
3286         * If all the buffer descriptors are filled with mbufs then fill in
3287         * the current consumer index with a new BD. Else if a maximum Rx
3288         * buffer limit is imposed then fill in the next producer index.
3289         */
3290        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3291                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3292                                      bd_prod : bd_cons);
3293        if (rc != 0) {
3294
3295            /* we simply reuse the received mbuf and don't post it to the stack */
3296            m = NULL;
3297
3298            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3299                  fp->index, rc);
3300            fp->eth_q_stats.rx_soft_errors++;
3301
3302            if (sc->max_rx_bufs != RX_BD_USABLE) {
3303                /* copy this consumer index to the producer index */
3304                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3305                       sizeof(struct bxe_sw_rx_bd));
3306                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3307            }
3308
3309            goto next_rx;
3310        }
3311
3312        /* current mbuf was detached from the bd */
3313        fp->eth_q_stats.mbuf_alloc_rx--;
3314
3315        /* we allocated a replacement mbuf, fixup the current one */
3316        m_adj(m, pad);
3317        m->m_pkthdr.len = m->m_len = len;
3318
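        /*
         * If the frame is longer than what fit in the single BD, the
         * remainder was placed in SGE pages; chain them onto the head mbuf.
         */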
3319        if ((len > 60) && (len > lenonbd)) {
3320            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3321            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3322            if (rc)
3323                break;
3324            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3325        } else if (lenonbd < len) {
3326            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3327        }
3328
3329        /* assign the packet to this interface */
3330        if_setrcvif(m, ifp);
3331
3332        /* assume no hardware checksum has completed */
3333        m->m_pkthdr.csum_flags = 0;
3334
3335        /* validate checksum if offload enabled */
3336        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3337            /* check for a valid IP frame */
3338            if (!(cqe->fast_path_cqe.status_flags &
3339                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3340                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3341                if (__predict_false(cqe_fp_flags &
3342                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3343                    fp->eth_q_stats.rx_hw_csum_errors++;
3344                } else {
3345                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3346                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3347                }
3348            }
3349
3350            /* check for a valid TCP/UDP frame */
3351            if (!(cqe->fast_path_cqe.status_flags &
3352                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3353                if (__predict_false(cqe_fp_flags &
3354                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3355                    fp->eth_q_stats.rx_hw_csum_errors++;
3356                } else {
3357                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3358                    m->m_pkthdr.csum_data = 0xFFFF;
3359                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3360                                               CSUM_PSEUDO_HDR);
3361                }
3362            }
3363        }
3364
3365        /* if there is a VLAN tag then flag that info */
3366        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3367            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3368            m->m_flags |= M_VLANTAG;
3369        }
3370
3371        /* specify what RSS queue was used for this flow */
3372        m->m_pkthdr.flowid = fp->index;
3373        BXE_SET_FLOWID(m);
3374
3375next_rx:
3376
3377        bd_cons    = RX_BD_NEXT(bd_cons);
3378        bd_prod    = RX_BD_NEXT(bd_prod);
3379        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3380
3381        /* pass the frame to the stack */
3382        if (__predict_true(m != NULL)) {
3383            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3384            rx_pkts++;
3385            if_input(ifp, m);
3386        }
3387
3388next_cqe:
3389
3390        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3391        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3392
3393        /* limit spinning on the queue */
3394        if (rc != 0)
3395            break;
3396
3397        if (rx_pkts == sc->rx_budget) {
3398            fp->eth_q_stats.rx_budget_reached++;
3399            break;
3400        }
3401    } /* while work to do */
3402
3403    fp->rx_bd_cons = bd_cons;
3404    fp->rx_bd_prod = bd_prod_fw;
3405    fp->rx_cq_cons = sw_cq_cons;
3406    fp->rx_cq_prod = sw_cq_prod;
3407
3408    /* Update producers */
3409    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3410
3411    fp->eth_q_stats.rx_pkts += rx_pkts;
3412    fp->eth_q_stats.rx_calls++;
3413
3414    BXE_FP_RX_UNLOCK(fp);
3415
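    /* report whether completions remain outstanding on this queue */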
3416    return (sw_cq_cons != hw_cq_cons);
3417}
3418
3419static uint16_t
3420bxe_free_tx_pkt(struct bxe_softc    *sc,
3421                struct bxe_fastpath *fp,
3422                uint16_t            idx)
3423{
3424    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3425    struct eth_tx_start_bd *tx_start_bd;
3426    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3427    uint16_t new_cons;
3428    int nbd;
3429
3430    /* unmap the mbuf from non-paged memory */
3431    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3432
3433    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3434    nbd = le16toh(tx_start_bd->nbd) - 1;
3435
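    /* consumer index just past the last BD used by this packet */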
3436    new_cons = (tx_buf->first_bd + nbd);
3437
3438    /* free the mbuf */
3439    if (__predict_true(tx_buf->m != NULL)) {
3440        m_freem(tx_buf->m);
3441        fp->eth_q_stats.mbuf_alloc_tx--;
3442    } else {
3443        fp->eth_q_stats.tx_chain_lost_mbuf++;
3444    }
3445
3446    tx_buf->m = NULL;
3447    tx_buf->first_bd = 0;
3448
3449    return (new_cons);
3450}
3451
3452/* transmit timeout watchdog */
3453static int
3454bxe_watchdog(struct bxe_softc    *sc,
3455             struct bxe_fastpath *fp)
3456{
3457    BXE_FP_TX_LOCK(fp);
3458
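    /* nothing to do if the watchdog is disarmed or has not counted down to zero yet */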
3459    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3460        BXE_FP_TX_UNLOCK(fp);
3461        return (0);
3462    }
3463
3464    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3465
3466    BXE_FP_TX_UNLOCK(fp);
3467    BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3468    taskqueue_enqueue_timeout(taskqueue_thread,
3469        &sc->sp_err_timeout_task, hz/10);
3470
3471    return (-1);
3472}
3473
3474/* processes transmit completions */
3475static uint8_t
3476bxe_txeof(struct bxe_softc    *sc,
3477          struct bxe_fastpath *fp)
3478{
3479    if_t ifp = sc->ifp;
3480    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3481    uint16_t tx_bd_avail;
3482
3483    BXE_FP_TX_LOCK_ASSERT(fp);
3484
3485    bd_cons = fp->tx_bd_cons;
3486    hw_cons = le16toh(*fp->tx_cons_sb);
3487    sw_cons = fp->tx_pkt_cons;
3488
3489    while (sw_cons != hw_cons) {
3490        pkt_cons = TX_BD(sw_cons);
3491
3492        BLOGD(sc, DBG_TX,
3493              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3494              fp->index, hw_cons, sw_cons, pkt_cons);
3495
3496        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3497
3498        sw_cons++;
3499    }
3500
3501    fp->tx_pkt_cons = sw_cons;
3502    fp->tx_bd_cons  = bd_cons;
3503
3504    BLOGD(sc, DBG_TX,
3505          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3506          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3507
3508    mb();
3509
3510    tx_bd_avail = bxe_tx_avail(sc, fp);
3511
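    /* throttle the OS TX queue when descriptors run low, release it otherwise */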
3512    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3513        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3514    } else {
3515        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3516    }
3517
3518    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3519        /* reset the watchdog timer if there are pending transmits */
3520        fp->watchdog_timer = BXE_TX_TIMEOUT;
3521        return (TRUE);
3522    } else {
3523        /* clear watchdog when there are no pending transmits */
3524        fp->watchdog_timer = 0;
3525        return (FALSE);
3526    }
3527}
3528
3529static void
3530bxe_drain_tx_queues(struct bxe_softc *sc)
3531{
3532    struct bxe_fastpath *fp;
3533    int i, count;
3534
3535    /* wait until all TX fastpath tasks have completed */
3536    for (i = 0; i < sc->num_queues; i++) {
3537        fp = &sc->fp[i];
3538
3539        count = 1000;
3540
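        /* poll for up to ~1 second (1000 iterations x 1ms) for this queue to drain */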
3541        while (bxe_has_tx_work(fp)) {
3542
3543            BXE_FP_TX_LOCK(fp);
3544            bxe_txeof(sc, fp);
3545            BXE_FP_TX_UNLOCK(fp);
3546
3547            if (count == 0) {
3548                BLOGE(sc, "Timeout waiting for fp[%d] "
3549                          "transmits to complete!\n", i);
3550                bxe_panic(sc, ("tx drain failure\n"));
3551                return;
3552            }
3553
3554            count--;
3555            DELAY(1000);
3556            rmb();
3557        }
3558    }
3559
3560    return;
3561}
3562
3563static int
3564bxe_del_all_macs(struct bxe_softc          *sc,
3565                 struct ecore_vlan_mac_obj *mac_obj,
3566                 int                       mac_type,
3567                 uint8_t                   wait_for_comp)
3568{
3569    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3570    int rc;
3571
3572    /* wait for completion of the request */
3573    if (wait_for_comp) {
3574        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3575    }
3576
3577    /* Set the mac type of addresses we want to clear */
3578    bxe_set_bit(mac_type, &vlan_mac_flags);
3579
3580    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3581    if (rc < 0) {
3582        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3583            rc, mac_type, wait_for_comp);
3584    }
3585
3586    return (rc);
3587}
3588
3589static int
3590bxe_fill_accept_flags(struct bxe_softc *sc,
3591                      uint32_t         rx_mode,
3592                      unsigned long    *rx_accept_flags,
3593                      unsigned long    *tx_accept_flags)
3594{
3595    /* Clear the flags first */
3596    *rx_accept_flags = 0;
3597    *tx_accept_flags = 0;
3598
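    /*
     * Translate the driver rx_mode into ecore accept masks, both for RX
     * and for the internal TX switching path.
     */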
3599    switch (rx_mode) {
3600    case BXE_RX_MODE_NONE:
3601        /*
3602         * 'drop all' supersedes any accept flags that may have been
3603         * passed to the function.
3604         */
3605        break;
3606
3607    case BXE_RX_MODE_NORMAL:
3608        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3609        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3610        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3611
3612        /* internal switching mode */
3613        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3614        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3615        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3616
3617        break;
3618
3619    case BXE_RX_MODE_ALLMULTI:
3620        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3621        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3622        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3623
3624        /* internal switching mode */
3625        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3626        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3627        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3628
3629        break;
3630
3631    case BXE_RX_MODE_PROMISC:
3632        /*
3633         * According to the definition of SI mode, an interface in promisc
3634         * mode should receive matched and unmatched (in resolution of the
3635         * port) unicast packets.
3636         */
3637        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3638        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3640        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3641
3642        /* internal switching mode */
3643        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3644        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3645
3646        if (IS_MF_SI(sc)) {
3647            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3648        } else {
3649            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650        }
3651
3652        break;
3653
3654    default:
3655        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3656        return (-1);
3657    }
3658
3659    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3660    if (rx_mode != BXE_RX_MODE_NONE) {
3661        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3663    }
3664
3665    return (0);
3666}
3667
3668static int
3669bxe_set_q_rx_mode(struct bxe_softc *sc,
3670                  uint8_t          cl_id,
3671                  unsigned long    rx_mode_flags,
3672                  unsigned long    rx_accept_flags,
3673                  unsigned long    tx_accept_flags,
3674                  unsigned long    ramrod_flags)
3675{
3676    struct ecore_rx_mode_ramrod_params ramrod_param;
3677    int rc;
3678
3679    memset(&ramrod_param, 0, sizeof(ramrod_param));
3680
3681    /* Prepare ramrod parameters */
3682    ramrod_param.cid = 0;
3683    ramrod_param.cl_id = cl_id;
3684    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3685    ramrod_param.func_id = SC_FUNC(sc);
3686
3687    ramrod_param.pstate = &sc->sp_state;
3688    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3689
3690    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3691    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3692
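    /* mark the rx_mode ramrod as pending; ecore clears this bit once the ramrod completes */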
3693    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3694
3695    ramrod_param.ramrod_flags = ramrod_flags;
3696    ramrod_param.rx_mode_flags = rx_mode_flags;
3697
3698    ramrod_param.rx_accept_flags = rx_accept_flags;
3699    ramrod_param.tx_accept_flags = tx_accept_flags;
3700
3701    rc = ecore_config_rx_mode(sc, &ramrod_param);
3702    if (rc < 0) {
3703        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3704            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3705            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3706            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3707            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3708        return (rc);
3709    }
3710
3711    return (0);
3712}
3713
3714static int
3715bxe_set_storm_rx_mode(struct bxe_softc *sc)
3716{
3717    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3718    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3719    int rc;
3720
3721    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3722                               &tx_accept_flags);
3723    if (rc) {
3724        return (rc);
3725    }
3726
3727    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3728    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3729
3730    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3731    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3732                              rx_accept_flags, tx_accept_flags,
3733                              ramrod_flags));
3734}
3735
3736/* returns the "mcp load_code" according to global load_count array */
3737static int
3738bxe_nic_load_no_mcp(struct bxe_softc *sc)
3739{
3740    int path = SC_PATH(sc);
3741    int port = SC_PORT(sc);
3742
3743    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3744          path, load_count[path][0], load_count[path][1],
3745          load_count[path][2]);
3746    load_count[path][0]++;
3747    load_count[path][1 + port]++;
3748    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3749          path, load_count[path][0], load_count[path][1],
3750          load_count[path][2]);
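    /*
     * The first load on the path does COMMON init, the first on the port
     * does PORT init, and any other load is FUNCTION-only.
     */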
3751    if (load_count[path][0] == 1) {
3752        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3753    } else if (load_count[path][1 + port] == 1) {
3754        return (FW_MSG_CODE_DRV_LOAD_PORT);
3755    } else {
3756        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3757    }
3758}
3759
3760/* returns the "mcp load_code" according to global load_count array */
3761static int
3762bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3763{
3764    int port = SC_PORT(sc);
3765    int path = SC_PATH(sc);
3766
3767    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3768          path, load_count[path][0], load_count[path][1],
3769          load_count[path][2]);
3770    load_count[path][0]--;
3771    load_count[path][1 + port]--;
3772    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3773          path, load_count[path][0], load_count[path][1],
3774          load_count[path][2]);
3775    if (load_count[path][0] == 0) {
3776        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3777    } else if (load_count[path][1 + port] == 0) {
3778        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3779    } else {
3780        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3781    }
3782}
3783
3784/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3785static uint32_t
3786bxe_send_unload_req(struct bxe_softc *sc,
3787                    int              unload_mode)
3788{
3789    uint32_t reset_code = 0;
3790
3791    /* Select the UNLOAD request mode */
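    /* both modes currently request WOL_DIS (WoL is not wired up in this driver) */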
3792    if (unload_mode == UNLOAD_NORMAL) {
3793        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3794    } else {
3795        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3796    }
3797
3798    /* Send the request to the MCP */
3799    if (!BXE_NOMCP(sc)) {
3800        reset_code = bxe_fw_command(sc, reset_code, 0);
3801    } else {
3802        reset_code = bxe_nic_unload_no_mcp(sc);
3803    }
3804
3805    return (reset_code);
3806}
3807
3808/* send UNLOAD_DONE command to the MCP */
3809static void
3810bxe_send_unload_done(struct bxe_softc *sc,
3811                     uint8_t          keep_link)
3812{
3813    uint32_t reset_param =
3814        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3815
3816    /* Report UNLOAD_DONE to MCP */
3817    if (!BXE_NOMCP(sc)) {
3818        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3819    }
3820}
3821
3822static int
3823bxe_func_wait_started(struct bxe_softc *sc)
3824{
3825    int tout = 50;
3826
3827    if (!sc->port.pmf) {
3828        return (0);
3829    }
3830
3831    /*
3832     * (assumption: No Attention from MCP at this stage)
3833     * PMF probably in the middle of TX disable/enable transaction
3834     * 1. Sync the ISR for the default SB
3835     * 2. Sync SP queue - this guarantees us that attention handling started
3836     * 3. Wait until the TX disable/enable transaction completes
3837     *
3838     * 1+2 guarantee that if a DCBX attention was scheduled it has already
3839     * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
3840     * if we already received the completion, the state is TX_STOPPED.
3841     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3842     * transaction.
3843     */
3844
3845    /* XXX make sure default SB ISR is done */
3846    /* need a way to synchronize an irq (intr_mtx?) */
3847
3848    /* XXX flush any work queues */
3849
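    /* poll the function state for up to ~1 second (50 iterations x 20ms) */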
3850    while (ecore_func_get_state(sc, &sc->func_obj) !=
3851           ECORE_F_STATE_STARTED && tout--) {
3852        DELAY(20000);
3853    }
3854
3855    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3856        /*
3857         * Failed to complete the transaction in a "good way"
3858         * Force both transactions with CLR bit.
3859         */
3860        struct ecore_func_state_params func_params = { NULL };
3861
3862        BLOGE(sc, "Unexpected function state! "
3863                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3864
3865        func_params.f_obj = &sc->func_obj;
3866        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3867
3868        /* STARTED-->TX_STOPPED */
3869        func_params.cmd = ECORE_F_CMD_TX_STOP;
3870        ecore_func_state_change(sc, &func_params);
3871
3872        /* TX_STOPPED-->STARTED */
3873        func_params.cmd = ECORE_F_CMD_TX_START;
3874        return (ecore_func_state_change(sc, &func_params));
3875    }
3876
3877    return (0);
3878}
3879
3880static int
3881bxe_stop_queue(struct bxe_softc *sc,
3882               int              index)
3883{
3884    struct bxe_fastpath *fp = &sc->fp[index];
3885    struct ecore_queue_state_params q_params = { NULL };
3886    int rc;
3887
3888    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3889
3890    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3891    /* We want to wait for completion in this context */
3892    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3893
3894    /* Stop the primary connection: */
3895
3896    /* ...halt the connection */
3897    q_params.cmd = ECORE_Q_CMD_HALT;
3898    rc = ecore_queue_state_change(sc, &q_params);
3899    if (rc) {
3900        return (rc);
3901    }
3902
3903    /* ...terminate the connection */
3904    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3905    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3906    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3907    rc = ecore_queue_state_change(sc, &q_params);
3908    if (rc) {
3909        return (rc);
3910    }
3911
3912    /* ...delete cfc entry */
3913    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3914    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3915    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3916    return (ecore_queue_state_change(sc, &q_params));
3917}
3918
3919/* wait for the outstanding SP commands */
3920static inline uint8_t
3921bxe_wait_sp_comp(struct bxe_softc *sc,
3922                 unsigned long    mask)
3923{
3924    unsigned long tmp;
3925    int tout = 5000; /* wait for 5 secs tops */
3926
3927    while (tout--) {
3928        mb();
3929        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3930            return (TRUE);
3931        }
3932
3933        DELAY(1000);
3934    }
3935
3936    mb();
3937
3938    tmp = atomic_load_acq_long(&sc->sp_state);
3939    if (tmp & mask) {
3940        BLOGE(sc, "Filtering completion timed out: "
3941                  "sp_state 0x%lx, mask 0x%lx\n",
3942              tmp, mask);
3943        return (FALSE);
3944    }
3945
3946    return (FALSE);
3947}
3948
3949static int
3950bxe_func_stop(struct bxe_softc *sc)
3951{
3952    struct ecore_func_state_params func_params = { NULL };
3953    int rc;
3954
3955    /* prepare parameters for function state transitions */
3956    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3957    func_params.f_obj = &sc->func_obj;
3958    func_params.cmd = ECORE_F_CMD_STOP;
3959
3960    /*
3961     * Try to stop the function the 'good way'. If it fails (in case
3962     * of a parity error during bxe_chip_cleanup()) and we are
3963     * not in a debug mode, perform a state transaction in order to
3964     * enable further HW_RESET transaction.
3965     */
3966    rc = ecore_func_state_change(sc, &func_params);
3967    if (rc) {
3968        BLOGE(sc, "FUNC_STOP ramrod failed. "
3969                  "Running a dry transaction (%d)\n", rc);
3970        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3971        return (ecore_func_state_change(sc, &func_params));
3972    }
3973
3974    return (0);
3975}
3976
3977static int
3978bxe_reset_hw(struct bxe_softc *sc,
3979             uint32_t         load_code)
3980{
3981    struct ecore_func_state_params func_params = { NULL };
3982
3983    /* Prepare parameters for function state transitions */
3984    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3985
3986    func_params.f_obj = &sc->func_obj;
3987    func_params.cmd = ECORE_F_CMD_HW_RESET;
3988
3989    func_params.params.hw_init.load_phase = load_code;
3990
3991    return (ecore_func_state_change(sc, &func_params));
3992}
3993
3994static void
3995bxe_int_disable_sync(struct bxe_softc *sc,
3996                     int              disable_hw)
3997{
3998    if (disable_hw) {
3999        /* prevent the HW from sending interrupts */
4000        bxe_int_disable(sc);
4001    }
4002
4003    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4004    /* make sure all ISRs are done */
4005
4006    /* XXX make sure sp_task is not running */
4007    /* cancel and flush work queues */
4008}
4009
4010static void
4011bxe_chip_cleanup(struct bxe_softc *sc,
4012                 uint32_t         unload_mode,
4013                 uint8_t          keep_link)
4014{
4015    int port = SC_PORT(sc);
4016    struct ecore_mcast_ramrod_params rparam = { NULL };
4017    uint32_t reset_code;
4018    int i, rc = 0;
4019
4020    bxe_drain_tx_queues(sc);
4021
4022    /* give HW time to discard old tx messages */
4023    DELAY(1000);
4024
4025    /* Clean all ETH MACs */
4026    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4027    if (rc < 0) {
4028        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4029    }
4030
4031    /* Clean up UC list  */
4032    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4033    if (rc < 0) {
4034        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4035    }
4036
4037    /* Disable LLH */
4038    if (!CHIP_IS_E1(sc)) {
4039        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4040    }
4041
4042    /* Set "drop all" to stop Rx */
4043
4044    /*
4045     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4046     * a race between the completion code and this code.
4047     */
4048    BXE_MCAST_LOCK(sc);
4049
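    /*
     * If an rx_mode ramrod is already in flight just schedule another pass,
     * otherwise push the "drop all" mode to the chip now.
     */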
4050    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4051        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4052    } else {
4053        bxe_set_storm_rx_mode(sc);
4054    }
4055
4056    /* Clean up multicast configuration */
4057    rparam.mcast_obj = &sc->mcast_obj;
4058    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4059    if (rc < 0) {
4060        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4061    }
4062
4063    BXE_MCAST_UNLOCK(sc);
4064
4065    // XXX bxe_iov_chip_cleanup(sc);
4066
4067    /*
4068     * Send the UNLOAD_REQUEST to the MCP. This returns whether this
4069     * function should perform a FUNCTION, PORT, or COMMON HW
4070     * reset.
4071     */
4072    reset_code = bxe_send_unload_req(sc, unload_mode);
4073
4074    /*
4075     * (assumption: No Attention from MCP at this stage)
4076     * PMF probably in the middle of TX disable/enable transaction
4077     */
4078    rc = bxe_func_wait_started(sc);
4079    if (rc) {
4080        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4081    }
4082
4083    /*
4084     * Close multi and leading connections
4085     * Completions for ramrods are collected in a synchronous way
4086     */
4087    for (i = 0; i < sc->num_queues; i++) {
4088        if (bxe_stop_queue(sc, i)) {
4089            goto unload_error;
4090        }
4091    }
4092
4093    /*
4094     * If the SP settings did not complete by now, something has
4095     * gone very wrong.
4096     */
4097    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4098        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4099    }
4100
4101unload_error:
4102
4103    rc = bxe_func_stop(sc);
4104    if (rc) {
4105        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4106    }
4107
4108    /* disable HW interrupts */
4109    bxe_int_disable_sync(sc, TRUE);
4110
4111    /* detach interrupts */
4112    bxe_interrupt_detach(sc);
4113
4114    /* Reset the chip */
4115    rc = bxe_reset_hw(sc, reset_code);
4116    if (rc) {
4117        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4118    }
4119
4120    /* Report UNLOAD_DONE to MCP */
4121    bxe_send_unload_done(sc, keep_link);
4122}
4123
4124static void
4125bxe_disable_close_the_gate(struct bxe_softc *sc)
4126{
4127    uint32_t val;
4128    int port = SC_PORT(sc);
4129
4130    BLOGD(sc, DBG_LOAD,
4131          "Disabling 'close the gates'\n");
4132
4133    if (CHIP_IS_E1(sc)) {
4134        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4135                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4136        val = REG_RD(sc, addr);
4137        val &= ~(0x300);
4138        REG_WR(sc, addr, val);
4139    } else {
4140        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4141        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4142                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4143        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4144    }
4145}
4146
4147/*
4148 * Cleans the objects that have internal lists without sending
4149 * ramrods. Should be run when interrupts are disabled.
4150 */
4151static void
4152bxe_squeeze_objects(struct bxe_softc *sc)
4153{
4154    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4155    struct ecore_mcast_ramrod_params rparam = { NULL };
4156    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4157    int rc;
4158
4159    /* Cleanup MACs' object first... */
4160
4161    /* Wait for completion of the request */
4162    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4163    /* Perform a dry cleanup */
4164    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4165
4166    /* Clean ETH primary MAC */
4167    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4168    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4169                             &ramrod_flags);
4170    if (rc != 0) {
4171        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4172    }
4173
4174    /* Cleanup UC list */
4175    vlan_mac_flags = 0;
4176    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4177    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4178                             &ramrod_flags);
4179    if (rc != 0) {
4180        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4181    }
4182
4183    /* Now clean mcast object... */
4184
4185    rparam.mcast_obj = &sc->mcast_obj;
4186    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4187
4188    /* Add a DEL command... */
4189    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4190    if (rc < 0) {
4191        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4192    }
4193
4194    /* now wait until all pending commands are cleared */
4195
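    /* ECORE_MCAST_CMD_CONT returns a positive value while commands are still pending */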
4196    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4197    while (rc != 0) {
4198        if (rc < 0) {
4199            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4200            return;
4201        }
4202
4203        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4204    }
4205}
4206
4207/* stop the controller */
4208static __noinline int
4209bxe_nic_unload(struct bxe_softc *sc,
4210               uint32_t         unload_mode,
4211               uint8_t          keep_link)
4212{
4213    uint8_t global = FALSE;
4214    uint32_t val;
4215    int i;
4216
4217    BXE_CORE_LOCK_ASSERT(sc);
4218
4219    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4220
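    /*
     * Disarm each queue's TX watchdog; the empty lock/unlock pair makes sure
     * any TX path currently holding the lock has finished before we proceed.
     */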
4221    for (i = 0; i < sc->num_queues; i++) {
4222        struct bxe_fastpath *fp;
4223
4224        fp = &sc->fp[i];
4225	fp->watchdog_timer = 0;
4226        BXE_FP_TX_LOCK(fp);
4227        BXE_FP_TX_UNLOCK(fp);
4228    }
4229
4230    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4231
4232    /* mark driver as unloaded in shmem2 */
4233    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4234        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4235        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4236                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4237    }
4238
4239    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4240        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4241
4242	if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4243            /*
4244             * We can get here if the driver has been unloaded
4245             * during parity error recovery and is either waiting for a
4246             * leader to complete or for other functions to unload and
4247             * then ifconfig down has been issued. In this case we want to
4248             * unload and let the other functions complete the recovery
4249             * process.
4250             */
4251            sc->recovery_state = BXE_RECOVERY_DONE;
4252            sc->is_leader = 0;
4253            bxe_release_leader_lock(sc);
4254            mb();
4255            BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4256	}
4257        BLOGE(sc, "Can't unload in closed or error state recovery_state 0x%x"
4258            " state = 0x%x\n", sc->recovery_state, sc->state);
4259        return (-1);
4260    }
4261
4262    /*
4263     * Nothing to do during unload if the previous bxe_nic_load()
4264     * did not complete successfully - all resources were already released.
4265     */
4266    if ((sc->state == BXE_STATE_CLOSED) ||
4267        (sc->state == BXE_STATE_ERROR)) {
4268        return (0);
4269    }
4270
4271    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4272    mb();
4273
4274    /* stop tx */
4275    bxe_tx_disable(sc);
4276
4277    sc->rx_mode = BXE_RX_MODE_NONE;
4278    /* XXX set rx mode ??? */
4279
4280    if (IS_PF(sc) && !sc->grcdump_done) {
4281        /* set ALWAYS_ALIVE bit in shmem */
4282        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4283
4284        bxe_drv_pulse(sc);
4285
4286        bxe_stats_handle(sc, STATS_EVENT_STOP);
4287        bxe_save_statistics(sc);
4288    }
4289
4290    /* wait till consumers catch up with producers in all queues */
4291    bxe_drain_tx_queues(sc);
4292
4293    /* if VF, indicate to the PF that this function is going down (the PF
4294     * will delete the sp elements and clear the initializations)
4295     */
4296    if (IS_VF(sc)) {
4297        ; /* bxe_vfpf_close_vf(sc); */
4298    } else if (unload_mode != UNLOAD_RECOVERY) {
4299        /* if this is a normal/close unload need to clean up chip */
4300        if (!sc->grcdump_done)
4301            bxe_chip_cleanup(sc, unload_mode, keep_link);
4302    } else {
4303        /* Send the UNLOAD_REQUEST to the MCP */
4304        bxe_send_unload_req(sc, unload_mode);
4305
4306        /*
4307         * Prevent transactions to host from the functions on the
4308         * engine that doesn't reset global blocks in case of global
4309         * attention once global blocks are reset and gates are opened
4310         * (the engine whose leader will perform the recovery
4311         * last).
4312         */
4313        if (!CHIP_IS_E1x(sc)) {
4314            bxe_pf_disable(sc);
4315        }
4316
4317        /* disable HW interrupts */
4318        bxe_int_disable_sync(sc, TRUE);
4319
4320        /* detach interrupts */
4321        bxe_interrupt_detach(sc);
4322
4323        /* Report UNLOAD_DONE to MCP */
4324        bxe_send_unload_done(sc, FALSE);
4325    }
4326
4327    /*
4328     * At this stage no more interrupts will arrive so we may safely clean
4329     * the queueable objects here in case they failed to get cleaned so far.
4330     */
4331    if (IS_PF(sc)) {
4332        bxe_squeeze_objects(sc);
4333    }
4334
4335    /* There should be no more pending SP commands at this stage */
4336    sc->sp_state = 0;
4337
4338    sc->port.pmf = 0;
4339
4340    bxe_free_fp_buffers(sc);
4341
4342    if (IS_PF(sc)) {
4343        bxe_free_mem(sc);
4344    }
4345
4346    bxe_free_fw_stats_mem(sc);
4347
4348    sc->state = BXE_STATE_CLOSED;
4349
4350    /*
4351     * Check if there are pending parity attentions. If there are - set
4352     * RECOVERY_IN_PROGRESS.
4353     */
4354    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4355        bxe_set_reset_in_progress(sc);
4356
4357        /* Set RESET_IS_GLOBAL if needed */
4358        if (global) {
4359            bxe_set_reset_global(sc);
4360        }
4361    }
4362
4363    /*
4364 * The last driver must disable the "close the gate" feature if there is no
4365     * parity attention or "process kill" pending.
4366     */
4367    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4368        bxe_reset_is_done(sc, SC_PATH(sc))) {
4369        bxe_disable_close_the_gate(sc);
4370    }
4371
4372    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4373
4374    bxe_link_report(sc);
4375
4376    return (0);
4377}
4378
4379/*
4380 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4381 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4382 */
4383static int
4384bxe_ifmedia_update(if_t ifp)
4385{
4386    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4387    struct ifmedia *ifm;
4388
4389    ifm = &sc->ifmedia;
4390
4391    /* We only support Ethernet media type. */
4392    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4393        return (EINVAL);
4394    }
4395
4396    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4397    case IFM_AUTO:
4398         break;
4399    case IFM_10G_CX4:
4400    case IFM_10G_SR:
4401    case IFM_10G_T:
4402    case IFM_10G_TWINAX:
4403    default:
4404        /* We don't support changing the media type. */
4405        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4406              IFM_SUBTYPE(ifm->ifm_media));
4407        return (EINVAL);
4408    }
4409
4410    return (0);
4411}
4412
4413/*
4414 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4415 */
4416static void
4417bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
4418{
4419    struct bxe_softc *sc = if_getsoftc(ifp);
4420
4421    /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4422       line if the IFM_AVALID flag is *NOT* set. So we need to set this
4423       flag unconditionally (irrespective of the administrative
4424       'up/down' state of the interface) to ensure that the line is always
4425       displayed.
4426    */
4427    ifmr->ifm_status = IFM_AVALID;
4428
4429    /* Setup the default interface info. */
4430    ifmr->ifm_active = IFM_ETHER;
4431
4432    /* Report link down if the driver isn't running. */
4433    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4434        ifmr->ifm_active |= IFM_NONE;
4435        BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4436        BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4437                __func__, sc->link_vars.link_up);
4438        return;
4439    }
4440
4441
4442    if (sc->link_vars.link_up) {
4443        ifmr->ifm_status |= IFM_ACTIVE;
4444        ifmr->ifm_active |= IFM_FDX;
4445    } else {
4446        ifmr->ifm_active |= IFM_NONE;
4447        BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4448                __func__);
4449        return;
4450    }
4451
4452    ifmr->ifm_active |= sc->media;
4453    return;
4454}
4455
4456static void
4457bxe_handle_chip_tq(void *context,
4458                   int  pending)
4459{
4460    struct bxe_softc *sc = (struct bxe_softc *)context;
4461    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4462
4463    switch (work)
4464    {
4465
4466    case CHIP_TQ_REINIT:
4467        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4468            /* restart the interface */
4469            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4470            bxe_periodic_stop(sc);
4471            BXE_CORE_LOCK(sc);
4472            bxe_stop_locked(sc);
4473            bxe_init_locked(sc);
4474            BXE_CORE_UNLOCK(sc);
4475        }
4476        break;
4477
4478    default:
4479        break;
4480    }
4481}
4482
4483/*
4484 * Handles any IOCTL calls from the operating system.
4485 *
4486 * Returns:
4487 *   0 = Success, >0 Failure
4488 */
4489static int
4490bxe_ioctl(if_t ifp,
4491          u_long       command,
4492          caddr_t      data)
4493{
4494    struct bxe_softc *sc = if_getsoftc(ifp);
4495    struct ifreq *ifr = (struct ifreq *)data;
4496    int mask = 0;
4497    int reinit = 0;
4498    int error = 0;
4499
4500    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4501    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4502
4503    switch (command)
4504    {
4505    case SIOCSIFMTU:
4506        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4507              ifr->ifr_mtu);
4508
4509        if (sc->mtu == ifr->ifr_mtu) {
4510            /* nothing to change */
4511            break;
4512        }
4513
4514        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4515            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4516                  ifr->ifr_mtu, mtu_min, mtu_max);
4517            error = EINVAL;
4518            break;
4519        }
4520
4521        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4522                             (unsigned long)ifr->ifr_mtu);
4523	/*
4524        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4525                              (unsigned long)ifr->ifr_mtu);
4526	XXX - Not sure why it needs to be atomic
4527	*/
4528	if_setmtu(ifp, ifr->ifr_mtu);
4529        reinit = 1;
4530        break;
4531
4532    case SIOCSIFFLAGS:
4533        /* toggle the interface state up or down */
4534        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4535
4536	BXE_CORE_LOCK(sc);
4537        /* check if the interface is up */
4538        if (if_getflags(ifp) & IFF_UP) {
4539            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4540                /* set the receive mode flags */
4541                bxe_set_rx_mode(sc);
4542            } else if(sc->state != BXE_STATE_DISABLED) {
4543		bxe_init_locked(sc);
4544            }
4545        } else {
4546            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4547		bxe_periodic_stop(sc);
4548		bxe_stop_locked(sc);
4549            }
4550        }
4551	BXE_CORE_UNLOCK(sc);
4552
4553        break;
4554
4555    case SIOCADDMULTI:
4556    case SIOCDELMULTI:
4557        /* add/delete multicast addresses */
4558        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4559
4560        /* check if the interface is up */
4561        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4562            /* set the receive mode flags */
4563	    BXE_CORE_LOCK(sc);
4564            bxe_set_rx_mode(sc);
4565	    BXE_CORE_UNLOCK(sc);
4566        }
4567
4568        break;
4569
4570    case SIOCSIFCAP:
4571        /* find out which capabilities have changed */
4572        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4573
4574        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4575              mask);
4576
4577        /* toggle the LRO capabilities enable flag */
4578        if (mask & IFCAP_LRO) {
4579	    if_togglecapenable(ifp, IFCAP_LRO);
4580            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4581                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4582            reinit = 1;
4583        }
4584
4585        /* toggle the TXCSUM checksum capabilities enable flag */
4586        if (mask & IFCAP_TXCSUM) {
4587	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4588            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4589                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4590            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4591                if_sethwassistbits(ifp, (CSUM_IP      |
4592                                    CSUM_TCP      |
4593                                    CSUM_UDP      |
4594                                    CSUM_TSO      |
4595                                    CSUM_TCP_IPV6 |
4596                                    CSUM_UDP_IPV6), 0);
4597            } else {
4598		if_clearhwassist(ifp); /* XXX */
4599            }
4600        }
4601
4602        /* toggle the RXCSUM checksum capabilities enable flag */
4603        if (mask & IFCAP_RXCSUM) {
4604	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4605            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4606                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4607            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4608                if_sethwassistbits(ifp, (CSUM_IP      |
4609                                    CSUM_TCP      |
4610                                    CSUM_UDP      |
4611                                    CSUM_TSO      |
4612                                    CSUM_TCP_IPV6 |
4613                                    CSUM_UDP_IPV6), 0);
4614            } else {
4615		if_clearhwassist(ifp); /* XXX */
4616            }
4617        }
4618
4619        /* toggle TSO4 capabilities enabled flag */
4620        if (mask & IFCAP_TSO4) {
4621            if_togglecapenable(ifp, IFCAP_TSO4);
4622            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4623                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4624        }
4625
4626        /* toggle TSO6 capabilities enabled flag */
4627        if (mask & IFCAP_TSO6) {
4628	    if_togglecapenable(ifp, IFCAP_TSO6);
4629            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4630                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4631        }
4632
4633        /* toggle VLAN_HWTSO capabilities enabled flag */
4634        if (mask & IFCAP_VLAN_HWTSO) {
4635
4636	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4637            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4638                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4639        }
4640
4641        /* toggle VLAN_HWCSUM capabilities enabled flag */
4642        if (mask & IFCAP_VLAN_HWCSUM) {
4643            /* XXX investigate this... */
4644            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4645            error = EINVAL;
4646        }
4647
4648        /* toggle VLAN_MTU capabilities enable flag */
4649        if (mask & IFCAP_VLAN_MTU) {
4650            /* XXX investigate this... */
4651            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4652            error = EINVAL;
4653        }
4654
4655        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4656        if (mask & IFCAP_VLAN_HWTAGGING) {
4657            /* XXX investigate this... */
4658            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4659            error = EINVAL;
4660        }
4661
4662        /* toggle VLAN_HWFILTER capabilities enabled flag */
4663        if (mask & IFCAP_VLAN_HWFILTER) {
4664            /* XXX investigate this... */
4665            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4666            error = EINVAL;
4667        }
4668
4669        /* XXX not yet...
4670         * IFCAP_WOL_MAGIC
4671         */
4672
4673        break;
4674
4675    case SIOCSIFMEDIA:
4676    case SIOCGIFMEDIA:
4677        /* set/get interface media */
4678        BLOGD(sc, DBG_IOCTL,
4679              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4680              (command & 0xff));
4681        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4682        break;
4683
4684    default:
4685        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4686              (command & 0xff));
4687        error = ether_ioctl(ifp, command, data);
4688        break;
4689    }
4690
4691    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4692        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4693              "Re-initializing hardware from IOCTL change\n");
4694	bxe_periodic_stop(sc);
4695	BXE_CORE_LOCK(sc);
4696	bxe_stop_locked(sc);
4697	bxe_init_locked(sc);
4698	BXE_CORE_UNLOCK(sc);
4699    }
4700
4701    return (error);
4702}
4703
4704static __noinline void
4705bxe_dump_mbuf(struct bxe_softc *sc,
4706              struct mbuf      *m,
4707              uint8_t          contents)
4708{
4709    char * type;
4710    int i = 0;
4711
4712    if (!(sc->debug & DBG_MBUF)) {
4713        return;
4714    }
4715
4716    if (m == NULL) {
4717        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4718        return;
4719    }
4720
4721    while (m) {
4722
4723        BLOGD(sc, DBG_MBUF,
4724              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4725              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4726
4727        if (m->m_flags & M_PKTHDR) {
4728             BLOGD(sc, DBG_MBUF,
4729                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4730                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4731                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4732        }
4733
4734        if (m->m_flags & M_EXT) {
4735            switch (m->m_ext.ext_type) {
4736            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4737            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4738            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4739            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4740            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4741            case EXT_PACKET:     type = "EXT_PACKET";     break;
4742            case EXT_MBUF:       type = "EXT_MBUF";       break;
4743            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4744            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4745            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4746            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4747            default:             type = "UNKNOWN";        break;
4748            }
4749
4750            BLOGD(sc, DBG_MBUF,
4751                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4752                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4753        }
4754
4755        if (contents) {
4756            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4757        }
4758
4759        m = m->m_next;
4760        i++;
4761    }
4762}
4763
4764/*
4765 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4766 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4767 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4768 * The window: 3 bds = 1 for the headers BD + 2 for the parse BD and last BD.
4769 * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10.
4770 */
4771static int
4772bxe_chktso_window(struct bxe_softc  *sc,
4773                  int               nsegs,
4774                  bus_dma_segment_t *segs,
4775                  struct mbuf       *m)
4776{
4777    uint32_t num_wnds, wnd_size, wnd_sum;
4778    int32_t frag_idx, wnd_idx;
4779    unsigned short lso_mss;
4780
4781    wnd_sum = 0;
4782    wnd_size = 10;
4783    num_wnds = nsegs - wnd_size;
4784    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4785
4786    /*
4787     * The total Eth+IP+TCP header lengths are in the first FreeBSD mbuf, so
4788     * compute the first window's data sum while skipping the first segment,
4789     * assuming it holds the headers.
4790     */
4791    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4792        wnd_sum += htole16(segs[frag_idx].ds_len);
4793    }
4794
4795    /* check the first 10 bd window size */
4796    if (wnd_sum < lso_mss) {
4797        return (1);
4798    }
4799
4800    /* run through the windows */
4801    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4802        /* slide the window: drop the length of the oldest segment */
4803        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4804        /* add the next mbuf len to the len of our new window */
4805        wnd_sum += htole16(segs[frag_idx].ds_len);
4806        if (wnd_sum < lso_mss) {
4807            return (1);
4808        }
4809    }
4810
4811    return (0);
4812}
4813
4814static uint8_t
4815bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4816                    struct mbuf         *m,
4817                    uint32_t            *parsing_data)
4818{
4819    struct ether_vlan_header *eh = NULL;
4820    struct ip *ip4 = NULL;
4821    struct ip6_hdr *ip6 = NULL;
4822    caddr_t ip = NULL;
4823    struct tcphdr *th = NULL;
4824    int e_hlen, ip_hlen, l4_off;
4825    uint16_t proto;
4826
4827    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4828        /* no L4 checksum offload needed */
4829        return (0);
4830    }
4831
4832    /* get the Ethernet header */
4833    eh = mtod(m, struct ether_vlan_header *);
4834
4835    /* handle VLAN encapsulation if present */
4836    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4837        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4838        proto  = ntohs(eh->evl_proto);
4839    } else {
4840        e_hlen = ETHER_HDR_LEN;
4841        proto  = ntohs(eh->evl_encap_proto);
4842    }
4843
4844    switch (proto) {
4845    case ETHERTYPE_IP:
4846        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4847        ip4 = (m->m_len < sizeof(struct ip)) ?
4848                  (struct ip *)m->m_next->m_data :
4849                  (struct ip *)(m->m_data + e_hlen);
4850        /* ip_hl is number of 32-bit words */
4851        ip_hlen = (ip4->ip_hl << 2);
4852        ip = (caddr_t)ip4;
4853        break;
4854    case ETHERTYPE_IPV6:
4855        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4856        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4857                  (struct ip6_hdr *)m->m_next->m_data :
4858                  (struct ip6_hdr *)(m->m_data + e_hlen);
4859        /* XXX cannot support offload with IPv6 extensions */
4860        ip_hlen = sizeof(struct ip6_hdr);
4861        ip = (caddr_t)ip6;
4862        break;
4863    default:
4864        /* We can't offload in this case... */
4865        /* XXX error stat ??? */
4866        return (0);
4867    }
4868
4869    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4870    l4_off = (e_hlen + ip_hlen);
4871
4872    *parsing_data |=
4873        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4874         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4875
4876    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4877                                  CSUM_TSO |
4878                                  CSUM_TCP_IPV6)) {
4879        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4880        th = (struct tcphdr *)(ip + ip_hlen);
4881        /* th_off is number of 32-bit words */
4882        *parsing_data |= ((th->th_off <<
4883                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4884                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4885        return (l4_off + (th->th_off << 2)); /* entire header length */
4886    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4887                                         CSUM_UDP_IPV6)) {
4888        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4889        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4890    } else {
4891        /* XXX error stat ??? */
4892        return (0);
4893    }
4894}
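
/*
 * Worked example (illustrative only, assuming a plain Ethernet + IPv4 + TCP
 * frame with no VLAN tag and no IP or TCP options): e_hlen = 14 and
 * ip_hlen = 20, so l4_off = 34 bytes, encoded above as 17 (16-bit words) in
 * the parsing data. With th_off = 5 the TCP header length is encoded as 5
 * (32-bit words) and the function returns 34 + 20 = 54, the total header
 * length in bytes.
 */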
4895
4896static uint8_t
4897bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4898                 struct mbuf                *m,
4899                 struct eth_tx_parse_bd_e1x *pbd)
4900{
4901    struct ether_vlan_header *eh = NULL;
4902    struct ip *ip4 = NULL;
4903    struct ip6_hdr *ip6 = NULL;
4904    caddr_t ip = NULL;
4905    struct tcphdr *th = NULL;
4906    struct udphdr *uh = NULL;
4907    int e_hlen, ip_hlen;
4908    uint16_t proto;
4909    uint8_t hlen;
4910    uint16_t tmp_csum;
4911    uint32_t *tmp_uh;
4912
4913    /* get the Ethernet header */
4914    eh = mtod(m, struct ether_vlan_header *);
4915
4916    /* handle VLAN encapsulation if present */
4917    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4918        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4919        proto  = ntohs(eh->evl_proto);
4920    } else {
4921        e_hlen = ETHER_HDR_LEN;
4922        proto  = ntohs(eh->evl_encap_proto);
4923    }
4924
4925    switch (proto) {
4926    case ETHERTYPE_IP:
4927        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4928        ip4 = (m->m_len < sizeof(struct ip)) ?
4929                  (struct ip *)m->m_next->m_data :
4930                  (struct ip *)(m->m_data + e_hlen);
4931        /* ip_hl is in 32-bit words; convert to 16-bit words for the pbd */
4932        ip_hlen = (ip4->ip_hl << 1);
4933        ip = (caddr_t)ip4;
4934        break;
4935    case ETHERTYPE_IPV6:
4936        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4937        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4938                  (struct ip6_hdr *)m->m_next->m_data :
4939                  (struct ip6_hdr *)(m->m_data + e_hlen);
4940        /* XXX cannot support offload with IPv6 extensions */
4941        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4942        ip = (caddr_t)ip6;
4943        break;
4944    default:
4945        /* We can't offload in this case... */
4946        /* XXX error stat ??? */
4947        return (0);
4948    }
4949
4950    hlen = (e_hlen >> 1);
4951
4952    /* note that rest of global_data is indirectly zeroed here */
4953    if (m->m_flags & M_VLANTAG) {
4954        pbd->global_data =
4955            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4956    } else {
4957        pbd->global_data = htole16(hlen);
4958    }
4959
4960    pbd->ip_hlen_w = ip_hlen;
4961
4962    hlen += pbd->ip_hlen_w;
4963
4964    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4965
4966    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4967                                  CSUM_TSO |
4968                                  CSUM_TCP_IPV6)) {
4969        th = (struct tcphdr *)(ip + (ip_hlen << 1));
4970        /* th_off is in 32-bit words; convert to 16-bit words */
4971        hlen += (uint16_t)(th->th_off << 1);
4972    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4973                                         CSUM_UDP_IPV6)) {
4974        uh = (struct udphdr *)(ip + (ip_hlen << 1));
4975        hlen += (sizeof(struct udphdr) / 2);
4976    } else {
4977        /* valid case as only CSUM_IP was set */
4978        return (0);
4979    }
4980
4981    pbd->total_hlen_w = htole16(hlen);
4982
4983    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4984                                  CSUM_TSO |
4985                                  CSUM_TCP_IPV6)) {
4986        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4987        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4988    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4989                                         CSUM_UDP_IPV6)) {
4990        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4991
4992        /*
4993         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
4994         * checksums and does not know anything about the UDP header and where
4995         * the checksum field is located. It only knows about TCP. Therefore
4996         * we "lie" to the hardware for outgoing UDP packets w/ checksum
4997         * offload. Since the checksum field offset for TCP is 16 bytes and
4998         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
4999         * bytes less than the start of the UDP header. This allows the
5000         * hardware to write the checksum in the correct spot. But the
5001         * hardware will compute a checksum which includes the last 10 bytes
5002         * of the IP header. To correct this we tweak the stack computed
5003         * pseudo checksum by folding in the calculation of the inverse
5004         * checksum for those final 10 bytes of the IP header. This allows
5005         * the correct checksum to be computed by the hardware.
5006         */
5007
5008        /* set pointer 10 bytes before UDP header */
5009        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5010
5011        /* calculate a pseudo header checksum over the first 10 bytes */
5012        tmp_csum = in_pseudo(*tmp_uh,
5013                             *(tmp_uh + 1),
5014                             *(uint16_t *)(tmp_uh + 2));
5015
5016        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5017    }
5018
5019    return (hlen * 2); /* entire header length, number of bytes */
5020}
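
/*
 * A sketch of the UDP checksum fold above (descriptive only): because the
 * hardware treats the frame as TCP, it checksums starting 10 bytes before
 * the real UDP header and therefore also covers the last 10 bytes of the IP
 * header. Folding the one's-complement (inverse) sum of those 10 bytes into
 * the stack's pseudo checksum makes that extra contribution cancel, so the
 * value the hardware finally writes into the UDP checksum field is correct.
 */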
5021
5022static void
5023bxe_set_pbd_lso_e2(struct mbuf *m,
5024                   uint32_t    *parsing_data)
5025{
5026    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5027                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5028                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5029
5030    /* XXX test for IPv6 with extension header... */
5031}
5032
5033static void
5034bxe_set_pbd_lso(struct mbuf                *m,
5035                struct eth_tx_parse_bd_e1x *pbd)
5036{
5037    struct ether_vlan_header *eh = NULL;
5038    struct ip *ip = NULL;
5039    struct tcphdr *th = NULL;
5040    int e_hlen;
5041
5042    /* get the Ethernet header */
5043    eh = mtod(m, struct ether_vlan_header *);
5044
5045    /* handle VLAN encapsulation if present */
5046    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5047                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5048
5049    /* get the IP and TCP header, with LSO entire header in first mbuf */
5050    /* XXX assuming IPv4 */
5051    ip = (struct ip *)(m->m_data + e_hlen);
5052    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5053
5054    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5055    pbd->tcp_send_seq = ntohl(th->th_seq);
5056    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5057
5058#if 1
5059        /* XXX IPv4 */
5060        pbd->ip_id = ntohs(ip->ip_id);
5061        pbd->tcp_pseudo_csum =
5062            ntohs(in_pseudo(ip->ip_src.s_addr,
5063                            ip->ip_dst.s_addr,
5064                            htons(IPPROTO_TCP)));
5065#else
5066        /* XXX IPv6 */
5067        pbd->tcp_pseudo_csum =
5068            ntohs(in_pseudo(&ip6->ip6_src,
5069                            &ip6->ip6_dst,
5070                            htons(IPPROTO_TCP)));
5071#endif
5072
5073    pbd->global_data |=
5074        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5075}
5076
5077/*
5078 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5079 * visible to the controller.
5080 *
5081 * If an mbuf is submitted to this routine and cannot be given to the
5082 * controller (e.g. it has too many fragments) then the function may free
5083 * the mbuf and return to the caller.
5084 *
5085 * Returns:
5086 *   0 = Success, !0 = Failure
5087 *   Note the side effect that an mbuf may be freed if it causes a problem.
5088 */
5089static int
5090bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5091{
5092    bus_dma_segment_t segs[32];
5093    struct mbuf *m0;
5094    struct bxe_sw_tx_bd *tx_buf;
5095    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5096    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5097    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5098    struct eth_tx_bd *tx_data_bd;
5099    struct eth_tx_bd *tx_total_pkt_size_bd;
5100    struct eth_tx_start_bd *tx_start_bd;
5101    uint16_t bd_prod, pkt_prod, total_pkt_size;
5102    uint8_t mac_type;
5103    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5104    struct bxe_softc *sc;
5105    uint16_t tx_bd_avail;
5106    struct ether_vlan_header *eh;
5107    uint32_t pbd_e2_parsing_data = 0;
5108    uint8_t hlen = 0;
5109    int tmp_bd;
5110    int i;
5111
5112    sc = fp->sc;
5113
5114    M_ASSERTPKTHDR(*m_head);
5115
5116    m0 = *m_head;
5117    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5118    tx_start_bd = NULL;
5119    tx_data_bd = NULL;
5120    tx_total_pkt_size_bd = NULL;
5121
5122    /* get the H/W pointer for packets and BDs */
5123    pkt_prod = fp->tx_pkt_prod;
5124    bd_prod = fp->tx_bd_prod;
5125
5126    mac_type = UNICAST_ADDRESS;
5127
5128    /* map the mbuf into the next open DMAable memory */
5129    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5130    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5131                                    tx_buf->m_map, m0,
5132                                    segs, &nsegs, BUS_DMA_NOWAIT);
5133
5134    /* mapping errors */
5135    if(__predict_false(error != 0)) {
5136        fp->eth_q_stats.tx_dma_mapping_failure++;
5137        if (error == ENOMEM) {
5138            /* resource issue, try again later */
5139            rc = ENOMEM;
5140        } else if (error == EFBIG) {
5141            /* possibly recoverable with defragmentation */
5142            fp->eth_q_stats.mbuf_defrag_attempts++;
5143            m0 = m_defrag(*m_head, M_NOWAIT);
5144            if (m0 == NULL) {
5145                fp->eth_q_stats.mbuf_defrag_failures++;
5146                rc = ENOBUFS;
5147            } else {
5148                /* defrag successful, try mapping again */
5149                *m_head = m0;
5150                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5151                                                tx_buf->m_map, m0,
5152                                                segs, &nsegs, BUS_DMA_NOWAIT);
5153                if (error) {
5154                    fp->eth_q_stats.tx_dma_mapping_failure++;
5155                    rc = error;
5156                }
5157            }
5158        } else {
5159            /* unknown, unrecoverable mapping error */
5160            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5161            bxe_dump_mbuf(sc, m0, FALSE);
5162            rc = error;
5163        }
5164
5165        goto bxe_tx_encap_continue;
5166    }
5167
5168    tx_bd_avail = bxe_tx_avail(sc, fp);
5169
5170    /* make sure there is enough room in the send queue */
5171    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5172        /* Recoverable, try again later. */
5173        fp->eth_q_stats.tx_hw_queue_full++;
5174        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5175        rc = ENOMEM;
5176        goto bxe_tx_encap_continue;
5177    }
5178
5179    /* capture the current H/W TX chain high watermark */
5180    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5181                        (TX_BD_USABLE - tx_bd_avail))) {
5182        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5183    }
5184
5185    /* make sure it fits in the packet window */
5186    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5187        /*
5188         * The mbuf may be too big for the controller to handle. If the frame
5189         * is a TSO frame we'll need to do an additional check.
5190         */
5191        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5192            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5193                goto bxe_tx_encap_continue; /* OK to send */
5194            } else {
5195                fp->eth_q_stats.tx_window_violation_tso++;
5196            }
5197        } else {
5198            fp->eth_q_stats.tx_window_violation_std++;
5199        }
5200
5201        /* lets try to defragment this mbuf and remap it */
5202        fp->eth_q_stats.mbuf_defrag_attempts++;
5203        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5204
5205        m0 = m_defrag(*m_head, M_NOWAIT);
5206        if (m0 == NULL) {
5207            fp->eth_q_stats.mbuf_defrag_failures++;
5208            /* Ugh, just drop the frame... :( */
5209            rc = ENOBUFS;
5210        } else {
5211            /* defrag successful, try mapping again */
5212            *m_head = m0;
5213            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5214                                            tx_buf->m_map, m0,
5215                                            segs, &nsegs, BUS_DMA_NOWAIT);
5216            if (error) {
5217                fp->eth_q_stats.tx_dma_mapping_failure++;
5218                /* No sense in trying to defrag/copy chain, drop it. :( */
5219                rc = error;
5220            } else {
5221                /* if the chain is still too long then drop it */
5222                if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5223                    /*
5224                     * in case TSO is enabled nsegs should be checked against
5225                     * BXE_TSO_MAX_SEGMENTS
5226                     */
5227                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5228                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5229                        fp->eth_q_stats.nsegs_path1_errors++;
5230                        rc = ENODEV;
5231                    }
5232                } else {
5233                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5234                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5235                        fp->eth_q_stats.nsegs_path2_errors++;
5236                        rc = ENODEV;
5237                    }
5238                }
5239            }
5240        }
5241    }
5242
5243bxe_tx_encap_continue:
5244
5245    /* Check for errors */
5246    if (rc) {
5247        if (rc == ENOMEM) {
5248            /* recoverable, try again later */
5249        } else {
5250            fp->eth_q_stats.tx_soft_errors++;
5251            fp->eth_q_stats.mbuf_alloc_tx--;
5252            m_freem(*m_head);
5253            *m_head = NULL;
5254        }
5255
5256        return (rc);
5257    }
5258
5259    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5260    if (m0->m_flags & M_BCAST) {
5261        mac_type = BROADCAST_ADDRESS;
5262    } else if (m0->m_flags & M_MCAST) {
5263        mac_type = MULTICAST_ADDRESS;
5264    }
5265
5266    /* store the mbuf into the mbuf ring */
5267    tx_buf->m        = m0;
5268    tx_buf->first_bd = fp->tx_bd_prod;
5269    tx_buf->flags    = 0;
5270
5271    /* prepare the first transmit (start) BD for the mbuf */
5272    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5273
5274    BLOGD(sc, DBG_TX,
5275          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5276          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5277
5278    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5279    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5280    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5281    total_pkt_size += tx_start_bd->nbytes;
5282    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5283
5284    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5285
5286    /* all frames have at least Start BD + Parsing BD */
5287    nbds = nsegs + 1;
5288    tx_start_bd->nbd = htole16(nbds);
5289
5290    if (m0->m_flags & M_VLANTAG) {
5291        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5292        tx_start_bd->bd_flags.as_bitfield |=
5293            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5294    } else {
5295        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5296        if (IS_VF(sc)) {
5297            /* map ethernet header to find type and header length */
5298            eh = mtod(m0, struct ether_vlan_header *);
5299            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5300        } else {
5301            /* used by FW for packet accounting */
5302            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5303        }
5304    }
5305
5306    /*
5307     * add a parsing BD from the chain. The parsing BD is always added
5308     * even though it is only used for TSO and checksum offload.
5309     */
5310    bd_prod = TX_BD_NEXT(bd_prod);
5311
5312    if (m0->m_pkthdr.csum_flags) {
5313        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5314            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5315            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5316        }
5317
5318        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5319            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5320                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5321        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5322            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5323                                                  ETH_TX_BD_FLAGS_IS_UDP |
5324                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5325        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5326                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5327            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5328        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5329            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5330                                                  ETH_TX_BD_FLAGS_IS_UDP);
5331        }
5332    }
5333
5334    if (!CHIP_IS_E1x(sc)) {
5335        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5336        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5337
5338        if (m0->m_pkthdr.csum_flags) {
5339            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5340        }
5341
5342        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5343                 mac_type);
5344    } else {
5345        uint16_t global_data = 0;
5346
5347        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5348        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5349
5350        if (m0->m_pkthdr.csum_flags) {
5351            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5352        }
5353
5354        SET_FLAG(global_data,
5355                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5356        pbd_e1x->global_data |= htole16(global_data);
5357    }
5358
5359    /* setup the parsing BD with TSO specific info */
5360    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5361        fp->eth_q_stats.tx_ofld_frames_lso++;
5362        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5363
5364        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5365            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5366
5367            /* split the first BD into header/data making the fw job easy */
5368            nbds++;
5369            tx_start_bd->nbd = htole16(nbds);
5370            tx_start_bd->nbytes = htole16(hlen);
5371
5372            bd_prod = TX_BD_NEXT(bd_prod);
5373
5374            /* new transmit BD after the tx_parse_bd */
5375            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5376            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5377            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5378            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5379            if (tx_total_pkt_size_bd == NULL) {
5380                tx_total_pkt_size_bd = tx_data_bd;
5381            }
5382
5383            BLOGD(sc, DBG_TX,
5384                  "TSO split header size is %d (%x:%x) nbds %d\n",
5385                  le16toh(tx_start_bd->nbytes),
5386                  le32toh(tx_start_bd->addr_hi),
5387                  le32toh(tx_start_bd->addr_lo),
5388                  nbds);
5389        }
5390
5391        if (!CHIP_IS_E1x(sc)) {
5392            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5393        } else {
5394            bxe_set_pbd_lso(m0, pbd_e1x);
5395        }
5396    }
5397
5398    if (pbd_e2_parsing_data) {
5399        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5400    }
5401
5402    /* prepare remaining BDs, start tx bd contains first seg/frag */
5403    for (i = 1; i < nsegs ; i++) {
5404        bd_prod = TX_BD_NEXT(bd_prod);
5405        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5406        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5407        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5408        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5409        if (tx_total_pkt_size_bd == NULL) {
5410            tx_total_pkt_size_bd = tx_data_bd;
5411        }
5412        total_pkt_size += tx_data_bd->nbytes;
5413    }
5414
5415    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5416
5417    if (tx_total_pkt_size_bd != NULL) {
5418        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5419    }
5420
5421    if (__predict_false(sc->debug & DBG_TX)) {
5422        tmp_bd = tx_buf->first_bd;
5423        for (i = 0; i < nbds; i++)
5424        {
5425            if (i == 0) {
5426                BLOGD(sc, DBG_TX,
5427                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5428                      "bd_flags=0x%x hdr_nbds=%d\n",
5429                      tx_start_bd,
5430                      tmp_bd,
5431                      le16toh(tx_start_bd->nbd),
5432                      le16toh(tx_start_bd->vlan_or_ethertype),
5433                      tx_start_bd->bd_flags.as_bitfield,
5434                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5435            } else if (i == 1) {
5436                if (pbd_e1x) {
5437                    BLOGD(sc, DBG_TX,
5438                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5439                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5440                          "tcp_seq=%u total_hlen_w=%u\n",
5441                          pbd_e1x,
5442                          tmp_bd,
5443                          pbd_e1x->global_data,
5444                          pbd_e1x->ip_hlen_w,
5445                          pbd_e1x->ip_id,
5446                          pbd_e1x->lso_mss,
5447                          pbd_e1x->tcp_flags,
5448                          pbd_e1x->tcp_pseudo_csum,
5449                          pbd_e1x->tcp_send_seq,
5450                          le16toh(pbd_e1x->total_hlen_w));
5451                } else { /* if (pbd_e2) */
5452                    BLOGD(sc, DBG_TX,
5453                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5454                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5455                          pbd_e2,
5456                          tmp_bd,
5457                          pbd_e2->data.mac_addr.dst_hi,
5458                          pbd_e2->data.mac_addr.dst_mid,
5459                          pbd_e2->data.mac_addr.dst_lo,
5460                          pbd_e2->data.mac_addr.src_hi,
5461                          pbd_e2->data.mac_addr.src_mid,
5462                          pbd_e2->data.mac_addr.src_lo,
5463                          pbd_e2->parsing_data);
5464                }
5465            }
5466
5467            if (i != 1) { /* skip the parse bd as it doesn't hold data */
5468                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5469                BLOGD(sc, DBG_TX,
5470                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5471                      tx_data_bd,
5472                      tmp_bd,
5473                      le16toh(tx_data_bd->nbytes),
5474                      le32toh(tx_data_bd->addr_hi),
5475                      le32toh(tx_data_bd->addr_lo));
5476            }
5477
5478            tmp_bd = TX_BD_NEXT(tmp_bd);
5479        }
5480    }
5481
5482    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5483
5484    /* update TX BD producer index value for next TX */
5485    bd_prod = TX_BD_NEXT(bd_prod);
5486
5487    /*
5488     * If the chain of tx_bd's describing this frame is adjacent to or spans
5489     * an eth_tx_next_bd element then we need to increment the nbds value.
5490     */
5491    if (TX_BD_IDX(bd_prod) < nbds) {
5492        nbds++;
5493    }
5494
5495    /* don't allow reordering of writes for nbd and packets */
5496    mb();
5497
5498    fp->tx_db.data.prod += nbds;
5499
5500    /* producer points to the next free tx_bd at this point */
5501    fp->tx_pkt_prod++;
5502    fp->tx_bd_prod = bd_prod;
5503
5504    DOORBELL(sc, fp->index, fp->tx_db.raw);
5505
5506    fp->eth_q_stats.tx_pkts++;
5507
5508    /* Prevent speculative reads from getting ahead of the status block. */
5509    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5510                      0, 0, BUS_SPACE_BARRIER_READ);
5511
5512    /* Prevent speculative reads from getting ahead of the doorbell. */
5513    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5514                      0, 0, BUS_SPACE_BARRIER_READ);
5515
5516    return (0);
5517}
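
/*
 * Summary of the BD layout produced above (descriptive note): each frame
 * consumes one start BD followed by one parsing BD (E1x or E2 flavor), then
 * one data BD per remaining DMA segment; a TSO frame whose first segment
 * mixes headers and payload gets one extra data BD from the header split.
 * nbds counts all of these and is bumped once more if the chain is adjacent
 * to or spans an eth_tx_next_bd element before the doorbell is rung.
 */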
5518
5519static void
5520bxe_tx_start_locked(struct bxe_softc *sc,
5521                    if_t ifp,
5522                    struct bxe_fastpath *fp)
5523{
5524    struct mbuf *m = NULL;
5525    int tx_count = 0;
5526    uint16_t tx_bd_avail;
5527
5528    BXE_FP_TX_LOCK_ASSERT(fp);
5529
5530    /* keep adding entries while there are frames to send */
5531    while (!if_sendq_empty(ifp)) {
5532
5533        /*
5534         * check for any frames to send;
5535         * if_dequeue() can still return NULL even if the queue is not empty
5536         */
5537        m = if_dequeue(ifp);
5538        if (__predict_false(m == NULL)) {
5539            break;
5540        }
5541
5542        /* the mbuf now belongs to us */
5543        fp->eth_q_stats.mbuf_alloc_tx++;
5544
5545        /*
5546         * Put the frame into the transmit ring. If we don't have room,
5547         * place the mbuf back at the head of the TX queue, set the
5548         * OACTIVE flag, and wait for the NIC to drain the chain.
5549         */
5550        if (__predict_false(bxe_tx_encap(fp, &m))) {
5551            fp->eth_q_stats.tx_encap_failures++;
5552            if (m != NULL) {
5553                /* mark the TX queue as full and return the frame */
5554                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5555                if_sendq_prepend(ifp, m);
5556                fp->eth_q_stats.mbuf_alloc_tx--;
5557                fp->eth_q_stats.tx_queue_xoff++;
5558            }
5559
5560            /* stop looking for more work */
5561            break;
5562        }
5563
5564        /* the frame was enqueued successfully */
5565        tx_count++;
5566
5567        /* send a copy of the frame to any BPF listeners. */
5568        ether_bpf_mtap_if(ifp, m);
5569
5570        tx_bd_avail = bxe_tx_avail(sc, fp);
5571
5572        /* handle any completions if we're running low */
5573        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5574            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5575            bxe_txeof(sc, fp);
5576            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5577                break;
5578            }
5579        }
5580    }
5581
5582    /* all TX packets were dequeued and/or the tx ring is full */
5583    if (tx_count > 0) {
5584        /* reset the TX watchdog timeout timer */
5585        fp->watchdog_timer = BXE_TX_TIMEOUT;
5586    }
5587}
5588
5589/* Legacy (non-RSS) dispatch routine */
5590static void
5591bxe_tx_start(if_t ifp)
5592{
5593    struct bxe_softc *sc;
5594    struct bxe_fastpath *fp;
5595
5596    sc = if_getsoftc(ifp);
5597
5598    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5599        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5600        return;
5601    }
5602
5603    if (!sc->link_vars.link_up) {
5604        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5605        return;
5606    }
5607
5608    fp = &sc->fp[0];
5609
5610    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5611        fp->eth_q_stats.tx_queue_full_return++;
5612        return;
5613    }
5614
5615    BXE_FP_TX_LOCK(fp);
5616    bxe_tx_start_locked(sc, ifp, fp);
5617    BXE_FP_TX_UNLOCK(fp);
5618}
5619
5620static int
5621bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5622                       if_t                ifp,
5623                       struct bxe_fastpath *fp,
5624                       struct mbuf         *m)
5625{
5626    struct buf_ring *tx_br = fp->tx_br;
5627    struct mbuf *next;
5628    int depth, rc, tx_count;
5629    uint16_t tx_bd_avail;
5630
5631    rc = tx_count = 0;
5632
5633    BXE_FP_TX_LOCK_ASSERT(fp);
5634
5635    if (sc->state != BXE_STATE_OPEN) {
5636        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5637        return (ENETDOWN);
5638    }
5639
5640    if (!tx_br) {
5641        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5642        return (EINVAL);
5643    }
5644
5645    if (m != NULL) {
5646        rc = drbr_enqueue(ifp, tx_br, m);
5647        if (rc != 0) {
5648            fp->eth_q_stats.tx_soft_errors++;
5649            goto bxe_tx_mq_start_locked_exit;
5650        }
5651    }
5652
5653    if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5654        fp->eth_q_stats.tx_request_link_down_failures++;
5655        goto bxe_tx_mq_start_locked_exit;
5656    }
5657
5658    /* fetch the depth of the driver queue */
5659    depth = drbr_inuse(ifp, tx_br);
5660    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5661        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5662    }
5663
5664    /* keep adding entries while there are frames to send */
5665    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5666        /* handle any completions if we're running low */
5667        tx_bd_avail = bxe_tx_avail(sc, fp);
5668        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5669            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5670            bxe_txeof(sc, fp);
5671            tx_bd_avail = bxe_tx_avail(sc, fp);
5672            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5673                fp->eth_q_stats.bd_avail_too_less_failures++;
5674                m_freem(next);
5675                drbr_advance(ifp, tx_br);
5676                rc = ENOBUFS;
5677                break;
5678            }
5679        }
5680
5681        /* the mbuf now belongs to us */
5682        fp->eth_q_stats.mbuf_alloc_tx++;
5683
5684        /*
5685         * Put the frame into the transmit ring. If we don't have room,
5686         * place the mbuf back at the head of the TX queue, set the
5687         * OACTIVE flag, and wait for the NIC to drain the chain.
5688         */
5689        rc = bxe_tx_encap(fp, &next);
5690        if (__predict_false(rc != 0)) {
5691            fp->eth_q_stats.tx_encap_failures++;
5692            if (next != NULL) {
5693                /* mark the TX queue as full and save the frame */
5694                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5695                drbr_putback(ifp, tx_br, next);
5696                fp->eth_q_stats.mbuf_alloc_tx--;
5697                fp->eth_q_stats.tx_frames_deferred++;
5698            } else
5699                drbr_advance(ifp, tx_br);
5700
5701            /* stop looking for more work */
5702            break;
5703        }
5704
5705        /* the transmit frame was enqueued successfully */
5706        tx_count++;
5707
5708        /* send a copy of the frame to any BPF listeners */
5709        ether_bpf_mtap_if(ifp, next);
5710
5711        drbr_advance(ifp, tx_br);
5712    }
5713
5714    /* all TX packets were dequeued and/or the tx ring is full */
5715    if (tx_count > 0) {
5716        /* reset the TX watchdog timeout timer */
5717        fp->watchdog_timer = BXE_TX_TIMEOUT;
5718    }
5719
5720bxe_tx_mq_start_locked_exit:
5721    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5722    if (!drbr_empty(ifp, tx_br)) {
5723        fp->eth_q_stats.tx_mq_not_empty++;
5724        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5725    }
5726
5727    return (rc);
5728}
5729
5730static void
5731bxe_tx_mq_start_deferred(void *arg,
5732                         int pending)
5733{
5734    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5735    struct bxe_softc *sc = fp->sc;
5736    if_t ifp = sc->ifp;
5737
5738    BXE_FP_TX_LOCK(fp);
5739    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5740    BXE_FP_TX_UNLOCK(fp);
5741}
5742
5743/* Multiqueue (TSS) dispatch routine. */
5744static int
5745bxe_tx_mq_start(if_t ifp,
5746                struct mbuf  *m)
5747{
5748    struct bxe_softc *sc = if_getsoftc(ifp);
5749    struct bxe_fastpath *fp;
5750    int fp_index, rc;
5751
5752    fp_index = 0; /* default is the first queue */
5753
5754    /* check if flowid is set */
5755
5756    if (BXE_VALID_FLOWID(m))
5757        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5758
5759    fp = &sc->fp[fp_index];
5760
5761    if (sc->state != BXE_STATE_OPEN) {
5762        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5763        return (ENETDOWN);
5764    }
5765
5766    if (BXE_FP_TX_TRYLOCK(fp)) {
5767        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5768        BXE_FP_TX_UNLOCK(fp);
5769    } else {
5770        rc = drbr_enqueue(ifp, fp->tx_br, m);
5771        taskqueue_enqueue(fp->tq, &fp->tx_task);
5772    }
5773
5774    return (rc);
5775}
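
/*
 * For example (illustrative only): with sc->num_queues = 4, a frame carrying
 * flowid 10 is dispatched to fp[2] (10 % 4), while frames without a valid
 * flowid always use fp[0].
 */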
5776
5777static void
5778bxe_mq_flush(if_t ifp)
5779{
5780    struct bxe_softc *sc = if_getsoftc(ifp);
5781    struct bxe_fastpath *fp;
5782    struct mbuf *m;
5783    int i;
5784
5785    for (i = 0; i < sc->num_queues; i++) {
5786        fp = &sc->fp[i];
5787
5788        if (fp->state != BXE_FP_STATE_IRQ) {
5789            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5790                  fp->index, fp->state);
5791            continue;
5792        }
5793
5794        if (fp->tx_br != NULL) {
5795            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5796            BXE_FP_TX_LOCK(fp);
5797            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5798                m_freem(m);
5799            }
5800            BXE_FP_TX_UNLOCK(fp);
5801        }
5802    }
5803
5804    if_qflush(ifp);
5805}
5806
5807static uint16_t
5808bxe_cid_ilt_lines(struct bxe_softc *sc)
5809{
5810    if (IS_SRIOV(sc)) {
5811        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5812    }
5813    return (L2_ILT_LINES(sc));
5814}
5815
5816static void
5817bxe_ilt_set_info(struct bxe_softc *sc)
5818{
5819    struct ilt_client_info *ilt_client;
5820    struct ecore_ilt *ilt = sc->ilt;
5821    uint16_t line = 0;
5822
5823    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5824    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5825
5826    /* CDU */
5827    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5828    ilt_client->client_num = ILT_CLIENT_CDU;
5829    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5830    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5831    ilt_client->start = line;
5832    line += bxe_cid_ilt_lines(sc);
5833
5834    if (CNIC_SUPPORT(sc)) {
5835        line += CNIC_ILT_LINES;
5836    }
5837
5838    ilt_client->end = (line - 1);
5839
5840    BLOGD(sc, DBG_LOAD,
5841          "ilt client[CDU]: start %d, end %d, "
5842          "psz 0x%x, flags 0x%x, hw psz %d\n",
5843          ilt_client->start, ilt_client->end,
5844          ilt_client->page_size,
5845          ilt_client->flags,
5846          ilog2(ilt_client->page_size >> 12));
5847
5848    /* QM */
5849    if (QM_INIT(sc->qm_cid_count)) {
5850        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5851        ilt_client->client_num = ILT_CLIENT_QM;
5852        ilt_client->page_size = QM_ILT_PAGE_SZ;
5853        ilt_client->flags = 0;
5854        ilt_client->start = line;
5855
5856        /* 4 bytes for each cid */
5857        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5858                             QM_ILT_PAGE_SZ);
5859
5860        ilt_client->end = (line - 1);
5861
5862        BLOGD(sc, DBG_LOAD,
5863              "ilt client[QM]: start %d, end %d, "
5864              "psz 0x%x, flags 0x%x, hw psz %d\n",
5865              ilt_client->start, ilt_client->end,
5866              ilt_client->page_size, ilt_client->flags,
5867              ilog2(ilt_client->page_size >> 12));
5868    }
5869
5870    if (CNIC_SUPPORT(sc)) {
5871        /* SRC */
5872        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5873        ilt_client->client_num = ILT_CLIENT_SRC;
5874        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5875        ilt_client->flags = 0;
5876        ilt_client->start = line;
5877        line += SRC_ILT_LINES;
5878        ilt_client->end = (line - 1);
5879
5880        BLOGD(sc, DBG_LOAD,
5881              "ilt client[SRC]: start %d, end %d, "
5882              "psz 0x%x, flags 0x%x, hw psz %d\n",
5883              ilt_client->start, ilt_client->end,
5884              ilt_client->page_size, ilt_client->flags,
5885              ilog2(ilt_client->page_size >> 12));
5886
5887        /* TM */
5888        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5889        ilt_client->client_num = ILT_CLIENT_TM;
5890        ilt_client->page_size = TM_ILT_PAGE_SZ;
5891        ilt_client->flags = 0;
5892        ilt_client->start = line;
5893        line += TM_ILT_LINES;
5894        ilt_client->end = (line - 1);
5895
5896        BLOGD(sc, DBG_LOAD,
5897              "ilt client[TM]: start %d, end %d, "
5898              "psz 0x%x, flags 0x%x, hw psz %d\n",
5899              ilt_client->start, ilt_client->end,
5900              ilt_client->page_size, ilt_client->flags,
5901              ilog2(ilt_client->page_size >> 12));
5902    }
5903
5904    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5905}
5906
5907static void
5908bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5909{
5910    int i;
5911    uint32_t rx_buf_size;
5912
5913    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5914
5915    for (i = 0; i < sc->num_queues; i++) {
5916        if (rx_buf_size <= MCLBYTES) {
5917            sc->fp[i].rx_buf_size = rx_buf_size;
5918            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5919        } else if (rx_buf_size <= MJUMPAGESIZE) {
5920            sc->fp[i].rx_buf_size = rx_buf_size;
5921            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5922        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5923            sc->fp[i].rx_buf_size = MCLBYTES;
5924            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5925        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5926            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5927            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5928        } else {
5929            sc->fp[i].rx_buf_size = MCLBYTES;
5930            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5931        }
5932    }
5933}
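
/*
 * Illustrative only: with a standard 1500 byte MTU the computed rx_buf_size
 * fits comfortably in MCLBYTES, so each RX BD gets a 2KB cluster; larger
 * MTUs step up to page-sized (MJUMPAGESIZE) clusters, and MTUs whose buffer
 * requirement exceeds two page-sized clusters fall back to 2KB clusters via
 * the final else case above.
 */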
5934
5935static int
5936bxe_alloc_ilt_mem(struct bxe_softc *sc)
5937{
5938    int rc = 0;
5939
5940    if ((sc->ilt =
5941         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5942                                    M_BXE_ILT,
5943                                    (M_NOWAIT | M_ZERO))) == NULL) {
5944        rc = 1;
5945    }
5946
5947    return (rc);
5948}
5949
5950static int
5951bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5952{
5953    int rc = 0;
5954
5955    if ((sc->ilt->lines =
5956         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5957                                    M_BXE_ILT,
5958                                    (M_NOWAIT | M_ZERO))) == NULL) {
5959        rc = 1;
5960    }
5961
5962    return (rc);
5963}
5964
5965static void
5966bxe_free_ilt_mem(struct bxe_softc *sc)
5967{
5968    if (sc->ilt != NULL) {
5969        free(sc->ilt, M_BXE_ILT);
5970        sc->ilt = NULL;
5971    }
5972}
5973
5974static void
5975bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5976{
5977    if (sc->ilt->lines != NULL) {
5978        free(sc->ilt->lines, M_BXE_ILT);
5979        sc->ilt->lines = NULL;
5980    }
5981}
5982
5983static void
5984bxe_free_mem(struct bxe_softc *sc)
5985{
5986    int i;
5987
5988    for (i = 0; i < L2_ILT_LINES(sc); i++) {
5989        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5990        sc->context[i].vcxt = NULL;
5991        sc->context[i].size = 0;
5992    }
5993
5994    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
5995
5996    bxe_free_ilt_lines_mem(sc);
5997
5998}
5999
6000static int
6001bxe_alloc_mem(struct bxe_softc *sc)
6002{
6003
6004    int context_size;
6005    int allocated;
6006    int i;
6007
6008    /*
6009     * Allocate memory for CDU context:
6010     * This memory is allocated separately and not in the generic ILT
6011     * functions because CDU differs in a few aspects:
6012     * 1. There can be multiple entities allocating memory for context -
6013     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6014     * its own ILT lines.
6015     * 2. Since CDU page-size is not a single 4KB page (which is the case
6016     * for the other ILT clients), to be efficient we want to support
6017     * allocation of sub-page-size in the last entry.
6018     * 3. Context pointers are used by the driver to pass to FW / update
6019     * the context (for the other ILT clients the pointers are used just to
6020     * free the memory during unload).
6021     */
6022    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6023    for (i = 0, allocated = 0; allocated < context_size; i++) {
6024        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6025                                  (context_size - allocated));
6026
6027        if (bxe_dma_alloc(sc, sc->context[i].size,
6028                          &sc->context[i].vcxt_dma,
6029                          "cdu context") != 0) {
6030            bxe_free_mem(sc);
6031            return (-1);
6032        }
6033
6034        sc->context[i].vcxt =
6035            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6036
6037        allocated += sc->context[i].size;
6038    }
6039
6040    bxe_alloc_ilt_lines_mem(sc);
6041
6042    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6043          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6044    {
6045        for (i = 0; i < 4; i++) {
6046            BLOGD(sc, DBG_LOAD,
6047                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6048                  i,
6049                  sc->ilt->clients[i].page_size,
6050                  sc->ilt->clients[i].start,
6051                  sc->ilt->clients[i].end,
6052                  sc->ilt->clients[i].client_num,
6053                  sc->ilt->clients[i].flags);
6054        }
6055    }
6056    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6057        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6058        bxe_free_mem(sc);
6059        return (-1);
6060    }
6061
6062    return (0);
6063}
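
/*
 * Rough sizing sketch (illustrative, the constants are assumptions): if
 * union cdu_context were 1KB and BXE_L2_CID_COUNT(sc) were 64, context_size
 * would be 64KB, carved into CDU_ILT_PAGE_SZ sized chunks with the final
 * sc->context[] entry holding just the remainder, matching point 2 above
 * about sub-page allocation in the last entry.
 */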
6064
6065static void
6066bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6067{
6068    int i;
6069
6070    if (fp->rx_mbuf_tag == NULL) {
6071        return;
6072    }
6073
6074    /* free all mbufs and unload all maps */
6075    for (i = 0; i < RX_BD_TOTAL; i++) {
6076        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6077            bus_dmamap_sync(fp->rx_mbuf_tag,
6078                            fp->rx_mbuf_chain[i].m_map,
6079                            BUS_DMASYNC_POSTREAD);
6080            bus_dmamap_unload(fp->rx_mbuf_tag,
6081                              fp->rx_mbuf_chain[i].m_map);
6082        }
6083
6084        if (fp->rx_mbuf_chain[i].m != NULL) {
6085            m_freem(fp->rx_mbuf_chain[i].m);
6086            fp->rx_mbuf_chain[i].m = NULL;
6087            fp->eth_q_stats.mbuf_alloc_rx--;
6088        }
6089    }
6090}
6091
6092static void
6093bxe_free_tpa_pool(struct bxe_fastpath *fp)
6094{
6095    struct bxe_softc *sc;
6096    int i, max_agg_queues;
6097
6098    sc = fp->sc;
6099
6100    if (fp->rx_mbuf_tag == NULL) {
6101        return;
6102    }
6103
6104    max_agg_queues = MAX_AGG_QS(sc);
6105
6106    /* release all mbufs and unload all DMA maps in the TPA pool */
6107    for (i = 0; i < max_agg_queues; i++) {
6108        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6109            bus_dmamap_sync(fp->rx_mbuf_tag,
6110                            fp->rx_tpa_info[i].bd.m_map,
6111                            BUS_DMASYNC_POSTREAD);
6112            bus_dmamap_unload(fp->rx_mbuf_tag,
6113                              fp->rx_tpa_info[i].bd.m_map);
6114        }
6115
6116        if (fp->rx_tpa_info[i].bd.m != NULL) {
6117            m_freem(fp->rx_tpa_info[i].bd.m);
6118            fp->rx_tpa_info[i].bd.m = NULL;
6119            fp->eth_q_stats.mbuf_alloc_tpa--;
6120        }
6121    }
6122}
6123
6124static void
6125bxe_free_sge_chain(struct bxe_fastpath *fp)
6126{
6127    int i;
6128
6129    if (fp->rx_sge_mbuf_tag == NULL) {
6130        return;
6131    }
6132
6133    /* free all mbufs and unload all maps */
6134    for (i = 0; i < RX_SGE_TOTAL; i++) {
6135        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6136            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6137                            fp->rx_sge_mbuf_chain[i].m_map,
6138                            BUS_DMASYNC_POSTREAD);
6139            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6140                              fp->rx_sge_mbuf_chain[i].m_map);
6141        }
6142
6143        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6144            m_freem(fp->rx_sge_mbuf_chain[i].m);
6145            fp->rx_sge_mbuf_chain[i].m = NULL;
6146            fp->eth_q_stats.mbuf_alloc_sge--;
6147        }
6148    }
6149}
6150
6151static void
6152bxe_free_fp_buffers(struct bxe_softc *sc)
6153{
6154    struct bxe_fastpath *fp;
6155    int i;
6156
6157    for (i = 0; i < sc->num_queues; i++) {
6158        fp = &sc->fp[i];
6159
6160        if (fp->tx_br != NULL) {
6161            /* just in case bxe_mq_flush() wasn't called */
6162            if (mtx_initialized(&fp->tx_mtx)) {
6163                struct mbuf *m;
6164
6165                BXE_FP_TX_LOCK(fp);
6166                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6167                    m_freem(m);
6168                BXE_FP_TX_UNLOCK(fp);
6169            }
6170        }
6171
6172        /* free all RX buffers */
6173        bxe_free_rx_bd_chain(fp);
6174        bxe_free_tpa_pool(fp);
6175        bxe_free_sge_chain(fp);
6176
6177        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6178            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6179                  fp->eth_q_stats.mbuf_alloc_rx);
6180        }
6181
6182        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6183            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6184                  fp->eth_q_stats.mbuf_alloc_sge);
6185        }
6186
6187        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6188            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6189                  fp->eth_q_stats.mbuf_alloc_tpa);
6190        }
6191
6192        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6193            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6194                  fp->eth_q_stats.mbuf_alloc_tx);
6195        }
6196
6197        /* XXX verify all mbufs were reclaimed */
6198    }
6199}
6200
6201static int
6202bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6203                     uint16_t            prev_index,
6204                     uint16_t            index)
6205{
6206    struct bxe_sw_rx_bd *rx_buf;
6207    struct eth_rx_bd *rx_bd;
6208    bus_dma_segment_t segs[1];
6209    bus_dmamap_t map;
6210    struct mbuf *m;
6211    int nsegs, rc;
6212
6213    rc = 0;
6214
6215    /* allocate the new RX BD mbuf */
6216    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6217    if (__predict_false(m == NULL)) {
6218        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6219        return (ENOBUFS);
6220    }
6221
6222    fp->eth_q_stats.mbuf_alloc_rx++;
6223
6224    /* initialize the mbuf buffer length */
6225    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6226
6227    /* map the mbuf into non-paged pool */
6228    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6229                                 fp->rx_mbuf_spare_map,
6230                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6231    if (__predict_false(rc != 0)) {
6232        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6233        m_freem(m);
6234        fp->eth_q_stats.mbuf_alloc_rx--;
6235        return (rc);
6236    }
6237
6238    /* all mbufs must map to a single segment */
6239    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6240
6241    /* release any existing RX BD mbuf mappings */
6242
6243    if (prev_index != index) {
6244        rx_buf = &fp->rx_mbuf_chain[prev_index];
6245
6246        if (rx_buf->m_map != NULL) {
6247            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6248                            BUS_DMASYNC_POSTREAD);
6249            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6250        }
6251
6252        /*
6253         * We only get here from bxe_rxeof() when the maximum number
6254         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6255         * holds the mbuf in the prev_index so it's OK to NULL it out
6256         * here without concern of a memory leak.
6257         */
6258        fp->rx_mbuf_chain[prev_index].m = NULL;
6259    }
6260
6261    rx_buf = &fp->rx_mbuf_chain[index];
6262
6263    if (rx_buf->m_map != NULL) {
6264        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6265                        BUS_DMASYNC_POSTREAD);
6266        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6267    }
6268
6269    /* save the mbuf and mapping info for a future packet */
6270    map = (prev_index != index) ?
6271              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6272    rx_buf->m_map = fp->rx_mbuf_spare_map;
6273    fp->rx_mbuf_spare_map = map;
6274    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6275                    BUS_DMASYNC_PREREAD);
6276    rx_buf->m = m;
6277
6278    rx_bd = &fp->rx_chain[index];
6279    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6280    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6281
6282    return (rc);
6283}
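
/*
 * Design note on the spare-map swap above (descriptive only): the new mbuf
 * is always loaded into fp->rx_mbuf_spare_map first, so a DMA load failure
 * leaves the ring entry's existing mapping untouched. Only after a
 * successful load are the ring map and the spare map exchanged, which means
 * an RX BD is never left without a valid mapping. The TPA and SGE
 * allocation routines below follow the same pattern with their own spare
 * maps.
 */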
6284
6285static int
6286bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6287                      int                 queue)
6288{
6289    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6290    bus_dma_segment_t segs[1];
6291    bus_dmamap_t map;
6292    struct mbuf *m;
6293    int nsegs;
6294    int rc = 0;
6295
6296    /* allocate the new TPA mbuf */
6297    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6298    if (__predict_false(m == NULL)) {
6299        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6300        return (ENOBUFS);
6301    }
6302
6303    fp->eth_q_stats.mbuf_alloc_tpa++;
6304
6305    /* initialize the mbuf buffer length */
6306    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6307
6308    /* map the mbuf into non-paged pool */
6309    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6310                                 fp->rx_tpa_info_mbuf_spare_map,
6311                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6312    if (__predict_false(rc != 0)) {
6313        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6314        m_free(m);
6315        fp->eth_q_stats.mbuf_alloc_tpa--;
6316        return (rc);
6317    }
6318
6319    /* all mbufs must map to a single segment */
6320    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6321
6322    /* release any existing TPA mbuf mapping */
6323    if (tpa_info->bd.m_map != NULL) {
6324        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6325                        BUS_DMASYNC_POSTREAD);
6326        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6327    }
6328
6329    /* save the mbuf and mapping info for the TPA mbuf */
6330    map = tpa_info->bd.m_map;
6331    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6332    fp->rx_tpa_info_mbuf_spare_map = map;
6333    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6334                    BUS_DMASYNC_PREREAD);
6335    tpa_info->bd.m = m;
6336    tpa_info->seg = segs[0];
6337
6338    return (rc);
6339}
6340
6341/*
6342 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6343 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6344 * chain.
6345 */
6346static int
6347bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6348                      uint16_t            index)
6349{
6350    struct bxe_sw_rx_bd *sge_buf;
6351    struct eth_rx_sge *sge;
6352    bus_dma_segment_t segs[1];
6353    bus_dmamap_t map;
6354    struct mbuf *m;
6355    int nsegs;
6356    int rc = 0;
6357
6358    /* allocate a new SGE mbuf */
6359    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6360    if (__predict_false(m == NULL)) {
6361        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6362        return (ENOMEM);
6363    }
6364
6365    fp->eth_q_stats.mbuf_alloc_sge++;
6366
6367    /* initialize the mbuf buffer length */
6368    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6369
6370    /* map the SGE mbuf into non-paged pool */
6371    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6372                                 fp->rx_sge_mbuf_spare_map,
6373                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6374    if (__predict_false(rc != 0)) {
6375        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6376        m_freem(m);
6377        fp->eth_q_stats.mbuf_alloc_sge--;
6378        return (rc);
6379    }
6380
6381    /* all mbufs must map to a single segment */
6382    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6383
6384    sge_buf = &fp->rx_sge_mbuf_chain[index];
6385
6386    /* release any existing SGE mbuf mapping */
6387    if (sge_buf->m_map != NULL) {
6388        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6389                        BUS_DMASYNC_POSTREAD);
6390        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6391    }
6392
6393    /* save the mbuf and mapping info for a future packet */
6394    map = sge_buf->m_map;
6395    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6396    fp->rx_sge_mbuf_spare_map = map;
6397    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6398                    BUS_DMASYNC_PREREAD);
6399    sge_buf->m = m;
6400
6401    sge = &fp->rx_sge_chain[index];
6402    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6403    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6404
6405    return (rc);
6406}
6407
6408static __noinline int
6409bxe_alloc_fp_buffers(struct bxe_softc *sc)
6410{
6411    struct bxe_fastpath *fp;
6412    int i, j, rc = 0;
6413    int ring_prod, cqe_ring_prod;
6414    int max_agg_queues;
6415
6416    for (i = 0; i < sc->num_queues; i++) {
6417        fp = &sc->fp[i];
6418
6419        ring_prod = cqe_ring_prod = 0;
6420        fp->rx_bd_cons = 0;
6421        fp->rx_cq_cons = 0;
6422
6423        /* allocate buffers for the RX BDs in RX BD chain */
6424        for (j = 0; j < sc->max_rx_bufs; j++) {
6425            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6426            if (rc != 0) {
6427                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6428                      i, rc);
6429                goto bxe_alloc_fp_buffers_error;
6430            }
6431
6432            ring_prod     = RX_BD_NEXT(ring_prod);
6433            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6434        }
6435
6436        fp->rx_bd_prod = ring_prod;
6437        fp->rx_cq_prod = cqe_ring_prod;
6438        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6439
6440        max_agg_queues = MAX_AGG_QS(sc);
6441
6442        fp->tpa_enable = TRUE;
6443
6444        /* fill the TPA pool */
6445        for (j = 0; j < max_agg_queues; j++) {
6446            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6447            if (rc != 0) {
6448                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6449                          i, j);
6450                fp->tpa_enable = FALSE;
6451                goto bxe_alloc_fp_buffers_error;
6452            }
6453
6454            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6455        }
6456
6457        if (fp->tpa_enable) {
6458            /* fill the RX SGE chain */
6459            ring_prod = 0;
6460            for (j = 0; j < RX_SGE_USABLE; j++) {
6461                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6462                if (rc != 0) {
6463                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6464                              i, ring_prod);
6465                    fp->tpa_enable = FALSE;
6466                    ring_prod = 0;
6467                    goto bxe_alloc_fp_buffers_error;
6468                }
6469
6470                ring_prod = RX_SGE_NEXT(ring_prod);
6471            }
6472
6473            fp->rx_sge_prod = ring_prod;
6474        }
6475    }
6476
6477    return (0);
6478
6479bxe_alloc_fp_buffers_error:
6480
6481    /* unwind what was already allocated */
6482    bxe_free_rx_bd_chain(fp);
6483    bxe_free_tpa_pool(fp);
6484    bxe_free_sge_chain(fp);
6485
6486    return (ENOBUFS);
6487}
6488
6489static void
6490bxe_free_fw_stats_mem(struct bxe_softc *sc)
6491{
6492    bxe_dma_free(sc, &sc->fw_stats_dma);
6493
6494    sc->fw_stats_num = 0;
6495
6496    sc->fw_stats_req_size = 0;
6497    sc->fw_stats_req = NULL;
6498    sc->fw_stats_req_mapping = 0;
6499
6500    sc->fw_stats_data_size = 0;
6501    sc->fw_stats_data = NULL;
6502    sc->fw_stats_data_mapping = 0;
6503}
6504
6505static int
6506bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6507{
6508    uint8_t num_queue_stats;
6509    int num_groups;
6510
6511    /* number of queues for statistics is number of eth queues */
6512    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6513
6514    /*
6515     * Total number of FW statistics requests =
6516     *   1 for port stats + 1 for PF stats + num of queues
6517     */
6518    sc->fw_stats_num = (2 + num_queue_stats);
6519
6520    /*
6521     * Request is built from stats_query_header and an array of
6522     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6523     * rules. The real number of requests is configured in the
6524     * stats_query_header.
6525     */
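    /*
     * In other words this is a ceiling division:
     *   num_groups = ceil(fw_stats_num / STATS_QUERY_CMD_COUNT)
     * e.g. if a group held 8 commands, 10 requests would need 2 groups.
     */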
6526    num_groups =
6527        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6528         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6529
6530    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6531          sc->fw_stats_num, num_groups);
6532
6533    sc->fw_stats_req_size =
6534        (sizeof(struct stats_query_header) +
6535         (num_groups * sizeof(struct stats_query_cmd_group)));
6536
6537    /*
6538     * Data for statistics requests + stats_counter.
6539     * stats_counter holds per-STORM counters that are incremented when
6540     * STORM has finished with the current request. Memory for FCoE
6541     * offloaded statistics is counted anyway, even if it will not be sent.
6542     * VF stats are not accounted for here because VF stats data is stored
6543     * in memory allocated by the VF itself, not here.
6544     */
6545    sc->fw_stats_data_size =
6546        (sizeof(struct stats_counter) +
6547         sizeof(struct per_port_stats) +
6548         sizeof(struct per_pf_stats) +
6549         /* sizeof(struct fcoe_statistics_params) + */
6550         (sizeof(struct per_queue_stats) * num_queue_stats));
6551
6552    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6553                      &sc->fw_stats_dma, "fw stats") != 0) {
6554        bxe_free_fw_stats_mem(sc);
6555        return (-1);
6556    }
6557
6558    /* set up the shortcuts */
6559
6560    sc->fw_stats_req =
6561        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6562    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6563
6564    sc->fw_stats_data =
6565        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6566                                     sc->fw_stats_req_size);
6567    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6568                                 sc->fw_stats_req_size);
6569
6570    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6571          (uintmax_t)sc->fw_stats_req_mapping);
6572
6573    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6574          (uintmax_t)sc->fw_stats_data_mapping);
6575
6576    return (0);
6577}
6578
6579/*
6580 * Bits map:
6581 * 0-7  - Engine0 load counter.
6582 * 8-15 - Engine1 load counter.
6583 * 16   - Engine0 RESET_IN_PROGRESS bit.
6584 * 17   - Engine1 RESET_IN_PROGRESS bit.
6585 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6586 *        function on the engine
6587 * 19   - Engine1 ONE_IS_LOADED.
6588 * 20   - Chip reset flow bit. When set, a non-leader must wait for the
6589 *        leaders of both engines to complete (check both RESET_IN_PROGRESS
6590 *        bits, not just the one belonging to its own engine).
6591 */
6592#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6593#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6594#define BXE_PATH0_LOAD_CNT_SHIFT  0
6595#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6596#define BXE_PATH1_LOAD_CNT_SHIFT  8
6597#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6598#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6599#define BXE_GLOBAL_RESET_BIT      0x00040000
6600
6601/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6602static void
6603bxe_set_reset_global(struct bxe_softc *sc)
6604{
6605    uint32_t val;
6606    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6607    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6608    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6609    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6610}
6611
6612/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6613static void
6614bxe_clear_reset_global(struct bxe_softc *sc)
6615{
6616    uint32_t val;
6617    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6618    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6619    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6620    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6621}
6622
6623/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6624static uint8_t
6625bxe_reset_is_global(struct bxe_softc *sc)
6626{
6627    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6628    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6629    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6630}
6631
6632/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6633static void
6634bxe_set_reset_done(struct bxe_softc *sc)
6635{
6636    uint32_t val;
6637    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6638                                 BXE_PATH0_RST_IN_PROG_BIT;
6639
6640    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6641
6642    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6643    /* Clear the bit */
6644    val &= ~bit;
6645    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6646
6647    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6648}
6649
6650/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6651static void
6652bxe_set_reset_in_progress(struct bxe_softc *sc)
6653{
6654    uint32_t val;
6655    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6656                                 BXE_PATH0_RST_IN_PROG_BIT;
6657
6658    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6659
6660    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6661    /* Set the bit */
6662    val |= bit;
6663    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6664
6665    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6666}
6667
6668/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6669static uint8_t
6670bxe_reset_is_done(struct bxe_softc *sc,
6671                  int              engine)
6672{
6673    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6674    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6675                            BXE_PATH0_RST_IN_PROG_BIT;
6676
6677    /* return false if bit is set */
6678    return (val & bit) ? FALSE : TRUE;
6679}
6680
6681/* get the load status for an engine, should be run under rtnl lock */
6682static uint8_t
6683bxe_get_load_status(struct bxe_softc *sc,
6684                    int              engine)
6685{
6686    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6687                             BXE_PATH0_LOAD_CNT_MASK;
6688    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6689                              BXE_PATH0_LOAD_CNT_SHIFT;
6690    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6691
6692    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6693
6694    val = ((val & mask) >> shift);
6695
6696    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6697
6698    return (val != 0);
6699}
6700
6701/* set pf load mark */
6702/* XXX needs to be under rtnl lock */
6703static void
6704bxe_set_pf_load(struct bxe_softc *sc)
6705{
6706    uint32_t val;
6707    uint32_t val1;
6708    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6709                                  BXE_PATH0_LOAD_CNT_MASK;
6710    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6711                                   BXE_PATH0_LOAD_CNT_SHIFT;
6712
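    /*
     * The per-path "load counter" byte in the recovery register is used as a
     * bitmask with one bit per absolute function. Setting our bit marks this
     * PF as loaded; the read-modify-write below is serialized against other
     * functions by the recovery HW lock.
     */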
6713    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6714
6715    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6716    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6717
6718    /* get the current counter value */
6719    val1 = ((val & mask) >> shift);
6720
6721    /* set bit of this PF */
6722    val1 |= (1 << SC_ABS_FUNC(sc));
6723
6724    /* clear the old value */
6725    val &= ~mask;
6726
6727    /* set the new one */
6728    val |= ((val1 << shift) & mask);
6729
6730    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6731
6732    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6733}
6734
6735/* clear pf load mark */
6736/* XXX needs to be under rtnl lock */
6737static uint8_t
6738bxe_clear_pf_load(struct bxe_softc *sc)
6739{
6740    uint32_t val1, val;
6741    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6742                                  BXE_PATH0_LOAD_CNT_MASK;
6743    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6744                                   BXE_PATH0_LOAD_CNT_SHIFT;
6745
6746    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6747    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6748    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6749
6750    /* get the current counter value */
6751    val1 = (val & mask) >> shift;
6752
6753    /* clear bit of that PF */
6754    val1 &= ~(1 << SC_ABS_FUNC(sc));
6755
6756    /* clear the old value */
6757    val &= ~mask;
6758
6759    /* set the new one */
6760    val |= ((val1 << shift) & mask);
6761
6762    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6763    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6764    return (val1 != 0);
6765}
6766
6767/* send a load request to the MCP and analyze the response */
6768static int
6769bxe_nic_load_request(struct bxe_softc *sc,
6770                     uint32_t         *load_code)
6771{
6772    /* init fw_seq */
6773    sc->fw_seq =
6774        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6775         DRV_MSG_SEQ_NUMBER_MASK);
6776
6777    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6778
6779    /* get the current FW pulse sequence */
6780    sc->fw_drv_pulse_wr_seq =
6781        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6782         DRV_PULSE_SEQ_MASK);
6783
6784    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6785          sc->fw_drv_pulse_wr_seq);
6786
6787    /* load request */
6788    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6789                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6790
6791    /* if the MCP fails to respond we must abort */
6792    if (!(*load_code)) {
6793        BLOGE(sc, "MCP response failure!\n");
6794        return (-1);
6795    }
6796
6797    /* if MCP refused then must abort */
6798    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6799        BLOGE(sc, "MCP refused load request\n");
6800        return (-1);
6801    }
6802
6803    return (0);
6804}
6805
6806/*
6807 * Check whether another PF has already loaded FW to the chip. In virtualized
6808 * environments a PF from another VM may have already initialized the device,
6809 * including loading FW.
6810 */
6811static int
6812bxe_nic_load_analyze_req(struct bxe_softc *sc,
6813                         uint32_t         load_code)
6814{
6815    uint32_t my_fw, loaded_fw;
6816
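    /*
     * The FW version is packed into a single dword: major in the low byte,
     * then minor, revision, and engineering version in the higher bytes.
     */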
6817    /* is another pf loaded on this engine? */
6818    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6819        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6820        /* build my FW version dword */
6821        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6822                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6823                 (BCM_5710_FW_REVISION_VERSION << 16) +
6824                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6825
6826        /* read loaded FW from chip */
6827        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6828        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6829              loaded_fw, my_fw);
6830
6831        /* abort nic load if version mismatch */
6832        if (my_fw != loaded_fw) {
6833            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
6834                  loaded_fw, my_fw);
6835            return (-1);
6836        }
6837    }
6838
6839    return (0);
6840}
6841
6842/* mark PMF if applicable */
6843static void
6844bxe_nic_load_pmf(struct bxe_softc *sc,
6845                 uint32_t         load_code)
6846{
6847    uint32_t ncsi_oem_data_addr;
6848
6849    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6850        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6851        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6852        /*
6853         * Barrier for ordering between the write to sc->port.pmf here and
6854         * the read from the periodic task.
6855         */
6856        sc->port.pmf = 1;
6857        mb();
6858    } else {
6859        sc->port.pmf = 0;
6860    }
6861
6862    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6863
6864    /* XXX needed? */
6865    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6866        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6867            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6868            if (ncsi_oem_data_addr) {
6869                REG_WR(sc,
6870                       (ncsi_oem_data_addr +
6871                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6872                       0);
6873            }
6874        }
6875    }
6876}
6877
6878static void
6879bxe_read_mf_cfg(struct bxe_softc *sc)
6880{
6881    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6882    int abs_func;
6883    int vn;
6884
6885    if (BXE_NOMCP(sc)) {
6886        return; /* what should be the default value in this case? */
6887    }
6888
6889    /*
6890     * The formula for computing the absolute function number is...
6891     * For 2 port configuration (4 functions per port):
6892     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6893     * For 4 port configuration (2 functions per port):
6894     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6895     */
6896    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6897        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6898        if (abs_func >= E1H_FUNC_MAX) {
6899            break;
6900        }
6901        sc->devinfo.mf_info.mf_config[vn] =
6902            MFCFG_RD(sc, func_mf_config[abs_func].config);
6903    }
6904
6905    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6906        FUNC_MF_CFG_FUNC_DISABLED) {
6907        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6908        sc->flags |= BXE_MF_FUNC_DIS;
6909    } else {
6910        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6911        sc->flags &= ~BXE_MF_FUNC_DIS;
6912    }
6913}
6914
6915/* acquire split MCP access lock register */
6916static int bxe_acquire_alr(struct bxe_softc *sc)
6917{
6918    uint32_t j, val;
6919
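    /*
     * Request the lock by writing bit 31 of the MCP access lock register;
     * ownership is confirmed when the bit reads back as set. Poll for up to
     * ~5 seconds (1000 iterations x 5ms).
     */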
6920    for (j = 0; j < 1000; j++) {
6921        val = (1UL << 31);
6922        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6923        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6924        if (val & (1L << 31))
6925            break;
6926
6927        DELAY(5000);
6928    }
6929
6930    if (!(val & (1L << 31))) {
6931        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6932        return (-1);
6933    }
6934
6935    return (0);
6936}
6937
6938/* release split MCP access lock register */
6939static void bxe_release_alr(struct bxe_softc *sc)
6940{
6941    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6942}
6943
6944static void
6945bxe_fan_failure(struct bxe_softc *sc)
6946{
6947    int port = SC_PORT(sc);
6948    uint32_t ext_phy_config;
6949
6950    /* mark the failure */
6951    ext_phy_config =
6952        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6953
6954    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6955    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6956    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6957             ext_phy_config);
6958
6959    /* log the failure */
6960    BLOGW(sc, "Fan Failure has caused the driver to shut down "
6961              "the card to prevent permanent damage. "
6962              "Please contact OEM Support for assistance\n");
6963
6964    /* XXX */
6965#if 1
6966    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6967#else
6968    /*
6969     * Schedule device reset (unload)
6970     * This is due to some boards consuming sufficient power when driver is
6971     * up to overheat if fan fails.
6972     */
6973    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6974    schedule_delayed_work(&sc->sp_rtnl_task, 0);
6975#endif
6976}
6977
6978/* this function is called upon a link interrupt */
6979static void
6980bxe_link_attn(struct bxe_softc *sc)
6981{
6982    uint32_t pause_enabled = 0;
6983    struct host_port_stats *pstats;
6984    int cmng_fns;
6985    struct bxe_fastpath *fp;
6986    int i;
6987
6988    /* Make sure that we are synced with the current statistics */
6989    bxe_stats_handle(sc, STATS_EVENT_STOP);
6990    BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
6991    elink_link_update(&sc->link_params, &sc->link_vars);
6992
6993    if (sc->link_vars.link_up) {
6994
6995        /* dropless flow control */
6996        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
6997            pause_enabled = 0;
6998
6999            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7000                pause_enabled = 1;
7001            }
7002
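            /*
             * Tell the USTORM firmware whether TX pause is currently active
             * on this port; dropless flow control uses this setting.
             */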
7003            REG_WR(sc,
7004                   (BAR_USTRORM_INTMEM +
7005                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7006                   pause_enabled);
7007        }
7008
7009        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7010            pstats = BXE_SP(sc, port_stats);
7011            /* reset old mac stats */
7012            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7013        }
7014
7015        if (sc->state == BXE_STATE_OPEN) {
7016            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7017	    /* Restart tx when the link comes back. */
7018	    FOR_EACH_ETH_QUEUE(sc, i) {
7019		fp = &sc->fp[i];
7020		taskqueue_enqueue(fp->tq, &fp->tx_task);
7021	    }
7022        }
7023
7024    }
7025
7026    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7027        cmng_fns = bxe_get_cmng_fns_mode(sc);
7028
7029        if (cmng_fns != CMNG_FNS_NONE) {
7030            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7031            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7032        } else {
7033            /* rate shaping and fairness are disabled */
7034            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7035        }
7036    }
7037
7038    bxe_link_report_locked(sc);
7039
7040    if (IS_MF(sc)) {
7041        ; // XXX bxe_link_sync_notify(sc);
7042    }
7043}
7044
7045static void
7046bxe_attn_int_asserted(struct bxe_softc *sc,
7047                      uint32_t         asserted)
7048{
7049    int port = SC_PORT(sc);
7050    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7051                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7052    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7053                                        NIG_REG_MASK_INTERRUPT_PORT0;
7054    uint32_t aeu_mask;
7055    uint32_t nig_mask = 0;
7056    uint32_t reg_addr;
7057    uint32_t igu_acked;
7058    uint32_t cnt;
7059
7060    if (sc->attn_state & asserted) {
7061        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7062    }
7063
7064    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7065
7066    aeu_mask = REG_RD(sc, aeu_addr);
7067
7068    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7069          aeu_mask, asserted);
7070
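    /* mask only the newly asserted bits among the low attention lines (0x3ff) */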
7071    aeu_mask &= ~(asserted & 0x3ff);
7072
7073    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7074
7075    REG_WR(sc, aeu_addr, aeu_mask);
7076
7077    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7078
7079    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7080    sc->attn_state |= asserted;
7081    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7082
7083    if (asserted & ATTN_HARD_WIRED_MASK) {
7084        if (asserted & ATTN_NIG_FOR_FUNC) {
7085
7086	    bxe_acquire_phy_lock(sc);
7087            /* save nig interrupt mask */
7088            nig_mask = REG_RD(sc, nig_int_mask_addr);
7089
7090            /* If nig_mask is not set, no need to call the update function */
7091            if (nig_mask) {
7092                REG_WR(sc, nig_int_mask_addr, 0);
7093
7094                bxe_link_attn(sc);
7095            }
7096
7097            /* handle unicore attn? */
7098        }
7099
7100        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7101            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7102        }
7103
7104        if (asserted & GPIO_2_FUNC) {
7105            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7106        }
7107
7108        if (asserted & GPIO_3_FUNC) {
7109            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7110        }
7111
7112        if (asserted & GPIO_4_FUNC) {
7113            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7114        }
7115
7116        if (port == 0) {
7117            if (asserted & ATTN_GENERAL_ATTN_1) {
7118                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7119                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7120            }
7121            if (asserted & ATTN_GENERAL_ATTN_2) {
7122                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7123                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7124            }
7125            if (asserted & ATTN_GENERAL_ATTN_3) {
7126                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7127                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7128            }
7129        } else {
7130            if (asserted & ATTN_GENERAL_ATTN_4) {
7131                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7132                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7133            }
7134            if (asserted & ATTN_GENERAL_ATTN_5) {
7135                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7136                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7137            }
7138            if (asserted & ATTN_GENERAL_ATTN_6) {
7139                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7140                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7141            }
7142        }
7143    } /* hardwired */
7144
7145    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7146        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7147    } else {
7148        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7149    }
7150
7151    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7152          asserted,
7153          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7154    REG_WR(sc, reg_addr, asserted);
7155
7156    /* now set back the mask */
7157    if (asserted & ATTN_NIG_FOR_FUNC) {
7158        /*
7159         * Verify that IGU ack through BAR was written before restoring
7160         * NIG mask. This loop should exit after 2-3 iterations max.
7161         */
7162        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7163            cnt = 0;
7164
7165            do {
7166                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7167            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7168                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7169
7170            if (!igu_acked) {
7171                BLOGE(sc, "Failed to verify IGU ack on time\n");
7172            }
7173
7174            mb();
7175        }
7176
7177        REG_WR(sc, nig_int_mask_addr, nig_mask);
7178
7179	bxe_release_phy_lock(sc);
7180    }
7181}
7182
7183static void
7184bxe_print_next_block(struct bxe_softc *sc,
7185                     int              idx,
7186                     const char       *blk)
7187{
7188    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7189}
7190
7191static int
7192bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7193                              uint32_t         sig,
7194                              int              par_num,
7195                              uint8_t          print)
7196{
7197    uint32_t cur_bit = 0;
7198    int i = 0;
7199
7200    for (i = 0; sig; i++) {
7201        cur_bit = ((uint32_t)0x1 << i);
7202        if (sig & cur_bit) {
7203            switch (cur_bit) {
7204            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7205                if (print)
7206                    bxe_print_next_block(sc, par_num++, "BRB");
7207                break;
7208            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7209                if (print)
7210                    bxe_print_next_block(sc, par_num++, "PARSER");
7211                break;
7212            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7213                if (print)
7214                    bxe_print_next_block(sc, par_num++, "TSDM");
7215                break;
7216            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7217                if (print)
7218                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7219                break;
7220            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7221                if (print)
7222                    bxe_print_next_block(sc, par_num++, "TCM");
7223                break;
7224            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7225                if (print)
7226                    bxe_print_next_block(sc, par_num++, "TSEMI");
7227                break;
7228            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7229                if (print)
7230                    bxe_print_next_block(sc, par_num++, "XPB");
7231                break;
7232            }
7233
7234            /* Clear the bit */
7235            sig &= ~cur_bit;
7236        }
7237    }
7238
7239    return (par_num);
7240}
7241
7242static int
7243bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7244                              uint32_t         sig,
7245                              int              par_num,
7246                              uint8_t          *global,
7247                              uint8_t          print)
7248{
7249    int i = 0;
7250    uint32_t cur_bit = 0;
7251    for (i = 0; sig; i++) {
7252        cur_bit = ((uint32_t)0x1 << i);
7253        if (sig & cur_bit) {
7254            switch (cur_bit) {
7255            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7256                if (print)
7257                    bxe_print_next_block(sc, par_num++, "PBF");
7258                break;
7259            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7260                if (print)
7261                    bxe_print_next_block(sc, par_num++, "QM");
7262                break;
7263            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7264                if (print)
7265                    bxe_print_next_block(sc, par_num++, "TM");
7266                break;
7267            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7268                if (print)
7269                    bxe_print_next_block(sc, par_num++, "XSDM");
7270                break;
7271            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7272                if (print)
7273                    bxe_print_next_block(sc, par_num++, "XCM");
7274                break;
7275            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7276                if (print)
7277                    bxe_print_next_block(sc, par_num++, "XSEMI");
7278                break;
7279            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7280                if (print)
7281                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7282                break;
7283            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7284                if (print)
7285                    bxe_print_next_block(sc, par_num++, "NIG");
7286                break;
7287            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7288                if (print)
7289                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7290                *global = TRUE;
7291                break;
7292            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7293                if (print)
7294                    bxe_print_next_block(sc, par_num++, "DEBUG");
7295                break;
7296            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7297                if (print)
7298                    bxe_print_next_block(sc, par_num++, "USDM");
7299                break;
7300            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7301                if (print)
7302                    bxe_print_next_block(sc, par_num++, "UCM");
7303                break;
7304            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7305                if (print)
7306                    bxe_print_next_block(sc, par_num++, "USEMI");
7307                break;
7308            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7309                if (print)
7310                    bxe_print_next_block(sc, par_num++, "UPB");
7311                break;
7312            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7313                if (print)
7314                    bxe_print_next_block(sc, par_num++, "CSDM");
7315                break;
7316            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7317                if (print)
7318                    bxe_print_next_block(sc, par_num++, "CCM");
7319                break;
7320            }
7321
7322            /* Clear the bit */
7323            sig &= ~cur_bit;
7324        }
7325    }
7326
7327    return (par_num);
7328}
7329
7330static int
7331bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7332                              uint32_t         sig,
7333                              int              par_num,
7334                              uint8_t          print)
7335{
7336    uint32_t cur_bit = 0;
7337    int i = 0;
7338
7339    for (i = 0; sig; i++) {
7340        cur_bit = ((uint32_t)0x1 << i);
7341        if (sig & cur_bit) {
7342            switch (cur_bit) {
7343            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7344                if (print)
7345                    bxe_print_next_block(sc, par_num++, "CSEMI");
7346                break;
7347            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7348                if (print)
7349                    bxe_print_next_block(sc, par_num++, "PXP");
7350                break;
7351            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7352                if (print)
7353                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7354                break;
7355            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7356                if (print)
7357                    bxe_print_next_block(sc, par_num++, "CFC");
7358                break;
7359            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7360                if (print)
7361                    bxe_print_next_block(sc, par_num++, "CDU");
7362                break;
7363            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7364                if (print)
7365                    bxe_print_next_block(sc, par_num++, "DMAE");
7366                break;
7367            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7368                if (print)
7369                    bxe_print_next_block(sc, par_num++, "IGU");
7370                break;
7371            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7372                if (print)
7373                    bxe_print_next_block(sc, par_num++, "MISC");
7374                break;
7375            }
7376
7377            /* Clear the bit */
7378            sig &= ~cur_bit;
7379        }
7380    }
7381
7382    return (par_num);
7383}
7384
7385static int
7386bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7387                              uint32_t         sig,
7388                              int              par_num,
7389                              uint8_t          *global,
7390                              uint8_t          print)
7391{
7392    uint32_t cur_bit = 0;
7393    int i = 0;
7394
7395    for (i = 0; sig; i++) {
7396        cur_bit = ((uint32_t)0x1 << i);
7397        if (sig & cur_bit) {
7398            switch (cur_bit) {
7399            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7400                if (print)
7401                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7402                *global = TRUE;
7403                break;
7404            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7405                if (print)
7406                    bxe_print_next_block(sc, par_num++,
7407                              "MCP UMP RX");
7408                *global = TRUE;
7409                break;
7410            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7411                if (print)
7412                    bxe_print_next_block(sc, par_num++,
7413                              "MCP UMP TX");
7414                *global = TRUE;
7415                break;
7416            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7417                if (print)
7418                    bxe_print_next_block(sc, par_num++,
7419                              "MCP SCPAD");
7420                *global = TRUE;
7421                break;
7422            }
7423
7424            /* Clear the bit */
7425            sig &= ~cur_bit;
7426        }
7427    }
7428
7429    return (par_num);
7430}
7431
7432static int
7433bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7434                              uint32_t         sig,
7435                              int              par_num,
7436                              uint8_t          print)
7437{
7438    uint32_t cur_bit = 0;
7439    int i = 0;
7440
7441    for (i = 0; sig; i++) {
7442        cur_bit = ((uint32_t)0x1 << i);
7443        if (sig & cur_bit) {
7444            switch (cur_bit) {
7445            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7446                if (print)
7447                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7448                break;
7449            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7450                if (print)
7451                    bxe_print_next_block(sc, par_num++, "ATC");
7452                break;
7453            }
7454
7455            /* Clear the bit */
7456            sig &= ~cur_bit;
7457        }
7458    }
7459
7460    return (par_num);
7461}
7462
7463static uint8_t
7464bxe_parity_attn(struct bxe_softc *sc,
7465                uint8_t          *global,
7466                uint8_t          print,
7467                uint32_t         *sig)
7468{
7469    int par_num = 0;
7470
7471    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7472        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7473        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7474        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7475        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7476        BLOGE(sc, "Parity error: HW block parity attention:\n"
7477                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7478              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7479              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7480              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7481              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7482              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7483
7484        if (print)
7485            BLOGI(sc, "Parity errors detected in blocks: ");
7486
7487        par_num =
7488            bxe_check_blocks_with_parity0(sc, sig[0] &
7489                                          HW_PRTY_ASSERT_SET_0,
7490                                          par_num, print);
7491        par_num =
7492            bxe_check_blocks_with_parity1(sc, sig[1] &
7493                                          HW_PRTY_ASSERT_SET_1,
7494                                          par_num, global, print);
7495        par_num =
7496            bxe_check_blocks_with_parity2(sc, sig[2] &
7497                                          HW_PRTY_ASSERT_SET_2,
7498                                          par_num, print);
7499        par_num =
7500            bxe_check_blocks_with_parity3(sc, sig[3] &
7501                                          HW_PRTY_ASSERT_SET_3,
7502                                          par_num, global, print);
7503        par_num =
7504            bxe_check_blocks_with_parity4(sc, sig[4] &
7505                                          HW_PRTY_ASSERT_SET_4,
7506                                          par_num, print);
7507
7508        if (print)
7509            BLOGI(sc, "\n");
7510
7511	if (*global == TRUE) {
7512                BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7513        }
7514
7515        return (TRUE);
7516    }
7517
7518    return (FALSE);
7519}
7520
7521static uint8_t
7522bxe_chk_parity_attn(struct bxe_softc *sc,
7523                    uint8_t          *global,
7524                    uint8_t          print)
7525{
7526    struct attn_route attn = { {0} };
7527    int port = SC_PORT(sc);
7528
7529    if(sc->state != BXE_STATE_OPEN)
7530        return FALSE;
7531
7532    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7533    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7534    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7535    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7536
7537    /*
7538     * Since MCP attentions can't be disabled inside the block, we need to
7539     * read AEU registers to see whether they're currently disabled
7540     */
7541    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7542                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7543                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7544                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7545
7546
7547    if (!CHIP_IS_E1x(sc))
7548        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7549
7550    return (bxe_parity_attn(sc, global, print, attn.sig));
7551}
7552
7553static void
7554bxe_attn_int_deasserted4(struct bxe_softc *sc,
7555                         uint32_t         attn)
7556{
7557    uint32_t val;
7558    bool err_flg = false;
7559
7560    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7561        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7562        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7563        err_flg = true;
7564        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7565            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7566        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7567            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7568        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7569            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7570        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7571            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7572        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7573            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7574        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7575            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7576        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7577            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7578        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7579            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7580        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7581            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7582    }
7583
7584    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7585        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7586        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7587	err_flg = true;
7588        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7589            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7590        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7591            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7592        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7593            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7594        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7595            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7596        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7597            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7598        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7599            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7600    }
7601
7602    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7603                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7604        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7605              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7606                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7607	err_flg = true;
7608    }
7609    if (err_flg) {
7610	BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7611	taskqueue_enqueue_timeout(taskqueue_thread,
7612	    &sc->sp_err_timeout_task, hz/10);
7613    }
7614
7615}
7616
7617static void
7618bxe_e1h_disable(struct bxe_softc *sc)
7619{
7620    int port = SC_PORT(sc);
7621
7622    bxe_tx_disable(sc);
7623
7624    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7625}
7626
7627static void
7628bxe_e1h_enable(struct bxe_softc *sc)
7629{
7630    int port = SC_PORT(sc);
7631
7632    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7633
7634    // XXX bxe_tx_enable(sc);
7635}
7636
7637/*
7638 * called due to MCP event (on pmf):
7639 *   reread new bandwidth configuration
7640 *   configure FW
7641 *   notify others function about the change
7642 */
7643static void
7644bxe_config_mf_bw(struct bxe_softc *sc)
7645{
7646    if (sc->link_vars.link_up) {
7647        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7648        // XXX bxe_link_sync_notify(sc);
7649    }
7650
7651    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7652}
7653
7654static void
7655bxe_set_mf_bw(struct bxe_softc *sc)
7656{
7657    bxe_config_mf_bw(sc);
7658    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7659}
7660
7661static void
7662bxe_handle_eee_event(struct bxe_softc *sc)
7663{
7664    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7665    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7666}
7667
7668#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7669
7670static void
7671bxe_drv_info_ether_stat(struct bxe_softc *sc)
7672{
7673    struct eth_stats_info *ether_stat =
7674        &sc->sp->drv_info_to_mcp.ether_stat;
7675
7676    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7677            ETH_STAT_INFO_VERSION_LEN);
7678
7679    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7680    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7681                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7682                                          ether_stat->mac_local + MAC_PAD,
7683                                          MAC_PAD, ETH_ALEN);
7684
7685    ether_stat->mtu_size = sc->mtu;
7686
7687    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7688    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7689        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7690    }
7691
7692    // XXX ether_stat->feature_flags |= ???;
7693
7694    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7695
7696    ether_stat->txq_size = sc->tx_ring_size;
7697    ether_stat->rxq_size = sc->rx_ring_size;
7698}
7699
7700static void
7701bxe_handle_drv_info_req(struct bxe_softc *sc)
7702{
7703    enum drv_info_opcode op_code;
7704    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7705
7706    /* if the drv_info version supported by the MFW doesn't match, send a NACK */
7707    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7708        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7709        return;
7710    }
7711
7712    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7713               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7714
7715    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7716
7717    switch (op_code) {
7718    case ETH_STATS_OPCODE:
7719        bxe_drv_info_ether_stat(sc);
7720        break;
7721    case FCOE_STATS_OPCODE:
7722    case ISCSI_STATS_OPCODE:
7723    default:
7724        /* if the op code isn't supported, send a NACK */
7725        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7726        return;
7727    }
7728
7729    /*
7730     * If we got the drv_info attention from the MFW then these fields are
7731     * guaranteed to be defined in shmem2.
7732     */
7733    SHMEM2_WR(sc, drv_info_host_addr_lo,
7734              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7735    SHMEM2_WR(sc, drv_info_host_addr_hi,
7736              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7737
7738    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7739}
7740
7741static void
7742bxe_dcc_event(struct bxe_softc *sc,
7743              uint32_t         dcc_event)
7744{
7745    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7746
7747    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7748        /*
7749         * This is the only place besides the function initialization
7750         * where the sc->flags can change, so it is done without any
7751         * locks.
7752         */
7753        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7754            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7755            sc->flags |= BXE_MF_FUNC_DIS;
7756            bxe_e1h_disable(sc);
7757        } else {
7758            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7759            sc->flags &= ~BXE_MF_FUNC_DIS;
7760            bxe_e1h_enable(sc);
7761        }
7762        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7763    }
7764
7765    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7766        bxe_config_mf_bw(sc);
7767        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7768    }
7769
7770    /* Report results to MCP */
7771    if (dcc_event)
7772        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7773    else
7774        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7775}
7776
7777static void
7778bxe_pmf_update(struct bxe_softc *sc)
7779{
7780    int port = SC_PORT(sc);
7781    uint32_t val;
7782
7783    sc->port.pmf = 1;
7784    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7785
7786    /*
7787     * We need the mb() to ensure the ordering between the writing to
7788     * sc->port.pmf here and reading it from the bxe_periodic_task().
7789     */
7790    mb();
7791
7792    /* queue a periodic task */
7793    // XXX schedule task...
7794
7795    // XXX bxe_dcbx_pmf_update(sc);
7796
7797    /* enable nig attention */
7798    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7799    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7800        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7801        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7802    } else if (!CHIP_IS_E1x(sc)) {
7803        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7804        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7805    }
7806
7807    bxe_stats_handle(sc, STATS_EVENT_PMF);
7808}
7809
7810static int
7811bxe_mc_assert(struct bxe_softc *sc)
7812{
7813    char last_idx;
7814    int i, rc = 0;
7815    uint32_t row0, row1, row2, row3;
7816
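    /*
     * Each STORM processor keeps an assert list in its internal memory:
     * entries are four 32-bit words and the list ends at the first entry
     * whose first word is COMMON_ASM_INVALID_ASSERT_OPCODE. rc counts the
     * asserts found across all STORMs.
     */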
7817    /* XSTORM */
7818    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7819    if (last_idx)
7820        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7821
7822    /* print the asserts */
7823    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7824
7825        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7826        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7827        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7828        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7829
7830        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7831            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7832                  i, row3, row2, row1, row0);
7833            rc++;
7834        } else {
7835            break;
7836        }
7837    }
7838
7839    /* TSTORM */
7840    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7841    if (last_idx) {
7842        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7843    }
7844
7845    /* print the asserts */
7846    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7847
7848        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7849        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7850        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7851        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7852
7853        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7854            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7855                  i, row3, row2, row1, row0);
7856            rc++;
7857        } else {
7858            break;
7859        }
7860    }
7861
7862    /* CSTORM */
7863    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7864    if (last_idx) {
7865        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7866    }
7867
7868    /* print the asserts */
7869    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7870
7871        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7872        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7873        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7874        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7875
7876        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7877            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7878                  i, row3, row2, row1, row0);
7879            rc++;
7880        } else {
7881            break;
7882        }
7883    }
7884
7885    /* USTORM */
7886    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7887    if (last_idx) {
7888        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7889    }
7890
7891    /* print the asserts */
7892    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7893
7894        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7895        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7896        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7897        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7898
7899        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7900            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7901                  i, row3, row2, row1, row0);
7902            rc++;
7903        } else {
7904            break;
7905        }
7906    }
7907
7908    return (rc);
7909}
7910
7911static void
7912bxe_attn_int_deasserted3(struct bxe_softc *sc,
7913                         uint32_t         attn)
7914{
7915    int func = SC_FUNC(sc);
7916    uint32_t val;
7917
7918    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7919
7920        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7921
7922            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7923            bxe_read_mf_cfg(sc);
7924            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7925                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7926            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7927
7928            if (val & DRV_STATUS_DCC_EVENT_MASK)
7929                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7930
7931            if (val & DRV_STATUS_SET_MF_BW)
7932                bxe_set_mf_bw(sc);
7933
7934            if (val & DRV_STATUS_DRV_INFO_REQ)
7935                bxe_handle_drv_info_req(sc);
7936
7937            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7938                bxe_pmf_update(sc);
7939
7940            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7941                bxe_handle_eee_event(sc);
7942
7943            if (sc->link_vars.periodic_flags &
7944                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7945                /* sync with link */
7946		bxe_acquire_phy_lock(sc);
7947                sc->link_vars.periodic_flags &=
7948                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7949		bxe_release_phy_lock(sc);
7950                if (IS_MF(sc))
7951                    ; // XXX bxe_link_sync_notify(sc);
7952                bxe_link_report(sc);
7953            }
7954
7955            /*
7956             * Always call it here: bxe_link_report() will
7957             * prevent the link indication duplication.
7958             */
7959            bxe_link_status_update(sc);
7960
7961        } else if (attn & BXE_MC_ASSERT_BITS) {
7962
7963            BLOGE(sc, "MC assert!\n");
7964            bxe_mc_assert(sc);
7965            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7966            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7967            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7968            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7969            bxe_int_disable(sc);
7970            BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7971            taskqueue_enqueue_timeout(taskqueue_thread,
7972                &sc->sp_err_timeout_task, hz/10);
7973
7974        } else if (attn & BXE_MCP_ASSERT) {
7975
7976            BLOGE(sc, "MCP assert!\n");
7977            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7978            BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7979            taskqueue_enqueue_timeout(taskqueue_thread,
7980                &sc->sp_err_timeout_task, hz/10);
            bxe_int_disable(sc);  /* avoid repetitive assert alerts */

7984        } else {
7985            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7986        }
7987    }
7988
7989    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
7990        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
7991        if (attn & BXE_GRC_TIMEOUT) {
7992            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
7993            BLOGE(sc, "GRC time-out 0x%08x\n", val);
7994        }
7995        if (attn & BXE_GRC_RSV) {
7996            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
7997            BLOGE(sc, "GRC reserved 0x%08x\n", val);
7998        }
7999        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8000    }
8001}
8002
8003static void
8004bxe_attn_int_deasserted2(struct bxe_softc *sc,
8005                         uint32_t         attn)
8006{
8007    int port = SC_PORT(sc);
8008    int reg_offset;
8009    uint32_t val0, mask0, val1, mask1;
8010    uint32_t val;
8011    bool err_flg = false;
8012
8013    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8014        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8015        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8016        /* CFC error attention */
8017        if (val & 0x2) {
8018            BLOGE(sc, "FATAL error from CFC\n");
            err_flg = true;
8020        }
8021    }
8022
8023    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8024        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8025        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8026        /* RQ_USDMDP_FIFO_OVERFLOW */
8027        if (val & 0x18000) {
8028            BLOGE(sc, "FATAL error from PXP\n");
            err_flg = true;
8030        }
8031
8032        if (!CHIP_IS_E1x(sc)) {
8033            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8034            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
            err_flg = true;
8036        }
8037    }
8038
8039#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8040#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8041
8042    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR.
         */
8046        if (!CHIP_IS_E1x(sc)) {
8047            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8048            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8049            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8050            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8051            /*
8052             * If the only PXP2_EOP_ERROR_BIT is set in
8053             * STS0 and STS1 - clear it
8054             *
8055             * probably we lose additional attentions between
8056             * STS0 and STS_CLR0, in this case user will not
8057             * be notified about them
8058             */
8059            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8060                !(val1 & mask1))
8061                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8062
8063            /* print the register, since no one can restore it */
8064            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8065
8066            /*
8067             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8068             * then notify
8069             */
8070            if (val0 & PXP2_EOP_ERROR_BIT) {
8071                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
                err_flg = true;
8073
8074                /*
8075                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8076                 * set then clear attention from PXP2 block without panic
8077                 */
8078                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8079                    ((val1 & mask1) == 0))
8080                    attn &= ~AEU_PXP2_HW_INT_BIT;
8081            }
8082        }
8083    }
8084
8085    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8086        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8087                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8088
8089        val = REG_RD(sc, reg_offset);
8090        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8091        REG_WR(sc, reg_offset, val);
8092
8093        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8094              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        err_flg = true;
8096        bxe_panic(sc, ("HW block attention set2\n"));
8097    }
    if (err_flg) {
8099        BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8100        taskqueue_enqueue_timeout(taskqueue_thread,
8101           &sc->sp_err_timeout_task, hz/10);
8102    }
8103
8104}
8105
8106static void
8107bxe_attn_int_deasserted1(struct bxe_softc *sc,
8108                         uint32_t         attn)
8109{
8110    int port = SC_PORT(sc);
8111    int reg_offset;
8112    uint32_t val;
8113    bool err_flg = false;
8114
8115    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8116        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8117        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8118        /* DORQ discard attention */
8119        if (val & 0x2) {
8120            BLOGE(sc, "FATAL error from DORQ\n");
            err_flg = true;
8122        }
8123    }
8124
8125    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8126        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8127                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8128
8129        val = REG_RD(sc, reg_offset);
8130        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8131        REG_WR(sc, reg_offset, val);
8132
8133        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8134              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8135        err_flg = true;
8136        bxe_panic(sc, ("HW block attention set1\n"));
8137    }
    if (err_flg) {
8139        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8140        taskqueue_enqueue_timeout(taskqueue_thread,
8141           &sc->sp_err_timeout_task, hz/10);
8142    }
8143
8144}
8145
8146static void
8147bxe_attn_int_deasserted0(struct bxe_softc *sc,
8148                         uint32_t         attn)
8149{
8150    int port = SC_PORT(sc);
8151    int reg_offset;
8152    uint32_t val;
8153
8154    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8155                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8156
8157    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8158        val = REG_RD(sc, reg_offset);
8159        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8160        REG_WR(sc, reg_offset, val);
8161
8162        BLOGW(sc, "SPIO5 hw attention\n");
8163
8164        /* Fan failure attention */
8165        elink_hw_reset_phy(&sc->link_params);
8166        bxe_fan_failure(sc);
8167    }
8168
8169    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
8173    }
8174
8175    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8176        val = REG_RD(sc, reg_offset);
8177        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
           &sc->sp_err_timeout_task, hz/10);

        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8187    }
8188}
8189
8190static void
8191bxe_attn_int_deasserted(struct bxe_softc *sc,
8192                        uint32_t         deasserted)
8193{
8194    struct attn_route attn;
8195    struct attn_route *group_mask;
8196    int port = SC_PORT(sc);
8197    int index;
8198    uint32_t reg_addr;
8199    uint32_t val;
8200    uint32_t aeu_mask;
8201    uint8_t global = FALSE;
8202
8203    /*
8204     * Need to take HW lock because MCP or other port might also
8205     * try to handle this event.
8206     */
8207    bxe_acquire_alr(sc);
8208
8209    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /* XXX
         * In case of parity errors don't handle attentions so that
         * the other function will also "see" the parity errors.
         */
8214        // XXX schedule a recovery task...
8215        /* disable HW interrupts */
8216        bxe_int_disable(sc);
8217        BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8218        taskqueue_enqueue_timeout(taskqueue_thread,
8219           &sc->sp_err_timeout_task, hz/10);
8220        bxe_release_alr(sc);
8221        return;
8222    }
8223
8224    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8225    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8226    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8227    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8228    if (!CHIP_IS_E1x(sc)) {
8229        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8230    } else {
8231        attn.sig[4] = 0;
8232    }
8233
8234    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8235          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8236
8237    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8238        if (deasserted & (1 << index)) {
8239            group_mask = &sc->attn_group[index];
8240
8241            BLOGD(sc, DBG_INTR,
8242                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8243                  group_mask->sig[0], group_mask->sig[1],
8244                  group_mask->sig[2], group_mask->sig[3],
8245                  group_mask->sig[4]);
8246
8247            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8248            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8249            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8250            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8251            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8252        }
8253    }
8254
8255    bxe_release_alr(sc);
8256
8257    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8258        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8259                    COMMAND_REG_ATTN_BITS_CLR);
8260    } else {
8261        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8262    }
8263
8264    val = ~deasserted;
8265    BLOGD(sc, DBG_INTR,
8266          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8267          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8268    REG_WR(sc, reg_addr, val);
8269
8270    if (~sc->attn_state & deasserted) {
8271        BLOGE(sc, "IGU error\n");
8272    }
8273
8274    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8275                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8276
8277    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8278
8279    aeu_mask = REG_RD(sc, reg_addr);
8280
8281    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8282          aeu_mask, deasserted);
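    /* unmask (re-enable) the attention lines that have just deasserted */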
8283    aeu_mask |= (deasserted & 0x3ff);
8284    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8285
8286    REG_WR(sc, reg_addr, aeu_mask);
8287    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8288
8289    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8290    sc->attn_state &= ~deasserted;
8291    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8292}
8293
8294static void
8295bxe_attn_int(struct bxe_softc *sc)
8296{
8297    /* read local copy of bits */
8298    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8299    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8300    uint32_t attn_state = sc->attn_state;
8301
8302    /* look for changed bits */
8303    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8304    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
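    /*
     * Example with illustrative values: attn_bits = 0x5, attn_ack = 0x1 and
     * attn_state = 0x1 gives asserted = 0x4 (bit 2 newly raised) and
     * deasserted = 0x0 (nothing was dropped).
     */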
8305
8306    BLOGD(sc, DBG_INTR,
8307          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8308          attn_bits, attn_ack, asserted, deasserted);
8309
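    /*
     * Sanity check: a bit that agrees between attn_bits and attn_ack but
     * differs from the cached attn_state means an assert/deassert
     * transition was missed.
     */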
8310    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8311        BLOGE(sc, "BAD attention state\n");
8312    }
8313
8314    /* handle bits that were raised */
8315    if (asserted) {
8316        bxe_attn_int_asserted(sc, asserted);
8317    }
8318
8319    if (deasserted) {
8320        bxe_attn_int_deasserted(sc, deasserted);
8321    }
8322}
8323
8324static uint16_t
8325bxe_update_dsb_idx(struct bxe_softc *sc)
8326{
8327    struct host_sp_status_block *def_sb = sc->def_sb;
8328    uint16_t rc = 0;
8329
8330    mb(); /* status block is written to by the chip */
8331
8332    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8333        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8334        rc |= BXE_DEF_SB_ATT_IDX;
8335    }
8336
8337    if (sc->def_idx != def_sb->sp_sb.running_index) {
8338        sc->def_idx = def_sb->sp_sb.running_index;
8339        rc |= BXE_DEF_SB_IDX;
8340    }
8341
8342    mb();
8343
8344    return (rc);
8345}
8346
8347static inline struct ecore_queue_sp_obj *
8348bxe_cid_to_q_obj(struct bxe_softc *sc,
8349                 uint32_t         cid)
8350{
8351    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8352    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8353}
8354
8355static void
8356bxe_handle_mcast_eqe(struct bxe_softc *sc)
8357{
8358    struct ecore_mcast_ramrod_params rparam;
8359    int rc;
8360
8361    memset(&rparam, 0, sizeof(rparam));
8362
8363    rparam.mcast_obj = &sc->mcast_obj;
8364
8365    BXE_MCAST_LOCK(sc);
8366
8367    /* clear pending state for the last command */
8368    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8369
8370    /* if there are pending mcast commands - send them */
8371    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8372        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8373        if (rc < 0) {
8374            BLOGD(sc, DBG_SP,
8375                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8376        }
8377    }
8378
8379    BXE_MCAST_UNLOCK(sc);
8380}
8381
8382static void
8383bxe_handle_classification_eqe(struct bxe_softc      *sc,
8384                              union event_ring_elem *elem)
8385{
8386    unsigned long ramrod_flags = 0;
8387    int rc = 0;
8388    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8389    struct ecore_vlan_mac_obj *vlan_mac_obj;
8390
8391    /* always push next commands out, don't wait here */
8392    bit_set(&ramrod_flags, RAMROD_CONT);
8393
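    /* the low bits of the echo field carry the CID, the upper bits the pending filter type */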
8394    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8395    case ECORE_FILTER_MAC_PENDING:
8396        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8397        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8398        break;
8399
8400    case ECORE_FILTER_MCAST_PENDING:
8401        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8402        /*
8403         * This is only relevant for 57710 where multicast MACs are
8404         * configured as unicast MACs using the same ramrod.
8405         */
8406        bxe_handle_mcast_eqe(sc);
8407        return;
8408
8409    default:
8410        BLOGE(sc, "Unsupported classification command: %d\n",
8411              elem->message.data.eth_event.echo);
8412        return;
8413    }
8414
8415    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8416
8417    if (rc < 0) {
8418        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8419    } else if (rc > 0) {
8420        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8421    }
8422}
8423
8424static void
8425bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8426                       union event_ring_elem *elem)
8427{
8428    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8429
8430    /* send rx_mode command again if was requested */
8431    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8432                               &sc->sp_state)) {
8433        bxe_set_storm_rx_mode(sc);
8434    }
8435}
8436
8437static void
8438bxe_update_eq_prod(struct bxe_softc *sc,
8439                   uint16_t         prod)
8440{
8441    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8442    wmb(); /* keep prod updates ordered */
8443}
8444
8445static void
8446bxe_eq_int(struct bxe_softc *sc)
8447{
8448    uint16_t hw_cons, sw_cons, sw_prod;
8449    union event_ring_elem *elem;
8450    uint8_t echo;
8451    uint32_t cid;
8452    uint8_t opcode;
8453    int spqe_cnt = 0;
8454    struct ecore_queue_sp_obj *q_obj;
8455    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8456    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8457
8458    hw_cons = le16toh(*sc->eq_cons_sb);
8459
8460    /*
     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
     * When hw_cons lands on a next-page descriptor we adjust it so the loop
     * condition below is met. The next-page element is the size of a regular
     * element, hence the increment by 1.
8465     */
8466    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8467        hw_cons++;
8468    }
8469
8470    /*
8471     * This function may never run in parallel with itself for a
8472     * specific sc and no need for a read memory barrier here.
8473     */
8474    sw_cons = sc->eq_cons;
8475    sw_prod = sc->eq_prod;
8476
8477    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8478          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8479
8480    for (;
8481         sw_cons != hw_cons;
8482         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8483
8484        elem = &sc->eq[EQ_DESC(sw_cons)];
8485
8486        /* elem CID originates from FW, actually LE */
8487        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8488        opcode = elem->message.opcode;
8489
8490        /* handle eq element */
8491        switch (opcode) {
8492
8493        case EVENT_RING_OPCODE_STAT_QUERY:
8494            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8495                  sc->stats_comp++);
8496            /* nothing to do with stats comp */
8497            goto next_spqe;
8498
8499        case EVENT_RING_OPCODE_CFC_DEL:
8500            /* handle according to cid range */
8501            /* we may want to verify here that the sc state is HALTING */
8502            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8503            q_obj = bxe_cid_to_q_obj(sc, cid);
8504            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8505                break;
8506            }
8507            goto next_spqe;
8508
8509        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8510            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8511            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8512                break;
8513            }
8514            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8515            goto next_spqe;
8516
8517        case EVENT_RING_OPCODE_START_TRAFFIC:
8518            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8519            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8520                break;
8521            }
8522            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8523            goto next_spqe;
8524
8525        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8526            echo = elem->message.data.function_update_event.echo;
8527            if (echo == SWITCH_UPDATE) {
8528                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8529                if (f_obj->complete_cmd(sc, f_obj,
8530                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8531                    break;
8532                }
8533            }
8534            else {
8535                BLOGD(sc, DBG_SP,
8536                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8537            }
8538            goto next_spqe;
8539
8540        case EVENT_RING_OPCODE_FORWARD_SETUP:
8541            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8542            if (q_obj->complete_cmd(sc, q_obj,
8543                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8544                break;
8545            }
8546            goto next_spqe;
8547
8548        case EVENT_RING_OPCODE_FUNCTION_START:
8549            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8550            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8551                break;
8552            }
8553            goto next_spqe;
8554
8555        case EVENT_RING_OPCODE_FUNCTION_STOP:
8556            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8557            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8558                break;
8559            }
8560            goto next_spqe;
8561        }
8562
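        /*
         * The remaining completions are only valid in specific driver
         * states, so the opcode is combined with sc->state and matched
         * against the state-qualified cases below.
         */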
8563        switch (opcode | sc->state) {
8564        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8565        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8566            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8567            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8568            rss_raw->clear_pending(rss_raw);
8569            break;
8570
8571        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8572        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8573        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8574        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8575        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8576        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8577            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8578            bxe_handle_classification_eqe(sc, elem);
8579            break;
8580
8581        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8582        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8583        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8584            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8585            bxe_handle_mcast_eqe(sc);
8586            break;
8587
8588        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8589        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8590        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8591            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8592            bxe_handle_rx_mode_eqe(sc, elem);
8593            break;
8594
8595        default:
            /* unknown event, log an error and continue */
8597            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8598                  elem->message.opcode, sc->state);
8599        }
8600
8601next_spqe:
8602        spqe_cnt++;
8603    } /* for */
8604
8605    mb();
8606    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8607
8608    sc->eq_cons = sw_cons;
8609    sc->eq_prod = sw_prod;
8610
    /* make sure that the above memory writes have been issued to memory */
8612    wmb();
8613
8614    /* update producer */
8615    bxe_update_eq_prod(sc, sc->eq_prod);
8616}
8617
8618static void
8619bxe_handle_sp_tq(void *context,
8620                 int  pending)
8621{
8622    struct bxe_softc *sc = (struct bxe_softc *)context;
8623    uint16_t status;
8624
8625    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8626
8627    /* what work needs to be performed? */
8628    status = bxe_update_dsb_idx(sc);
8629
8630    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8631
8632    /* HW attentions */
8633    if (status & BXE_DEF_SB_ATT_IDX) {
8634        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8635        bxe_attn_int(sc);
8636        status &= ~BXE_DEF_SB_ATT_IDX;
8637    }
8638
8639    /* SP events: STAT_QUERY and others */
8640    if (status & BXE_DEF_SB_IDX) {
8641        /* handle EQ completions */
8642        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8643        bxe_eq_int(sc);
8644        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8645                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8646        status &= ~BXE_DEF_SB_IDX;
8647    }
8648
8649    /* if status is non zero then something went wrong */
8650    if (__predict_false(status)) {
8651        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8652    }
8653
8654    /* ack status block only if something was actually handled */
8655    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8656               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8657
8658    /*
8659     * Must be called after the EQ processing (since eq leads to sriov
8660     * ramrod completion flows).
8661     * This flow may have been scheduled by the arrival of a ramrod
8662     * completion, or by the sriov code rescheduling itself.
8663     */
8664    // XXX bxe_iov_sp_task(sc);
8665
8666}
8667
8668static void
8669bxe_handle_fp_tq(void *context,
8670                 int  pending)
8671{
8672    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8673    struct bxe_softc *sc = fp->sc;
8674    /* uint8_t more_tx = FALSE; */
8675    uint8_t more_rx = FALSE;
8676
8677    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8678
8679    /* XXX
8680     * IFF_DRV_RUNNING state can't be checked here since we process
8681     * slowpath events on a client queue during setup. Instead
8682     * we need to add a "process/continue" flag here that the driver
8683     * can use to tell the task here not to do anything.
8684     */
8685#if 0
8686    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8687        return;
8688    }
8689#endif
8690
8691    /* update the fastpath index */
8692    bxe_update_fp_sb_idx(fp);
8693
8694    /* XXX add loop here if ever support multiple tx CoS */
8695    /* fp->txdata[cos] */
8696    if (bxe_has_tx_work(fp)) {
8697        BXE_FP_TX_LOCK(fp);
8698        /* more_tx = */ bxe_txeof(sc, fp);
8699        BXE_FP_TX_UNLOCK(fp);
8700    }
8701
8702    if (bxe_has_rx_work(fp)) {
8703        more_rx = bxe_rxeof(sc, fp);
8704    }
8705
8706    if (more_rx /*|| more_tx*/) {
8707        /* still more work to do */
8708        taskqueue_enqueue(fp->tq, &fp->tq_task);
8709        return;
8710    }
8711
8712    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8713               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8714}
8715
8716static void
8717bxe_task_fp(struct bxe_fastpath *fp)
8718{
8719    struct bxe_softc *sc = fp->sc;
8720    /* uint8_t more_tx = FALSE; */
8721    uint8_t more_rx = FALSE;
8722
8723    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8724
8725    /* update the fastpath index */
8726    bxe_update_fp_sb_idx(fp);
8727
8728    /* XXX add loop here if ever support multiple tx CoS */
8729    /* fp->txdata[cos] */
8730    if (bxe_has_tx_work(fp)) {
8731        BXE_FP_TX_LOCK(fp);
8732        /* more_tx = */ bxe_txeof(sc, fp);
8733        BXE_FP_TX_UNLOCK(fp);
8734    }
8735
8736    if (bxe_has_rx_work(fp)) {
8737        more_rx = bxe_rxeof(sc, fp);
8738    }
8739
8740    if (more_rx /*|| more_tx*/) {
        /* still more work to do, bail out of this ISR and process later */
8742        taskqueue_enqueue(fp->tq, &fp->tq_task);
8743        return;
8744    }
8745
8746    /*
8747     * Here we write the fastpath index taken before doing any tx or rx work.
8748     * It is very well possible other hw events occurred up to this point and
8749     * they were actually processed accordingly above. Since we're going to
8750     * write an older fastpath index, an interrupt is coming which we might
8751     * not do any work in.
8752     */
8753    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8754               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8755}
8756
8757/*
8758 * Legacy interrupt entry point.
8759 *
8760 * Verifies that the controller generated the interrupt and
8761 * then calls a separate routine to handle the various
8762 * interrupt causes: link, RX, and TX.
8763 */
8764static void
8765bxe_intr_legacy(void *xsc)
8766{
8767    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8768    struct bxe_fastpath *fp;
8769    uint16_t status, mask;
8770    int i;
8771
8772    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8773
8774    /*
8775     * 0 for ustorm, 1 for cstorm
8776     * the bits returned from ack_int() are 0-15
8777     * bit 0 = attention status block
8778     * bit 1 = fast path status block
8779     * a mask of 0x2 or more = tx/rx event
8780     * a mask of 1 = slow path event
8781     */
8782
8783    status = bxe_ack_int(sc);
8784
8785    /* the interrupt is not for us */
8786    if (__predict_false(status == 0)) {
8787        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8788        return;
8789    }
8790
8791    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8792
8793    FOR_EACH_ETH_QUEUE(sc, i) {
8794        fp = &sc->fp[i];
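        /*
         * Fastpath status bits start at bit 1 (bit 0 is the slowpath);
         * skip one more bit when a CNIC queue is present.
         */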
8795        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8796        if (status & mask) {
8797            /* acknowledge and disable further fastpath interrupts */
8798            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8799            bxe_task_fp(fp);
8800            status &= ~mask;
8801        }
8802    }
8803
8804    if (__predict_false(status & 0x1)) {
8805        /* acknowledge and disable further slowpath interrupts */
8806        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8807
8808        /* schedule slowpath handler */
8809        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8810
8811        status &= ~0x1;
8812    }
8813
8814    if (__predict_false(status)) {
8815        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8816    }
8817}
8818
8819/* slowpath interrupt entry point */
8820static void
8821bxe_intr_sp(void *xsc)
8822{
8823    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8824
8825    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8826
8827    /* acknowledge and disable further slowpath interrupts */
8828    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8829
8830    /* schedule slowpath handler */
8831    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8832}
8833
8834/* fastpath interrupt entry point */
8835static void
8836bxe_intr_fp(void *xfp)
8837{
8838    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8839    struct bxe_softc *sc = fp->sc;
8840
8841    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8842
8843    BLOGD(sc, DBG_INTR,
8844          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8845          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8846
8847    /* acknowledge and disable further fastpath interrupts */
8848    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8849
8850    bxe_task_fp(fp);
8851}
8852
8853/* Release all interrupts allocated by the driver. */
8854static void
8855bxe_interrupt_free(struct bxe_softc *sc)
8856{
8857    int i;
8858
8859    switch (sc->interrupt_mode) {
8860    case INTR_MODE_INTX:
8861        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8862        if (sc->intr[0].resource != NULL) {
8863            bus_release_resource(sc->dev,
8864                                 SYS_RES_IRQ,
8865                                 sc->intr[0].rid,
8866                                 sc->intr[0].resource);
8867        }
8868        break;
8869    case INTR_MODE_MSI:
8870        for (i = 0; i < sc->intr_count; i++) {
8871            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8872            if (sc->intr[i].resource && sc->intr[i].rid) {
8873                bus_release_resource(sc->dev,
8874                                     SYS_RES_IRQ,
8875                                     sc->intr[i].rid,
8876                                     sc->intr[i].resource);
8877            }
8878        }
8879        pci_release_msi(sc->dev);
8880        break;
8881    case INTR_MODE_MSIX:
8882        for (i = 0; i < sc->intr_count; i++) {
8883            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8884            if (sc->intr[i].resource && sc->intr[i].rid) {
8885                bus_release_resource(sc->dev,
8886                                     SYS_RES_IRQ,
8887                                     sc->intr[i].rid,
8888                                     sc->intr[i].resource);
8889            }
8890        }
8891        pci_release_msi(sc->dev);
8892        break;
8893    default:
8894        /* nothing to do as initial allocation failed */
8895        break;
8896    }
8897}
8898
8899/*
 * This function determines and allocates the appropriate
 * interrupt based on system capabilities and user request.
 *
 * The user may force a particular interrupt mode, specify
 * the number of receive queues, specify the method for
 * distributing received frames to receive queues, or use
 * the default settings which will automatically select the
 * best supported combination.  In addition, the OS may or
 * may not support certain combinations of these settings.
 * This routine attempts to reconcile the settings requested
 * by the user with the capabilities available from the system
 * to select the optimal combination of features.
8912 *
8913 * Returns:
8914 *   0 = Success, !0 = Failure.
8915 */
8916static int
8917bxe_interrupt_alloc(struct bxe_softc *sc)
8918{
8919    int msix_count = 0;
8920    int msi_count = 0;
8921    int num_requested = 0;
8922    int num_allocated = 0;
8923    int rid, i, j;
8924    int rc;
8925
8926    /* get the number of available MSI/MSI-X interrupts from the OS */
8927    if (sc->interrupt_mode > 0) {
8928        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8929            msix_count = pci_msix_count(sc->dev);
8930        }
8931
8932        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8933            msi_count = pci_msi_count(sc->dev);
8934        }
8935
8936        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8937              msi_count, msix_count);
8938    }
8939
8940    do { /* try allocating MSI-X interrupt resources (at least 2) */
8941        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8942            break;
8943        }
8944
8945        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8946            (msix_count < 2)) {
8947            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8948            break;
8949        }
8950
8951        /* ask for the necessary number of MSI-X vectors */
8952        num_requested = min((sc->num_queues + 1), msix_count);
8953
8954        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8955
8956        num_allocated = num_requested;
8957        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8958            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8959            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8960            break;
8961        }
8962
8963        if (num_allocated < 2) { /* possible? */
8964            BLOGE(sc, "MSI-X allocation less than 2!\n");
8965            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8966            pci_release_msi(sc->dev);
8967            break;
8968        }
8969
8970        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8971              num_requested, num_allocated);
8972
8973        /* best effort so use the number of vectors allocated to us */
8974        sc->intr_count = num_allocated;
8975        sc->num_queues = num_allocated - 1;
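        /* the first vector services the slowpath; each remaining vector drives one fastpath queue */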
8976
8977        rid = 1; /* initial resource identifier */
8978
8979        /* allocate the MSI-X vectors */
8980        for (i = 0; i < num_allocated; i++) {
8981            sc->intr[i].rid = (rid + i);
8982
8983            if ((sc->intr[i].resource =
8984                 bus_alloc_resource_any(sc->dev,
8985                                        SYS_RES_IRQ,
8986                                        &sc->intr[i].rid,
8987                                        RF_ACTIVE)) == NULL) {
8988                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8989                      i, (rid + i));
8990
8991                for (j = (i - 1); j >= 0; j--) {
8992                    bus_release_resource(sc->dev,
8993                                         SYS_RES_IRQ,
8994                                         sc->intr[j].rid,
8995                                         sc->intr[j].resource);
8996                }
8997
8998                sc->intr_count = 0;
8999                sc->num_queues = 0;
9000                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9001                pci_release_msi(sc->dev);
9002                break;
9003            }
9004
9005            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9006        }
9007    } while (0);
9008
9009    do { /* try allocating MSI vector resources (at least 2) */
9010        if (sc->interrupt_mode != INTR_MODE_MSI) {
9011            break;
9012        }
9013
9014        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9015            (msi_count < 1)) {
9016            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9017            break;
9018        }
9019
9020        /* ask for a single MSI vector */
9021        num_requested = 1;
9022
9023        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9024
9025        num_allocated = num_requested;
9026        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9027            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9028            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9029            break;
9030        }
9031
9032        if (num_allocated != 1) { /* possible? */
9033            BLOGE(sc, "MSI allocation is not 1!\n");
9034            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9035            pci_release_msi(sc->dev);
9036            break;
9037        }
9038
9039        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9040              num_requested, num_allocated);
9041
9042        /* best effort so use the number of vectors allocated to us */
9043        sc->intr_count = num_allocated;
9044        sc->num_queues = num_allocated;
9045
9046        rid = 1; /* initial resource identifier */
9047
9048        sc->intr[0].rid = rid;
9049
9050        if ((sc->intr[0].resource =
9051             bus_alloc_resource_any(sc->dev,
9052                                    SYS_RES_IRQ,
9053                                    &sc->intr[0].rid,
9054                                    RF_ACTIVE)) == NULL) {
9055            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9056            sc->intr_count = 0;
9057            sc->num_queues = 0;
9058            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9059            pci_release_msi(sc->dev);
9060            break;
9061        }
9062
9063        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9064    } while (0);
9065
9066    do { /* try allocating INTx vector resources */
9067        if (sc->interrupt_mode != INTR_MODE_INTX) {
9068            break;
9069        }
9070
9071        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9072
9073        /* only one vector for INTx */
9074        sc->intr_count = 1;
9075        sc->num_queues = 1;
9076
9077        rid = 0; /* initial resource identifier */
9078
9079        sc->intr[0].rid = rid;
9080
9081        if ((sc->intr[0].resource =
9082             bus_alloc_resource_any(sc->dev,
9083                                    SYS_RES_IRQ,
9084                                    &sc->intr[0].rid,
9085                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9086            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9087            sc->intr_count = 0;
9088            sc->num_queues = 0;
9089            sc->interrupt_mode = -1; /* Failed! */
9090            break;
9091        }
9092
9093        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9094    } while (0);
9095
9096    if (sc->interrupt_mode == -1) {
9097        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9098        rc = 1;
9099    } else {
9100        BLOGD(sc, DBG_LOAD,
9101              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9102              sc->interrupt_mode, sc->num_queues);
9103        rc = 0;
9104    }
9105
9106    return (rc);
9107}
9108
9109static void
9110bxe_interrupt_detach(struct bxe_softc *sc)
9111{
9112    struct bxe_fastpath *fp;
9113    int i;
9114
9115    /* release interrupt resources */
9116    for (i = 0; i < sc->intr_count; i++) {
9117        if (sc->intr[i].resource && sc->intr[i].tag) {
9118            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9119            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9120        }
9121    }
9122
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
        }
    }

    /* free the fastpath task queues only after all of them have drained */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq != NULL) {
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }
9141
9142    if (sc->sp_tq) {
9143        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9144        taskqueue_free(sc->sp_tq);
9145        sc->sp_tq = NULL;
9146    }
9147}
9148
9149/*
9150 * Enables interrupts and attach to the ISR.
9151 *
9152 * When using multiple MSI/MSI-X vectors the first vector
9153 * is used for slowpath operations while all remaining
9154 * vectors are used for fastpath operations.  If only a
9155 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9156 * ISR must look for both slowpath and fastpath completions.
9157 */
9158static int
9159bxe_interrupt_attach(struct bxe_softc *sc)
9160{
9161    struct bxe_fastpath *fp;
9162    int rc = 0;
9163    int i;
9164
9165    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9166             "bxe%d_sp_tq", sc->unit);
9167    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9168    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9169                                 taskqueue_thread_enqueue,
9170                                 &sc->sp_tq);
9171    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9172                            "%s", sc->sp_tq_name);
9173
9174
9175    for (i = 0; i < sc->num_queues; i++) {
9176        fp = &sc->fp[i];
9177        snprintf(fp->tq_name, sizeof(fp->tq_name),
9178                 "bxe%d_fp%d_tq", sc->unit, i);
9179        NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9180        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9181        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9182                                  taskqueue_thread_enqueue,
9183                                  &fp->tq);
9184        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9185                          bxe_tx_mq_start_deferred, fp);
9186        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9187                                "%s", fp->tq_name);
9188    }
9189
9190    /* setup interrupt handlers */
9191    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9192        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9193
9194        /*
9195         * Setup the interrupt handler. Note that we pass the driver instance
9196         * to the interrupt handler for the slowpath.
9197         */
9198        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9199                                 (INTR_TYPE_NET | INTR_MPSAFE),
9200                                 NULL, bxe_intr_sp, sc,
9201                                 &sc->intr[0].tag)) != 0) {
9202            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9203            goto bxe_interrupt_attach_exit;
9204        }
9205
9206        bus_describe_intr(sc->dev, sc->intr[0].resource,
9207                          sc->intr[0].tag, "sp");
9208
9209        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9210
9211        /* initialize the fastpath vectors (note the first was used for sp) */
9212        for (i = 0; i < sc->num_queues; i++) {
9213            fp = &sc->fp[i];
9214            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9215
9216            /*
9217             * Setup the interrupt handler. Note that we pass the
9218             * fastpath context to the interrupt handler in this
9219             * case.
9220             */
9221            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9222                                     (INTR_TYPE_NET | INTR_MPSAFE),
9223                                     NULL, bxe_intr_fp, fp,
9224                                     &sc->intr[i + 1].tag)) != 0) {
9225                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9226                      (i + 1), rc);
9227                goto bxe_interrupt_attach_exit;
9228            }
9229
9230            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9231                              sc->intr[i + 1].tag, "fp%02d", i);
9232
9233            /* bind the fastpath instance to a cpu */
9234            if (sc->num_queues > 1) {
9235                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9236            }
9237
9238            fp->state = BXE_FP_STATE_IRQ;
9239        }
9240    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9241        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9242
9243        /*
9244         * Setup the interrupt handler. Note that we pass the
9245         * driver instance to the interrupt handler which
9246         * will handle both the slowpath and fastpath.
9247         */
9248        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9249                                 (INTR_TYPE_NET | INTR_MPSAFE),
9250                                 NULL, bxe_intr_legacy, sc,
9251                                 &sc->intr[0].tag)) != 0) {
9252            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9253            goto bxe_interrupt_attach_exit;
9254        }
9255
9256    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9257        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9258
9259        /*
9260         * Setup the interrupt handler. Note that we pass the
9261         * driver instance to the interrupt handler which
9262         * will handle both the slowpath and fastpath.
9263         */
9264        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9265                                 (INTR_TYPE_NET | INTR_MPSAFE),
9266                                 NULL, bxe_intr_legacy, sc,
9267                                 &sc->intr[0].tag)) != 0) {
9268            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9269            goto bxe_interrupt_attach_exit;
9270        }
9271    }
9272
9273bxe_interrupt_attach_exit:
9274
9275    return (rc);
9276}
9277
9278static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9279static int  bxe_init_hw_common(struct bxe_softc *sc);
9280static int  bxe_init_hw_port(struct bxe_softc *sc);
9281static int  bxe_init_hw_func(struct bxe_softc *sc);
9282static void bxe_reset_common(struct bxe_softc *sc);
9283static void bxe_reset_port(struct bxe_softc *sc);
9284static void bxe_reset_func(struct bxe_softc *sc);
9285static int  bxe_gunzip_init(struct bxe_softc *sc);
9286static void bxe_gunzip_end(struct bxe_softc *sc);
9287static int  bxe_init_firmware(struct bxe_softc *sc);
9288static void bxe_release_firmware(struct bxe_softc *sc);
9289
9290static struct
9291ecore_func_sp_drv_ops bxe_func_sp_drv = {
9292    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9293    .init_hw_cmn      = bxe_init_hw_common,
9294    .init_hw_port     = bxe_init_hw_port,
9295    .init_hw_func     = bxe_init_hw_func,
9296
9297    .reset_hw_cmn     = bxe_reset_common,
9298    .reset_hw_port    = bxe_reset_port,
9299    .reset_hw_func    = bxe_reset_func,
9300
9301    .gunzip_init      = bxe_gunzip_init,
9302    .gunzip_end       = bxe_gunzip_end,
9303
9304    .init_fw          = bxe_init_firmware,
9305    .release_fw       = bxe_release_firmware,
9306};
9307
9308static void
9309bxe_init_func_obj(struct bxe_softc *sc)
9310{
9311    sc->dmae_ready = 0;
9312
9313    ecore_init_func_obj(sc,
9314                        &sc->func_obj,
9315                        BXE_SP(sc, func_rdata),
9316                        BXE_SP_MAPPING(sc, func_rdata),
9317                        BXE_SP(sc, func_afex_rdata),
9318                        BXE_SP_MAPPING(sc, func_afex_rdata),
9319                        &bxe_func_sp_drv);
9320}
9321
9322static int
9323bxe_init_hw(struct bxe_softc *sc,
9324            uint32_t         load_code)
9325{
9326    struct ecore_func_state_params func_params = { NULL };
9327    int rc;
9328
9329    /* prepare the parameters for function state transitions */
9330    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9331
9332    func_params.f_obj = &sc->func_obj;
9333    func_params.cmd = ECORE_F_CMD_HW_INIT;
9334
9335    func_params.params.hw_init.load_phase = load_code;
9336
9337    /*
9338     * Via a plethora of function pointers, we will eventually reach
9339     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9340     */
9341    rc = ecore_func_state_change(sc, &func_params);
9342
9343    return (rc);
9344}
9345
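/*
 * Fill a region of chip memory with 'fill'. When both the address and the
 * length are dword aligned the value is written 32 bits at a time,
 * otherwise it is written one byte at a time.
 */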
9346static void
9347bxe_fill(struct bxe_softc *sc,
9348         uint32_t         addr,
9349         int              fill,
9350         uint32_t         len)
9351{
9352    uint32_t i;
9353
9354    if (!(len % 4) && !(addr % 4)) {
9355        for (i = 0; i < len; i += 4) {
9356            REG_WR(sc, (addr + i), fill);
9357        }
9358    } else {
9359        for (i = 0; i < len; i++) {
9360            REG_WR8(sc, (addr + i), fill);
9361        }
9362    }
9363}
9364
9365/* writes FP SP data to FW - data_size in dwords */
9366static void
9367bxe_wr_fp_sb_data(struct bxe_softc *sc,
9368                  int              fw_sb_id,
9369                  uint32_t         *sb_data_p,
9370                  uint32_t         data_size)
9371{
9372    int index;
9373
9374    for (index = 0; index < data_size; index++) {
9375        REG_WR(sc,
9376               (BAR_CSTRORM_INTMEM +
9377                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9378                (sizeof(uint32_t) * index)),
9379               *(sb_data_p + index));
9380    }
9381}
9382
9383static void
9384bxe_zero_fp_sb(struct bxe_softc *sc,
9385               int              fw_sb_id)
9386{
9387    struct hc_status_block_data_e2 sb_data_e2;
9388    struct hc_status_block_data_e1x sb_data_e1x;
9389    uint32_t *sb_data_p;
9390    uint32_t data_size = 0;
9391
9392    if (!CHIP_IS_E1x(sc)) {
9393        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9394        sb_data_e2.common.state = SB_DISABLED;
9395        sb_data_e2.common.p_func.vf_valid = FALSE;
9396        sb_data_p = (uint32_t *)&sb_data_e2;
9397        data_size = (sizeof(struct hc_status_block_data_e2) /
9398                     sizeof(uint32_t));
9399    } else {
9400        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9401        sb_data_e1x.common.state = SB_DISABLED;
9402        sb_data_e1x.common.p_func.vf_valid = FALSE;
9403        sb_data_p = (uint32_t *)&sb_data_e1x;
9404        data_size = (sizeof(struct hc_status_block_data_e1x) /
9405                     sizeof(uint32_t));
9406    }
9407
9408    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9409
9410    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9411             0, CSTORM_STATUS_BLOCK_SIZE);
9412    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9413             0, CSTORM_SYNC_BLOCK_SIZE);
9414}
9415
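/* writes SP (slowpath) status block data to FW */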
9416static void
9417bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9418                  struct hc_sp_status_block_data *sp_sb_data)
9419{
9420    int i;
9421
9422    for (i = 0;
9423         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9424         i++) {
9425        REG_WR(sc,
9426               (BAR_CSTRORM_INTMEM +
9427                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9428                (i * sizeof(uint32_t))),
9429               *((uint32_t *)sp_sb_data + i));
9430    }
9431}
9432
9433static void
9434bxe_zero_sp_sb(struct bxe_softc *sc)
9435{
9436    struct hc_sp_status_block_data sp_sb_data;
9437
9438    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9439
9440    sp_sb_data.state           = SB_DISABLED;
9441    sp_sb_data.p_func.vf_valid = FALSE;
9442
9443    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9444
9445    bxe_fill(sc,
9446             (BAR_CSTRORM_INTMEM +
9447              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9448              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9449    bxe_fill(sc,
9450             (BAR_CSTRORM_INTMEM +
9451              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9452              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9453}
9454
9455static void
9456bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9457                             int                       igu_sb_id,
9458                             int                       igu_seg_id)
9459{
9460    hc_sm->igu_sb_id      = igu_sb_id;
9461    hc_sm->igu_seg_id     = igu_seg_id;
9462    hc_sm->timer_value    = 0xFF;
9463    hc_sm->time_to_expire = 0xFFFFFFFF;
9464}
9465
9466static void
9467bxe_map_sb_state_machines(struct hc_index_data *index_data)
9468{
9469    /* zero out state machine indices */
9470
9471    /* rx indices */
9472    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9473
9474    /* tx indices */
9475    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9476    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9477    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9478    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9479
9480    /* map indices */
9481
9482    /* rx indices */
9483    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9484        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9485
9486    /* tx indices */
9487    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9488        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9489    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9490        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9491    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9492        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9493    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9494        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9495}
9496
9497static void
9498bxe_init_sb(struct bxe_softc *sc,
9499            bus_addr_t       busaddr,
9500            int              vfid,
9501            uint8_t          vf_valid,
9502            int              fw_sb_id,
9503            int              igu_sb_id)
9504{
9505    struct hc_status_block_data_e2  sb_data_e2;
9506    struct hc_status_block_data_e1x sb_data_e1x;
9507    struct hc_status_block_sm       *hc_sm_p;
9508    uint32_t *sb_data_p;
9509    int igu_seg_id;
9510    int data_size;
9511
9512    if (CHIP_INT_MODE_IS_BC(sc)) {
9513        igu_seg_id = HC_SEG_ACCESS_NORM;
9514    } else {
9515        igu_seg_id = IGU_SEG_ACCESS_NORM;
9516    }
9517
9518    bxe_zero_fp_sb(sc, fw_sb_id);
9519
9520    if (!CHIP_IS_E1x(sc)) {
9521        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9522        sb_data_e2.common.state = SB_ENABLED;
9523        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9524        sb_data_e2.common.p_func.vf_id = vfid;
9525        sb_data_e2.common.p_func.vf_valid = vf_valid;
9526        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9527        sb_data_e2.common.same_igu_sb_1b = TRUE;
9528        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9529        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9530        hc_sm_p = sb_data_e2.common.state_machine;
9531        sb_data_p = (uint32_t *)&sb_data_e2;
9532        data_size = (sizeof(struct hc_status_block_data_e2) /
9533                     sizeof(uint32_t));
9534        bxe_map_sb_state_machines(sb_data_e2.index_data);
9535    } else {
9536        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9537        sb_data_e1x.common.state = SB_ENABLED;
9538        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9539        sb_data_e1x.common.p_func.vf_id = 0xff;
9540        sb_data_e1x.common.p_func.vf_valid = FALSE;
9541        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9542        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9543        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9544        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9545        hc_sm_p = sb_data_e1x.common.state_machine;
9546        sb_data_p = (uint32_t *)&sb_data_e1x;
9547        data_size = (sizeof(struct hc_status_block_data_e1x) /
9548                     sizeof(uint32_t));
9549        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9550    }
9551
9552    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9553    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9554
9555    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9556
    /* write indices to HW - PCI guarantees endianness of regpairs */
9558    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9559}
9560
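/* return the client queue zone id for this fastpath */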
9561static inline uint8_t
9562bxe_fp_qzone_id(struct bxe_fastpath *fp)
9563{
9564    if (CHIP_IS_E1x(fp->sc)) {
9565        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9566    } else {
9567        return (fp->cl_id);
9568    }
9569}
9570
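/* return the USTORM offset of this fastpath's RX producers */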
9571static inline uint32_t
9572bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9573                           struct bxe_fastpath *fp)
9574{
9575    uint32_t offset = BAR_USTRORM_INTMEM;
9576
9577    if (!CHIP_IS_E1x(sc)) {
9578        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9579    } else {
9580        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9581    }
9582
9583    return (offset);
9584}
9585
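/*
 * Initialize an ethernet fastpath: assign its client/SB ids, set up the
 * status block index shortcuts and, for the PF, initialize the status
 * block and the queue/MAC ecore objects.
 */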
9586static void
9587bxe_init_eth_fp(struct bxe_softc *sc,
9588                int              idx)
9589{
9590    struct bxe_fastpath *fp = &sc->fp[idx];
9591    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9592    unsigned long q_type = 0;
9593    int cos;
9594
9595    fp->sc    = sc;
9596    fp->index = idx;
9597
9598    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9599    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9600
9601    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9602                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1x */
9604                    fp->igu_sb_id;
9605    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9606
9607    /* setup sb indices */
9608    if (!CHIP_IS_E1x(sc)) {
9609        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9610        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9611    } else {
9612        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9613        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9614    }
9615
9616    /* init shortcut */
9617    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9618
9619    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9620
9621    /*
9622     * XXX If multiple CoS is ever supported then each fastpath structure
9623     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9624     */
9625    for (cos = 0; cos < sc->max_cos; cos++) {
9626        cids[cos] = idx;
9627    }
9628    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9629
9630    /* nothing more for a VF to do */
9631    if (IS_VF(sc)) {
9632        return;
9633    }
9634
9635    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9636                fp->fw_sb_id, fp->igu_sb_id);
9637
9638    bxe_update_fp_sb_idx(fp);
9639
9640    /* Configure Queue State object */
9641    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9642    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9643
9644    ecore_init_queue_obj(sc,
9645                         &sc->sp_objs[idx].q_obj,
9646                         fp->cl_id,
9647                         cids,
9648                         sc->max_cos,
9649                         SC_FUNC(sc),
9650                         BXE_SP(sc, q_rdata),
9651                         BXE_SP_MAPPING(sc, q_rdata),
9652                         q_type);
9653
9654    /* configure classification DBs */
9655    ecore_init_mac_obj(sc,
9656                       &sc->sp_objs[idx].mac_obj,
9657                       fp->cl_id,
9658                       idx,
9659                       SC_FUNC(sc),
9660                       BXE_SP(sc, mac_rdata),
9661                       BXE_SP_MAPPING(sc, mac_rdata),
9662                       ECORE_FILTER_MAC_PENDING,
9663                       &sc->sp_state,
9664                       ECORE_OBJ_TYPE_RX_TX,
9665                       &sc->macs_pool);
9666
9667    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9668          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9669}
9670
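/* write the RX BD/CQE/SGE producers of a fastpath to the chip */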
9671static inline void
9672bxe_update_rx_prod(struct bxe_softc    *sc,
9673                   struct bxe_fastpath *fp,
9674                   uint16_t            rx_bd_prod,
9675                   uint16_t            rx_cq_prod,
9676                   uint16_t            rx_sge_prod)
9677{
9678    struct ustorm_eth_rx_producers rx_prods = { 0 };
9679    uint32_t i;
9680
9681    /* update producers */
9682    rx_prods.bd_prod  = rx_bd_prod;
9683    rx_prods.cqe_prod = rx_cq_prod;
9684    rx_prods.sge_prod = rx_sge_prod;
9685
9686    /*
9687     * Make sure that the BD and SGE data is updated before updating the
9688     * producers since FW might read the BD/SGE right after the producer
9689     * is updated.
9690     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
9693     */
9694    wmb();
9695
9696    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9697        REG_WR(sc,
9698               (fp->ustorm_rx_prods_offset + (i * 4)),
9699               ((uint32_t *)&rx_prods)[i]);
9700    }
9701
9702    wmb(); /* keep prod updates ordered */
9703
9704    BLOGD(sc, DBG_RX,
9705          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9706          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9707}
9708
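/* activate the RX rings by publishing the initial producers for each queue */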
9709static void
9710bxe_init_rx_rings(struct bxe_softc *sc)
9711{
9712    struct bxe_fastpath *fp;
9713    int i;
9714
9715    for (i = 0; i < sc->num_queues; i++) {
9716        fp = &sc->fp[i];
9717
9718        fp->rx_bd_cons = 0;
9719
9720        /*
9721         * Activate the BD ring...
9722         * Warning, this will generate an interrupt (to the TSTORM)
9723         * so this can only be done after the chip is initialized
9724         */
9725        bxe_update_rx_prod(sc, fp,
9726                           fp->rx_bd_prod,
9727                           fp->rx_cq_prod,
9728                           fp->rx_sge_prod);
9729
9730        if (i != 0) {
9731            continue;
9732        }
9733
9734        if (CHIP_IS_E1(sc)) {
9735            REG_WR(sc,
9736                   (BAR_USTRORM_INTMEM +
9737                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9738                   U64_LO(fp->rcq_dma.paddr));
9739            REG_WR(sc,
9740                   (BAR_USTRORM_INTMEM +
9741                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9742                   U64_HI(fp->rcq_dma.paddr));
9743        }
9744    }
9745}
9746
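/* reset the TX doorbell and producer/consumer indices of one fastpath */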
9747static void
9748bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9749{
9750    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9751    fp->tx_db.data.zero_fill1 = 0;
9752    fp->tx_db.data.prod = 0;
9753
9754    fp->tx_pkt_prod = 0;
9755    fp->tx_pkt_cons = 0;
9756    fp->tx_bd_prod = 0;
9757    fp->tx_bd_cons = 0;
9758    fp->eth_q_stats.tx_pkts = 0;
9759}
9760
9761static inline void
9762bxe_init_tx_rings(struct bxe_softc *sc)
9763{
9764    int i;
9765
9766    for (i = 0; i < sc->num_queues; i++) {
9767        bxe_init_tx_ring_one(&sc->fp[i]);
9768    }
9769}
9770
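/*
 * Initialize the default (slowpath) status block: set up the attention
 * groups and attention message addresses, write the slowpath SB data and
 * ack the default SB in the IGU.
 */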
9771static void
9772bxe_init_def_sb(struct bxe_softc *sc)
9773{
9774    struct host_sp_status_block *def_sb = sc->def_sb;
9775    bus_addr_t mapping = sc->def_sb_dma.paddr;
9776    int igu_sp_sb_index;
9777    int igu_seg_id;
9778    int port = SC_PORT(sc);
9779    int func = SC_FUNC(sc);
9780    int reg_offset, reg_offset_en5;
9781    uint64_t section;
9782    int index, sindex;
9783    struct hc_sp_status_block_data sp_sb_data;
9784
9785    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9786
9787    if (CHIP_INT_MODE_IS_BC(sc)) {
9788        igu_sp_sb_index = DEF_SB_IGU_ID;
9789        igu_seg_id = HC_SEG_ACCESS_DEF;
9790    } else {
9791        igu_sp_sb_index = sc->igu_dsb_id;
9792        igu_seg_id = IGU_SEG_ACCESS_DEF;
9793    }
9794
9795    /* attentions */
9796    section = ((uint64_t)mapping +
9797               offsetof(struct host_sp_status_block, atten_status_block));
9798    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9799    sc->attn_state = 0;
9800
9801    reg_offset = (port) ?
9802                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9803                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9804    reg_offset_en5 = (port) ?
9805                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9806                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9807
9808    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9809        /* take care of sig[0]..sig[4] */
9810        for (sindex = 0; sindex < 4; sindex++) {
9811            sc->attn_group[index].sig[sindex] =
9812                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9813        }
9814
9815        if (!CHIP_IS_E1x(sc)) {
9816            /*
9817             * enable5 is separate from the rest of the registers,
9818             * and the address skip is 4 and not 16 between the
9819             * different groups
9820             */
9821            sc->attn_group[index].sig[4] =
9822                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9823        } else {
9824            sc->attn_group[index].sig[4] = 0;
9825        }
9826    }
9827
9828    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9829        reg_offset = (port) ?
9830                         HC_REG_ATTN_MSG1_ADDR_L :
9831                         HC_REG_ATTN_MSG0_ADDR_L;
9832        REG_WR(sc, reg_offset, U64_LO(section));
9833        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9834    } else if (!CHIP_IS_E1x(sc)) {
9835        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9836        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9837    }
9838
9839    section = ((uint64_t)mapping +
9840               offsetof(struct host_sp_status_block, sp_sb));
9841
9842    bxe_zero_sp_sb(sc);
9843
    /* PCI guarantees endianness of regpairs */
9845    sp_sb_data.state           = SB_ENABLED;
9846    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9847    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9848    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9849    sp_sb_data.igu_seg_id      = igu_seg_id;
9850    sp_sb_data.p_func.pf_id    = func;
9851    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9852    sp_sb_data.p_func.vf_id    = 0xff;
9853
9854    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9855
9856    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9857}
9858
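/* initialize the slowpath queue (SPQ) producer/consumer state */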
9859static void
9860bxe_init_sp_ring(struct bxe_softc *sc)
9861{
9862    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9863    sc->spq_prod_idx = 0;
9864    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9865    sc->spq_prod_bd = sc->spq;
9866    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9867}
9868
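/* chain the event queue pages together and reset the EQ indices */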
9869static void
9870bxe_init_eq_ring(struct bxe_softc *sc)
9871{
9872    union event_ring_elem *elem;
9873    int i;
9874
9875    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9876        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9877
9878        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9879                                                 BCM_PAGE_SIZE *
9880                                                 (i % NUM_EQ_PAGES)));
9881        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9882                                                 BCM_PAGE_SIZE *
9883                                                 (i % NUM_EQ_PAGES)));
9884    }
9885
9886    sc->eq_cons    = 0;
9887    sc->eq_prod    = NUM_EQ_DESC;
9888    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9889
9890    atomic_store_rel_long(&sc->eq_spq_left,
9891                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9892                               NUM_EQ_DESC) - 1));
9893}
9894
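/* chip-common internal memory initialization, done only on the COMMON load */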
9895static void
9896bxe_init_internal_common(struct bxe_softc *sc)
9897{
9898    int i;
9899
9900    /*
9901     * Zero this manually as its initialization is currently missing
9902     * in the initTool.
9903     */
9904    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9905        REG_WR(sc,
9906               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9907               0);
9908    }
9909
9910    if (!CHIP_IS_E1x(sc)) {
9911        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9912                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9913    }
9914}
9915
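/* dispatch internal memory initialization based on the MCP load code */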
9916static void
9917bxe_init_internal(struct bxe_softc *sc,
9918                  uint32_t         load_code)
9919{
9920    switch (load_code) {
9921    case FW_MSG_CODE_DRV_LOAD_COMMON:
9922    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9923        bxe_init_internal_common(sc);
9924        /* no break */
9925
9926    case FW_MSG_CODE_DRV_LOAD_PORT:
9927        /* nothing to do */
9928        /* no break */
9929
9930    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9931        /* internal memory per function is initialized inside bxe_pf_init */
9932        break;
9933
9934    default:
9935        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9936        break;
9937    }
9938}
9939
9940static void
9941storm_memset_func_cfg(struct bxe_softc                         *sc,
9942                      struct tstorm_eth_function_common_config *tcfg,
9943                      uint16_t                                  abs_fid)
9944{
9945    uint32_t addr;
9946    size_t size;
9947
9948    addr = (BAR_TSTRORM_INTMEM +
9949            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9950    size = sizeof(struct tstorm_eth_function_common_config);
9951    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9952}
9953
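/* enable the function in the FW and program its SPQ address and producer */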
9954static void
9955bxe_func_init(struct bxe_softc            *sc,
9956              struct bxe_func_init_params *p)
9957{
9958    struct tstorm_eth_function_common_config tcfg = { 0 };
9959
9960    if (CHIP_IS_E1x(sc)) {
9961        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9962    }
9963
9964    /* Enable the function in the FW */
9965    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9966    storm_memset_func_en(sc, p->func_id, 1);
9967
9968    /* spq */
9969    if (p->func_flgs & FUNC_FLG_SPQ) {
9970        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9971        REG_WR(sc,
9972               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9973               p->spq_prod);
9974    }
9975}
9976
/*
 * Calculates the per-VN min rates needed for further normalization of
 * the min_rates and stores them in the cmng input. If ETS is enabled or
 * all of the configured min rates are zero, the fairness algorithm is
 * deactivated. Otherwise, any VN whose configured min rate is zero is
 * assigned the default minimum rate.
 */
9987static void
9988bxe_calc_vn_min(struct bxe_softc       *sc,
9989                struct cmng_init_input *input)
9990{
9991    uint32_t vn_cfg;
9992    uint32_t vn_min_rate;
9993    int all_zero = 1;
9994    int vn;
9995
9996    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9997        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9998        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9999                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10000
10001        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10002            /* skip hidden VNs */
10003            vn_min_rate = 0;
10004        } else if (!vn_min_rate) {
10005            /* If min rate is zero - set it to 100 */
10006            vn_min_rate = DEF_MIN_RATE;
10007        } else {
10008            all_zero = 0;
10009        }
10010
10011        input->vnic_min_rate[vn] = vn_min_rate;
10012    }
10013
10014    /* if ETS or all min rates are zeros - disable fairness */
10015    if (BXE_IS_ETS_ENABLED(sc)) {
10016        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10017        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10018    } else if (all_zero) {
10019        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10020        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
10022    } else {
10023        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10024    }
10025}
10026
10027static inline uint16_t
10028bxe_extract_max_cfg(struct bxe_softc *sc,
10029                    uint32_t         mf_cfg)
10030{
10031    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10032                        FUNC_MF_CFG_MAX_BW_SHIFT);
10033
10034    if (!max_cfg) {
10035        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10036        max_cfg = 100;
10037    }
10038
10039    return (max_cfg);
10040}
10041
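/* compute the max rate of one VN from its MF config and the link speed */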
10042static void
10043bxe_calc_vn_max(struct bxe_softc       *sc,
10044                int                    vn,
10045                struct cmng_init_input *input)
10046{
10047    uint16_t vn_max_rate;
10048    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10049    uint32_t max_cfg;
10050
10051    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10052        vn_max_rate = 0;
10053    } else {
10054        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10055
10056        if (IS_MF_SI(sc)) {
10057            /* max_cfg in percents of linkspeed */
10058            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10059        } else { /* SD modes */
10060            /* max_cfg is absolute in 100Mb units */
10061            vn_max_rate = (max_cfg * 100);
10062        }
10063    }
10064
10065    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10066
10067    input->vnic_max_rate[vn] = vn_max_rate;
10068}
10069
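/*
 * Initialize congestion management: compute the per-VN min/max rates and
 * fill the cmng structure (min/max mode only).
 */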
10070static void
10071bxe_cmng_fns_init(struct bxe_softc *sc,
10072                  uint8_t          read_cfg,
10073                  uint8_t          cmng_type)
10074{
10075    struct cmng_init_input input;
10076    int vn;
10077
10078    memset(&input, 0, sizeof(struct cmng_init_input));
10079
10080    input.port_rate = sc->link_vars.line_speed;
10081
10082    if (cmng_type == CMNG_FNS_MINMAX) {
10083        /* read mf conf from shmem */
10084        if (read_cfg) {
10085            bxe_read_mf_cfg(sc);
10086        }
10087
10088        /* get VN min rate and enable fairness if not 0 */
10089        bxe_calc_vn_min(sc, &input);
10090
10091        /* get VN max rate */
10092        if (sc->port.pmf) {
10093            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10094                bxe_calc_vn_max(sc, vn, &input);
10095            }
10096        }
10097
10098        /* always enable rate shaping and fairness */
10099        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10100
10101        ecore_init_cmng(&input, &sc->cmng);
10102        return;
10103    }
10104
10105    /* rate shaping and fairness are disabled */
10106    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10107}
10108
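/* return the congestion management mode: min/max in MF mode, else none */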
10109static int
10110bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10111{
10112    if (CHIP_REV_IS_SLOW(sc)) {
10113        return (CMNG_FNS_NONE);
10114    }
10115
10116    if (IS_MF(sc)) {
10117        return (CMNG_FNS_MINMAX);
10118    }
10119
10120    return (CMNG_FNS_NONE);
10121}
10122
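/* write the per-port and per-VN congestion management data to XSTORM */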
10123static void
10124storm_memset_cmng(struct bxe_softc *sc,
10125                  struct cmng_init *cmng,
10126                  uint8_t          port)
10127{
10128    int vn;
10129    int func;
10130    uint32_t addr;
10131    size_t size;
10132
10133    addr = (BAR_XSTRORM_INTMEM +
10134            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10135    size = sizeof(struct cmng_struct_per_port);
10136    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10137
10138    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10139        func = func_by_vn(sc, vn);
10140
10141        addr = (BAR_XSTRORM_INTMEM +
10142                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10143        size = sizeof(struct rate_shaping_vars_per_vn);
10144        ecore_storm_memset_struct(sc, addr, size,
10145                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10146
10147        addr = (BAR_XSTRORM_INTMEM +
10148                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10149        size = sizeof(struct fairness_vars_per_vn);
10150        ecore_storm_memset_struct(sc, addr, size,
10151                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10152    }
10153}
10154
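/*
 * Per-function HW/FW initialization: reset the IGU statistic counters,
 * enable the function in the FW, set up congestion management and program
 * the event queue data.
 */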
10155static void
10156bxe_pf_init(struct bxe_softc *sc)
10157{
10158    struct bxe_func_init_params func_init = { 0 };
10159    struct event_ring_data eq_data = { { 0 } };
10160    uint16_t flags;
10161
10162    if (!CHIP_IS_E1x(sc)) {
10163        /* reset IGU PF statistics: MSIX + ATTN */
10164        /* PF */
10165        REG_WR(sc,
10166               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10167                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10168                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10169               0);
10170        /* ATTN */
10171        REG_WR(sc,
10172               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10173                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10174                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10175                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10176               0);
10177    }
10178
10179    /* function setup flags */
10180    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10181
10182    /*
10183     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration at the function level.
10185     */
10186    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10187
10188    func_init.func_flgs = flags;
10189    func_init.pf_id     = SC_FUNC(sc);
10190    func_init.func_id   = SC_FUNC(sc);
10191    func_init.spq_map   = sc->spq_dma.paddr;
10192    func_init.spq_prod  = sc->spq_prod_idx;
10193
10194    bxe_func_init(sc, &func_init);
10195
10196    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10197
10198    /*
10199     * Congestion management values depend on the link rate.
10200     * There is no active link so initial link rate is set to 10Gbps.
10201     * When the link comes up the congestion management values are
10202     * re-calculated according to the actual link rate.
10203     */
10204    sc->link_vars.line_speed = SPEED_10000;
10205    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10206
10207    /* Only the PMF sets the HW */
10208    if (sc->port.pmf) {
10209        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10210    }
10211
    /* init Event Queue - PCI bus guarantees correct endianness */
10213    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10214    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10215    eq_data.producer     = sc->eq_prod;
10216    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10217    eq_data.sb_id        = DEF_SB_ID;
10218    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10219}
10220
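/* enable interrupts in the HC block according to the interrupt mode */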
10221static void
10222bxe_hc_int_enable(struct bxe_softc *sc)
10223{
10224    int port = SC_PORT(sc);
10225    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10226    uint32_t val = REG_RD(sc, addr);
10227    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10228    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10229                           (sc->intr_count == 1)) ? TRUE : FALSE;
10230    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10231
10232    if (msix) {
10233        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10234                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10235        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10236                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10237        if (single_msix) {
10238            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10239        }
10240    } else if (msi) {
10241        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10242        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10243                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10244                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10245    } else {
10246        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10247                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10248                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10249                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10250
10251        if (!CHIP_IS_E1(sc)) {
10252            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10253                  val, port, addr);
10254
10255            REG_WR(sc, addr, val);
10256
10257            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10258        }
10259    }
10260
10261    if (CHIP_IS_E1(sc)) {
10262        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10263    }
10264
10265    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10266          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10267
10268    REG_WR(sc, addr, val);
10269
10270    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10271    mb();
10272
10273    if (!CHIP_IS_E1(sc)) {
10274        /* init leading/trailing edge */
10275        if (IS_MF(sc)) {
10276            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10277            if (sc->port.pmf) {
10278                /* enable nig and gpio3 attention */
10279                val |= 0x1100;
10280            }
10281        } else {
10282            val = 0xffff;
10283        }
10284
10285        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10286        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10287    }
10288
10289    /* make sure that interrupts are indeed enabled from here on */
10290    mb();
10291}
10292
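/* enable interrupts in the IGU block according to the interrupt mode */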
10293static void
10294bxe_igu_int_enable(struct bxe_softc *sc)
10295{
10296    uint32_t val;
10297    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10298    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10299                           (sc->intr_count == 1)) ? TRUE : FALSE;
10300    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10301
10302    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10303
10304    if (msix) {
10305        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10306                 IGU_PF_CONF_SINGLE_ISR_EN);
10307        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10308                IGU_PF_CONF_ATTN_BIT_EN);
10309        if (single_msix) {
10310            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10311        }
10312    } else if (msi) {
10313        val &= ~IGU_PF_CONF_INT_LINE_EN;
10314        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10315                IGU_PF_CONF_ATTN_BIT_EN |
10316                IGU_PF_CONF_SINGLE_ISR_EN);
10317    } else {
10318        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10319        val |= (IGU_PF_CONF_INT_LINE_EN |
10320                IGU_PF_CONF_ATTN_BIT_EN |
10321                IGU_PF_CONF_SINGLE_ISR_EN);
10322    }
10323
    /* clean previous status - need to configure IGU prior to ack */
10325    if ((!msix) || single_msix) {
10326        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10327        bxe_ack_int(sc);
10328    }
10329
10330    val |= IGU_PF_CONF_FUNC_EN;
10331
10332    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10333          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10334
10335    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10336
10337    mb();
10338
10339    /* init leading/trailing edge */
10340    if (IS_MF(sc)) {
10341        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10342        if (sc->port.pmf) {
10343            /* enable nig and gpio3 attention */
10344            val |= 0x1100;
10345        }
10346    } else {
10347        val = 0xffff;
10348    }
10349
10350    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10351    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10352
10353    /* make sure that interrupts are indeed enabled from here on */
10354    mb();
10355}
10356
10357static void
10358bxe_int_enable(struct bxe_softc *sc)
10359{
10360    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10361        bxe_hc_int_enable(sc);
10362    } else {
10363        bxe_igu_int_enable(sc);
10364    }
10365}
10366
10367static void
10368bxe_hc_int_disable(struct bxe_softc *sc)
10369{
10370    int port = SC_PORT(sc);
10371    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10372    uint32_t val = REG_RD(sc, addr);
10373
10374    /*
10375     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
     * HC block.
10378     */
10379    if (CHIP_IS_E1(sc)) {
10380        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
         * register to prevent the HC from sending interrupts after we exit
         * this function.
10383         */
10384        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10385
10386        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10387                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10388                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10389    } else {
10390        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10391                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10392                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10393                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10394    }
10395
10396    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10397
10398    /* flush all outstanding writes */
10399    mb();
10400
10401    REG_WR(sc, addr, val);
10402    if (REG_RD(sc, addr) != val) {
10403        BLOGE(sc, "proper val not read from HC IGU!\n");
10404    }
10405}
10406
10407static void
10408bxe_igu_int_disable(struct bxe_softc *sc)
10409{
10410    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10411
10412    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10413             IGU_PF_CONF_INT_LINE_EN |
10414             IGU_PF_CONF_ATTN_BIT_EN);
10415
10416    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10417
10418    /* flush all outstanding writes */
10419    mb();
10420
10421    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10422    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10423        BLOGE(sc, "proper val not read from IGU!\n");
10424    }
10425}
10426
10427static void
10428bxe_int_disable(struct bxe_softc *sc)
10429{
10430    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10431        bxe_hc_int_disable(sc);
10432    } else {
10433        bxe_igu_int_disable(sc);
10434    }
10435}
10436
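/*
 * Initialize the NIC after a successful firmware load: set up the fastpath
 * and default status blocks, the RX/TX/SP/EQ rings, internal memory and PF
 * state, then enable interrupts.
 */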
10437static void
10438bxe_nic_init(struct bxe_softc *sc,
10439             int              load_code)
10440{
10441    int i;
10442
10443    for (i = 0; i < sc->num_queues; i++) {
10444        bxe_init_eth_fp(sc, i);
10445    }
10446
10447    rmb(); /* ensure status block indices were read */
10448
10449    bxe_init_rx_rings(sc);
10450    bxe_init_tx_rings(sc);
10451
10452    if (IS_VF(sc)) {
10453        return;
10454    }
10455
10456    /* initialize MOD_ABS interrupts */
10457    elink_init_mod_abs_int(sc, &sc->link_vars,
10458                           sc->devinfo.chip_id,
10459                           sc->devinfo.shmem_base,
10460                           sc->devinfo.shmem2_base,
10461                           SC_PORT(sc));
10462
10463    bxe_init_def_sb(sc);
10464    bxe_update_dsb_idx(sc);
10465    bxe_init_sp_ring(sc);
10466    bxe_init_eq_ring(sc);
10467    bxe_init_internal(sc, load_code);
10468    bxe_pf_init(sc);
10469    bxe_stats_init(sc);
10470
10471    /* flush all before enabling interrupts */
10472    mb();
10473
10474    bxe_int_enable(sc);
10475
10476    /* check for SPIO5 */
10477    bxe_attn_int_deasserted0(sc,
10478                             REG_RD(sc,
10479                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10480                                     SC_PORT(sc)*4)) &
10481                             AEU_INPUTS_ATTN_BITS_SPIO5);
10482}
10483
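/* initialize the ecore objects: RX mode, multicast, CAM credit pools, RSS */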
10484static inline void
10485bxe_init_objs(struct bxe_softc *sc)
10486{
10487    /* mcast rules must be added to tx if tx switching is enabled */
10488    ecore_obj_type o_type =
10489        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10490                                         ECORE_OBJ_TYPE_RX;
10491
10492    /* RX_MODE controlling object */
10493    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10494
10495    /* multicast configuration controlling object */
10496    ecore_init_mcast_obj(sc,
10497                         &sc->mcast_obj,
10498                         sc->fp[0].cl_id,
10499                         sc->fp[0].index,
10500                         SC_FUNC(sc),
10501                         SC_FUNC(sc),
10502                         BXE_SP(sc, mcast_rdata),
10503                         BXE_SP_MAPPING(sc, mcast_rdata),
10504                         ECORE_FILTER_MCAST_PENDING,
10505                         &sc->sp_state,
10506                         o_type);
10507
10508    /* Setup CAM credit pools */
10509    ecore_init_mac_credit_pool(sc,
10510                               &sc->macs_pool,
10511                               SC_FUNC(sc),
10512                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10513                                                 VNICS_PER_PATH(sc));
10514
10515    ecore_init_vlan_credit_pool(sc,
10516                                &sc->vlans_pool,
10517                                SC_ABS_FUNC(sc) >> 1,
10518                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10519                                                  VNICS_PER_PATH(sc));
10520
10521    /* RSS configuration object */
10522    ecore_init_rss_config_obj(sc,
10523                              &sc->rss_conf_obj,
10524                              sc->fp[0].cl_id,
10525                              sc->fp[0].index,
10526                              SC_FUNC(sc),
10527                              SC_FUNC(sc),
10528                              BXE_SP(sc, rss_rdata),
10529                              BXE_SP_MAPPING(sc, rss_rdata),
10530                              ECORE_FILTER_RSS_CONF_PENDING,
10531                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10532}
10533
10534/*
10535 * Initialize the function. This must be called before sending CLIENT_SETUP
10536 * for the first client.
10537 */
10538static inline int
10539bxe_func_start(struct bxe_softc *sc)
10540{
10541    struct ecore_func_state_params func_params = { NULL };
10542    struct ecore_func_start_params *start_params = &func_params.params.start;
10543
10544    /* Prepare parameters for function state transitions */
10545    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10546
10547    func_params.f_obj = &sc->func_obj;
10548    func_params.cmd = ECORE_F_CMD_START;
10549
10550    /* Function parameters */
10551    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10552    start_params->sd_vlan_tag = OVLAN(sc);
10553
10554    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10555        start_params->network_cos_mode = STATIC_COS;
10556    } else { /* CHIP_IS_E1X */
10557        start_params->network_cos_mode = FW_WRR;
10558    }
10559
10560    //start_params->gre_tunnel_mode = 0;
10561    //start_params->gre_tunnel_rss  = 0;
10562
10563    return (ecore_func_state_change(sc, &func_params));
10564}
10565
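/* set the PCI power state (D0/D3hot) through the PM capability registers */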
10566static int
10567bxe_set_power_state(struct bxe_softc *sc,
10568                    uint8_t          state)
10569{
10570    uint16_t pmcsr;
10571
10572    /* If there is no power capability, silently succeed */
10573    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10574        BLOGW(sc, "No power capability\n");
10575        return (0);
10576    }
10577
10578    pmcsr = pci_read_config(sc->dev,
10579                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10580                            2);
10581
10582    switch (state) {
10583    case PCI_PM_D0:
10584        pci_write_config(sc->dev,
10585                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10586                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10587
10588        if (pmcsr & PCIM_PSTAT_DMASK) {
10589            /* delay required during transition out of D3hot */
10590            DELAY(20000);
10591        }
10592
10593        break;
10594
10595    case PCI_PM_D3hot:
10596        /* XXX if there are other clients above don't shut down the power */
10597
10598        /* don't shut down the power for emulation and FPGA */
10599        if (CHIP_REV_IS_SLOW(sc)) {
10600            return (0);
10601        }
10602
10603        pmcsr &= ~PCIM_PSTAT_DMASK;
10604        pmcsr |= PCIM_PSTAT_D3;
10605
10606        if (sc->wol) {
10607            pmcsr |= PCIM_PSTAT_PMEENABLE;
10608        }
10609
10610        pci_write_config(sc->dev,
10611                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10612                         pmcsr, 4);
10613
10614        /*
10615         * No more memory access after this point until device is brought back
10616         * to D0 state.
10617         */
10618        break;
10619
10620    default:
10621        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10622            state, pmcsr);
10623        return (-1);
10624    }
10625
10626    return (0);
10627}
10628
10629
10630/* return true if succeeded to acquire the lock */
10631static uint8_t
10632bxe_trylock_hw_lock(struct bxe_softc *sc,
10633                    uint32_t         resource)
10634{
10635    uint32_t lock_status;
10636    uint32_t resource_bit = (1 << resource);
10637    int func = SC_FUNC(sc);
10638    uint32_t hw_lock_control_reg;
10639
10640    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10641
10642    /* Validating that the resource is within range */
10643    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10644        BLOGD(sc, DBG_LOAD,
10645              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10646              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10647        return (FALSE);
10648    }
10649
10650    if (func <= 5) {
10651        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10652    } else {
10653        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10654    }
10655
10656    /* try to acquire the lock */
10657    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10658    lock_status = REG_RD(sc, hw_lock_control_reg);
10659    if (lock_status & resource_bit) {
10660        return (TRUE);
10661    }
10662
10663    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10664        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10665        lock_status, resource_bit);
10666
10667    return (FALSE);
10668}
10669
10670/*
10671 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
10673 */
10674static int
10675bxe_get_leader_lock_resource(struct bxe_softc *sc)
10676{
10677    if (SC_PATH(sc)) {
10678        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10679    } else {
10680        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10681    }
10682}
10683
10684/* try to acquire a leader lock for current engine */
10685static uint8_t
10686bxe_trylock_leader_lock(struct bxe_softc *sc)
10687{
10688    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10689}
10690
10691static int
10692bxe_release_leader_lock(struct bxe_softc *sc)
10693{
10694    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10695}
10696
10697/* close gates #2, #3 and #4 */
10698static void
10699bxe_set_234_gates(struct bxe_softc *sc,
10700                  uint8_t          close)
10701{
10702    uint32_t val;
10703
10704    /* gates #2 and #4a are closed/opened for "not E1" only */
10705    if (!CHIP_IS_E1(sc)) {
10706        /* #4 */
10707        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10708        /* #2 */
10709        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10710    }
10711
10712    /* #3 */
10713    if (CHIP_IS_E1x(sc)) {
10714        /* prevent interrupts from HC on both ports */
10715        val = REG_RD(sc, HC_REG_CONFIG_1);
10716        REG_WR(sc, HC_REG_CONFIG_1,
10717               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10718               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10719
10720        val = REG_RD(sc, HC_REG_CONFIG_0);
10721        REG_WR(sc, HC_REG_CONFIG_0,
10722               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10723               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10724    } else {
10725        /* Prevent incoming interrupts in IGU */
10726        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10727
10728        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10729               (!close) ?
10730               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10731               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10732    }
10733
10734    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10735          close ? "closing" : "opening");
10736
10737    wmb();
10738}
10739
/* poll for the pending writes bit; it should get cleared in no more than 1s */
10741static int
10742bxe_er_poll_igu_vq(struct bxe_softc *sc)
10743{
10744    uint32_t cnt = 1000;
10745    uint32_t pend_bits = 0;
10746
10747    do {
10748        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10749
10750        if (pend_bits == 0) {
10751            break;
10752        }
10753
10754        DELAY(1000);
10755    } while (--cnt > 0);
10756
10757    if (cnt == 0) {
10758        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10759        return (-1);
10760    }
10761
10762    return (0);
10763}
10764
10765#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10766
10767static void
10768bxe_clp_reset_prep(struct bxe_softc *sc,
10769                   uint32_t         *magic_val)
10770{
10771    /* Do some magic... */
10772    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10773    *magic_val = val & SHARED_MF_CLP_MAGIC;
10774    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10775}
10776
10777/* restore the value of the 'magic' bit */
10778static void
10779bxe_clp_reset_done(struct bxe_softc *sc,
10780                   uint32_t         magic_val)
10781{
10782    /* Restore the 'magic' bit value... */
10783    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10784    MFCFG_WR(sc, shared_mf_config.clp_mb,
10785              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10786}
10787
10788/* prepare for MCP reset, takes care of CLP configurations */
10789static void
10790bxe_reset_mcp_prep(struct bxe_softc *sc,
10791                   uint32_t         *magic_val)
10792{
10793    uint32_t shmem;
10794    uint32_t validity_offset;
10795
10796    /* set `magic' bit in order to save MF config */
10797    if (!CHIP_IS_E1(sc)) {
10798        bxe_clp_reset_prep(sc, magic_val);
10799    }
10800
10801    /* get shmem offset */
10802    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10803    validity_offset =
10804        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10805
10806    /* Clear validity map flags */
10807    if (shmem > 0) {
10808        REG_WR(sc, shmem + validity_offset, 0);
10809    }
10810}
10811
10812#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10813#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10814
10815static void
10816bxe_mcp_wait_one(struct bxe_softc *sc)
10817{
10818    /* special handling for emulation and FPGA (10 times longer) */
10819    if (CHIP_REV_IS_SLOW(sc)) {
10820        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10821    } else {
10822        DELAY((MCP_ONE_TIMEOUT) * 1000);
10823    }
10824}
10825
/* initialize shmem_base and wait for the validity signature to appear */
10827static int
10828bxe_init_shmem(struct bxe_softc *sc)
10829{
10830    int cnt = 0;
10831    uint32_t val = 0;
10832
10833    do {
10834        sc->devinfo.shmem_base     =
10835        sc->link_params.shmem_base =
10836            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10837
10838        if (sc->devinfo.shmem_base) {
10839            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10840            if (val & SHR_MEM_VALIDITY_MB)
10841                return (0);
10842        }
10843
10844        bxe_mcp_wait_one(sc);
10845
10846    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10847
10848    BLOGE(sc, "BAD MCP validity signature\n");
10849
10850    return (-1);
10851}
10852
10853static int
10854bxe_reset_mcp_comp(struct bxe_softc *sc,
10855                   uint32_t         magic_val)
10856{
10857    int rc = bxe_init_shmem(sc);
10858
10859    /* Restore the `magic' bit value */
10860    if (!CHIP_IS_E1(sc)) {
10861        bxe_clp_reset_done(sc, magic_val);
10862    }
10863
10864    return (rc);
10865}
10866
10867static void
10868bxe_pxp_prep(struct bxe_softc *sc)
10869{
10870    if (!CHIP_IS_E1(sc)) {
10871        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10872        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10873        wmb();
10874    }
10875}
10876
10877/*
10878 * Reset the whole chip except for:
10879 *      - PCIE core
10880 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10881 *      - IGU
10882 *      - MISC (including AEU)
10883 *      - GRC
10884 *      - RBCN, RBCP
10885 */
10886static void
10887bxe_process_kill_chip_reset(struct bxe_softc *sc,
10888                            uint8_t          global)
10889{
10890    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10891    uint32_t global_bits2, stay_reset2;
10892
10893    /*
10894     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10895     * (per chip) blocks.
10896     */
10897    global_bits2 =
10898        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10899        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10900
10901    /*
10902     * Don't reset the following blocks.
10903     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in a 4-port device they might still be owned
10905     *            by the MCP (there is only one leader per path).
10906     */
10907    not_reset_mask1 =
10908        MISC_REGISTERS_RESET_REG_1_RST_HC |
10909        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10910        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10911
10912    not_reset_mask2 =
10913        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10914        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10915        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10916        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10917        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10918        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10919        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10920        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10921        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10922        MISC_REGISTERS_RESET_REG_2_PGLC |
10923        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10924        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10925        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10926        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10927        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10928        MISC_REGISTERS_RESET_REG_2_UMAC1;
10929
10930    /*
10931     * Keep the following blocks in reset:
10932     *  - all xxMACs are handled by the elink code.
10933     */
10934    stay_reset2 =
10935        MISC_REGISTERS_RESET_REG_2_XMAC |
10936        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10937
10938    /* Full reset masks according to the chip */
10939    reset_mask1 = 0xffffffff;
10940
10941    if (CHIP_IS_E1(sc))
10942        reset_mask2 = 0xffff;
10943    else if (CHIP_IS_E1H(sc))
10944        reset_mask2 = 0x1ffff;
10945    else if (CHIP_IS_E2(sc))
10946        reset_mask2 = 0xfffff;
10947    else /* CHIP_IS_E3 */
10948        reset_mask2 = 0x3ffffff;
10949
10950    /* Don't reset global blocks unless we need to */
10951    if (!global)
10952        reset_mask2 &= ~global_bits2;
10953
10954    /*
10955     * In case of attention in the QM, we need to reset PXP
10956     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10957     * because otherwise QM reset would release 'close the gates' shortly
10958     * before resetting the PXP, then the PSWRQ would send a write
10959     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10960     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck and DMAE commands
     * would not return. Therefore it's important to reset the second
10963     * reset register (containing the
10964     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10965     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10966     * bit).
10967     */
10968    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10969           reset_mask2 & (~not_reset_mask2));
10970
10971    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10972           reset_mask1 & (~not_reset_mask1));
10973
10974    mb();
10975    wmb();
10976
10977    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10978           reset_mask2 & (~stay_reset2));
10979
10980    mb();
10981    wmb();
10982
10983    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10984    wmb();
10985}
10986
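/*
 * "Process kill" recovery flow: wait for the chip to become idle, close
 * gates #2-#4, reset the chip (and optionally the MCP) and reopen the
 * gates.
 */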
10987static int
10988bxe_process_kill(struct bxe_softc *sc,
10989                 uint8_t          global)
10990{
10991    int cnt = 1000;
10992    uint32_t val = 0;
10993    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10994    uint32_t tags_63_32 = 0;
10995
10996    /* Empty the Tetris buffer, wait for 1s */
10997    do {
10998        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10999        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11000        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11001        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11002        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11003        if (CHIP_IS_E3(sc)) {
11004            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11005        }
11006
11007        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11008            ((port_is_idle_0 & 0x1) == 0x1) &&
11009            ((port_is_idle_1 & 0x1) == 0x1) &&
11010            (pgl_exp_rom2 == 0xffffffff) &&
11011            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11012            break;
11013        DELAY(1000);
11014    } while (cnt-- > 0);
11015
11016    if (cnt <= 0) {
11017        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11018                  "are still outstanding read requests after 1s! "
11019                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11020                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11021              sr_cnt, blk_cnt, port_is_idle_0,
11022              port_is_idle_1, pgl_exp_rom2);
11023        return (-1);
11024    }
11025
11026    mb();
11027
11028    /* Close gates #2, #3 and #4 */
11029    bxe_set_234_gates(sc, TRUE);
11030
11031    /* Poll for IGU VQs for 57712 and newer chips */
11032    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11033        return (-1);
11034    }
11035
11036    /* XXX indicate that "process kill" is in progress to MCP */
11037
11038    /* clear "unprepared" bit */
11039    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11040    mb();
11041
11042    /* Make sure all is written to the chip before the reset */
11043    wmb();
11044
11045    /*
11046     * Wait for 1ms to empty GLUE and PCI-E core queues,
11047     * PSWHST, GRC and PSWRD Tetris buffer.
11048     */
11049    DELAY(1000);
11050
    /* Prepare for chip reset: */
11052    /* MCP */
11053    if (global) {
11054        bxe_reset_mcp_prep(sc, &val);
11055    }
11056
11057    /* PXP */
11058    bxe_pxp_prep(sc);
11059    mb();
11060
11061    /* reset the chip */
11062    bxe_process_kill_chip_reset(sc, global);
11063    mb();
11064
11065    /* clear errors in PGB */
11066    if (!CHIP_IS_E1(sc))
11067        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11068
11069    /* Recover after reset: */
11070    /* MCP */
11071    if (global && bxe_reset_mcp_comp(sc, val)) {
11072        return (-1);
11073    }
11074
11075    /* XXX add resetting the NO_MCP mode DB here */
11076
11077    /* Open the gates #2, #3 and #4 */
11078    bxe_set_234_gates(sc, FALSE);
11079
11080    /* XXX
     * IGU/AEU preparation: bring the AEU/IGU back to a reset state and
     * re-enable attentions
11083     */
11084
11085    return (0);
11086}
11087
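/*
 * Recovery leader flow: optionally load a "fake" driver through the MCP,
 * perform the process kill chip reset and release leadership.
 */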
11088static int
11089bxe_leader_reset(struct bxe_softc *sc)
11090{
11091    int rc = 0;
11092    uint8_t global = bxe_reset_is_global(sc);
11093    uint32_t load_code;
11094
11095    /*
     * If we are not going to reset the MCP, load a "fake" driver to reset
     * the HW while the driver is the owner of the HW.
11098     */
11099    if (!global && !BXE_NOMCP(sc)) {
11100        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11101                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11102        if (!load_code) {
11103            BLOGE(sc, "MCP response failure, aborting\n");
11104            rc = -1;
11105            goto exit_leader_reset;
11106        }
11107
11108        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11109            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11110            BLOGE(sc, "MCP unexpected response, aborting\n");
11111            rc = -1;
11112            goto exit_leader_reset2;
11113        }
11114
11115        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11116        if (!load_code) {
11117            BLOGE(sc, "MCP response failure, aborting\n");
11118            rc = -1;
11119            goto exit_leader_reset2;
11120        }
11121    }
11122
11123    /* try to recover after the failure */
11124    if (bxe_process_kill(sc, global)) {
11125        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11126        rc = -1;
11127        goto exit_leader_reset2;
11128    }
11129
11130    /*
11131     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11132     * state.
11133     */
11134    bxe_set_reset_done(sc);
11135    if (global) {
11136        bxe_clear_reset_global(sc);
11137    }
11138
11139exit_leader_reset2:
11140
11141    /* unload "fake driver" if it was loaded */
11142    if (!global && !BXE_NOMCP(sc)) {
11143        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11144        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11145    }
11146
11147exit_leader_reset:
11148
11149    sc->is_leader = 0;
11150    bxe_release_leader_lock(sc);
11151
11152    mb();
11153    return (rc);
11154}
11155
11156/*
11157 * prepare INIT transition, parameters configured:
11158 *   - HC configuration
11159 *   - Queue's CDU context
11160 */
11161static void
11162bxe_pf_q_prep_init(struct bxe_softc               *sc,
11163                   struct bxe_fastpath            *fp,
11164                   struct ecore_queue_init_params *init_params)
11165{
11166    uint8_t cos;
11167    int cxt_index, cxt_offset;
11168
11169    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11170    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11171
11172    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11173    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11174
11175    /* HC rate */
11176    init_params->rx.hc_rate =
11177        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11178    init_params->tx.hc_rate =
11179        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11180
11181    /* FW SB ID */
11182    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11183
11184    /* CQ index among the SB indices */
11185    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11186    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11187
11188    /* set maximum number of COSs supported by this queue */
11189    init_params->max_cos = sc->max_cos;
11190
11191    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11192          fp->index, init_params->max_cos);
11193
11194    /* set the context pointers queue object */
11195    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11196        /* XXX change index/cid here if ever support multiple tx CoS */
11197        /* fp->txdata[cos]->cid */
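        /*
         * The queue's CDU context lives in ILT page (index / ILT_PAGE_CIDS)
         * at offset (index % ILT_PAGE_CIDS) within that page.
         */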
11198        cxt_index = fp->index / ILT_PAGE_CIDS;
11199        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11200        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11201    }
11202}
11203
11204/* set flags that are common for the Tx-only and not normal connections */
11205static unsigned long
11206bxe_get_common_flags(struct bxe_softc    *sc,
11207                     struct bxe_fastpath *fp,
11208                     uint8_t             zero_stats)
11209{
11210    unsigned long flags = 0;
11211
11212    /* PF driver will always initialize the Queue to an ACTIVE state */
11213    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11214
11215    /*
11216     * tx only connections collect statistics (on the same index as the
11217     * parent connection). The statistics are zeroed when the parent
11218     * connection is initialized.
11219     */
11220
11221    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11222    if (zero_stats) {
11223        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11224    }
11225
11226    /*
11227     * tx only connections can support tx-switching, though their
11228     * CoS-ness doesn't survive the loopback
11229     */
11230    if (sc->flags & BXE_TX_SWITCHING) {
11231        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11232    }
11233
11234    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11235
11236    return (flags);
11237}
11238
11239static unsigned long
11240bxe_get_q_flags(struct bxe_softc    *sc,
11241                struct bxe_fastpath *fp,
11242                uint8_t             leading)
11243{
11244    unsigned long flags = 0;
11245
11246    if (IS_MF_SD(sc)) {
11247        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11248    }
11249
11250    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11251        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11252        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11253    }
11254
11255    if (leading) {
11256        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11257        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11258    }
11259
11260    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11261
11262    /* merge with common flags */
11263    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11264}
11265
11266static void
11267bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11268                      struct bxe_fastpath               *fp,
11269                      struct ecore_general_setup_params *gen_init,
11270                      uint8_t                           cos)
11271{
11272    gen_init->stat_id = bxe_stats_id(fp);
11273    gen_init->spcl_id = fp->cl_id;
11274    gen_init->mtu = sc->mtu;
11275    gen_init->cos = cos;
11276}
11277
11278static void
11279bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11280                 struct bxe_fastpath           *fp,
11281                 struct rxq_pause_params       *pause,
11282                 struct ecore_rxq_setup_params *rxq_init)
11283{
11284    uint8_t max_sge = 0;
11285    uint16_t sge_sz = 0;
11286    uint16_t tpa_agg_size = 0;
11287
11288    pause->sge_th_lo = SGE_TH_LO(sc);
11289    pause->sge_th_hi = SGE_TH_HI(sc);
11290
11291    /* validate SGE ring has enough to cross high threshold */
11292    if (sc->dropless_fc &&
11293            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11294            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11295        BLOGW(sc, "sge ring threshold limit\n");
11296    }
11297
11298    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11299    tpa_agg_size = (2 * sc->mtu);
11300    if (tpa_agg_size < sc->max_aggregation_size) {
11301        tpa_agg_size = sc->max_aggregation_size;
11302    }
11303
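    /*
     * Maximum number of SGEs used for a single packet: pages needed to
     * cover the MTU, rounded up to a whole PAGES_PER_SGE group and
     * expressed in SGE units.
     */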
11304    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11305    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11306                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11307    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11308
11309    /* pause - not for e1 */
11310    if (!CHIP_IS_E1(sc)) {
11311        pause->bd_th_lo = BD_TH_LO(sc);
11312        pause->bd_th_hi = BD_TH_HI(sc);
11313
11314        pause->rcq_th_lo = RCQ_TH_LO(sc);
11315        pause->rcq_th_hi = RCQ_TH_HI(sc);
11316
11317        /* validate rings have enough entries to cross high thresholds */
11318        if (sc->dropless_fc &&
11319            pause->bd_th_hi + FW_PREFETCH_CNT >
11320            sc->rx_ring_size) {
11321            BLOGW(sc, "rx bd ring threshold limit\n");
11322        }
11323
11324        if (sc->dropless_fc &&
11325            pause->rcq_th_hi + FW_PREFETCH_CNT >
11326            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11327            BLOGW(sc, "rcq ring threshold limit\n");
11328        }
11329
11330        pause->pri_map = 1;
11331    }
11332
11333    /* rxq setup */
11334    rxq_init->dscr_map   = fp->rx_dma.paddr;
11335    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11336    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11337    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11338
11339    /*
11340     * This is the maximum number of data bytes that may be placed
11341     * on the BD (not including padding).
11342     */
11343    rxq_init->buf_sz = (fp->rx_buf_size -
11344                        IP_HEADER_ALIGNMENT_PADDING);
11345
11346    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11347    rxq_init->tpa_agg_sz      = tpa_agg_size;
11348    rxq_init->sge_buf_sz      = sge_sz;
11349    rxq_init->max_sges_pkt    = max_sge;
11350    rxq_init->rss_engine_id   = SC_FUNC(sc);
11351    rxq_init->mcast_engine_id = SC_FUNC(sc);
11352
11353    /*
11354     * Maximum number of simultaneous TPA aggregations for this Queue.
11355     * For PF Clients it should be the maximum available number.
11356     * VF driver(s) may want to define it to a smaller value.
11357     */
11358    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11359
11360    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11361    rxq_init->fw_sb_id = fp->fw_sb_id;
11362
11363    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11364
11365    /*
11366     * configure silent vlan removal
11367     * if multi function mode is afex, then mask default vlan
11368     */
11369    if (IS_MF_AFEX(sc)) {
11370        rxq_init->silent_removal_value =
11371            sc->devinfo.mf_info.afex_def_vlan_tag;
11372        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11373    }
11374}
11375
11376static void
11377bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11378                 struct bxe_fastpath           *fp,
11379                 struct ecore_txq_setup_params *txq_init,
11380                 uint8_t                       cos)
11381{
11382    /*
11383     * XXX If multiple CoS is ever supported then each fastpath structure
11384     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11385     * fp->txdata[cos]->tx_dma.paddr;
11386     */
11387    txq_init->dscr_map     = fp->tx_dma.paddr;
11388    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11389    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11390    txq_init->fw_sb_id     = fp->fw_sb_id;
11391
11392    /*
11393     * set the TSS leading client id for TX classification to the
11394     * leading RSS client id
11395     */
11396    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11397}
11398
11399/*
11400 * This function performs 2 steps in a queue state machine:
11401 *   1) RESET->INIT
11402 *   2) INIT->SETUP
11403 */
11404static int
11405bxe_setup_queue(struct bxe_softc    *sc,
11406                struct bxe_fastpath *fp,
11407                uint8_t             leading)
11408{
11409    struct ecore_queue_state_params q_params = { NULL };
11410    struct ecore_queue_setup_params *setup_params =
11411                        &q_params.params.setup;
11412    int rc;
11413
11414    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11415
11416    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11417
11418    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11419
11420    /* we want to wait for completion in this context */
11421    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11422
11423    /* prepare the INIT parameters */
11424    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11425
11426    /* Set the command */
11427    q_params.cmd = ECORE_Q_CMD_INIT;
11428
11429    /* Change the state to INIT */
11430    rc = ecore_queue_state_change(sc, &q_params);
11431    if (rc) {
11432        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11433        return (rc);
11434    }
11435
11436    BLOGD(sc, DBG_LOAD, "init complete\n");
11437
11438    /* now move the Queue to the SETUP state */
11439    memset(setup_params, 0, sizeof(*setup_params));
11440
11441    /* set Queue flags */
11442    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11443
11444    /* set general SETUP parameters */
11445    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11446                          FIRST_TX_COS_INDEX);
11447
11448    bxe_pf_rx_q_prep(sc, fp,
11449                     &setup_params->pause_params,
11450                     &setup_params->rxq_params);
11451
11452    bxe_pf_tx_q_prep(sc, fp,
11453                     &setup_params->txq_params,
11454                     FIRST_TX_COS_INDEX);
11455
11456    /* Set the command */
11457    q_params.cmd = ECORE_Q_CMD_SETUP;
11458
11459    /* change the state to SETUP */
11460    rc = ecore_queue_state_change(sc, &q_params);
11461    if (rc) {
11462        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11463        return (rc);
11464    }
11465
11466    return (rc);
11467}
11468
11469static int
11470bxe_setup_leading(struct bxe_softc *sc)
11471{
11472    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11473}
11474
11475static int
11476bxe_config_rss_pf(struct bxe_softc            *sc,
11477                  struct ecore_rss_config_obj *rss_obj,
11478                  uint8_t                     config_hash)
11479{
11480    struct ecore_config_rss_params params = { NULL };
11481    int i;
11482
11483    /*
11484     * Although RSS is meaningless when there is a single HW queue, we
11485     * still need it enabled in order to have the HW Rx hash generated.
11486     */
11487
11488    params.rss_obj = rss_obj;
11489
11490    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11491
11492    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11493
11494    /* RSS configuration */
11495    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11496    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11497    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11498    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11499    if (rss_obj->udp_rss_v4) {
11500        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11501    }
11502    if (rss_obj->udp_rss_v6) {
11503        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11504    }
11505
11506    /* Hash bits */
11507    params.rss_result_mask = MULTI_MASK;
11508
11509    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11510
11511    if (config_hash) {
11512        /* RSS keys */
11513        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11514            params.rss_key[i] = arc4random();
11515        }
11516
11517        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11518    }
11519
11520    return (ecore_config_rss(sc, &params));
11521}
11522
11523static int
11524bxe_config_rss_eth(struct bxe_softc *sc,
11525                   uint8_t          config_hash)
11526{
11527    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11528}
11529
11530static int
11531bxe_init_rss_pf(struct bxe_softc *sc)
11532{
11533    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11534    int i;
11535
11536    /*
11537     * Prepare the initial contents of the indirection table if
11538     * RSS is enabled
11539     */
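    /*
     * Each entry is the leading client id plus a round-robin offset,
     * assuming the ETH queues have consecutive client ids.
     */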
11540    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11541        sc->rss_conf_obj.ind_table[i] =
11542            (sc->fp->cl_id + (i % num_eth_queues));
11543    }
11544
11545    if (sc->udp_rss) {
11546        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11547    }
11548
11549    /*
11550     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11551     * per-port, so if explicit configuration is needed, do it only
11552     * for a PMF.
11553     *
11554     * For 57712 and newer it's a per-function configuration.
11555     */
11556    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11557}
11558
11559static int
11560bxe_set_mac_one(struct bxe_softc          *sc,
11561                uint8_t                   *mac,
11562                struct ecore_vlan_mac_obj *obj,
11563                uint8_t                   set,
11564                int                       mac_type,
11565                unsigned long             *ramrod_flags)
11566{
11567    struct ecore_vlan_mac_ramrod_params ramrod_param;
11568    int rc;
11569
11570    memset(&ramrod_param, 0, sizeof(ramrod_param));
11571
11572    /* fill in general parameters */
11573    ramrod_param.vlan_mac_obj = obj;
11574    ramrod_param.ramrod_flags = *ramrod_flags;
11575
11576    /* fill a user request section if needed */
11577    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11578        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11579
11580        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11581
11582        /* Set the command: ADD or DEL */
11583        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11584                                            ECORE_VLAN_MAC_DEL;
11585    }
11586
11587    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11588
11589    if (rc == ECORE_EXISTS) {
11590        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11591        /* do not treat adding the same MAC as an error */
11592        rc = 0;
11593    } else if (rc < 0) {
11594        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11595    }
11596
11597    return (rc);
11598}
11599
11600static int
11601bxe_set_eth_mac(struct bxe_softc *sc,
11602                uint8_t          set)
11603{
11604    unsigned long ramrod_flags = 0;
11605
11606    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11607
11608    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11609
11610    /* Eth MAC is set on RSS leading client (fp[0]) */
11611    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11612                            &sc->sp_objs->mac_obj,
11613                            set, ECORE_ETH_MAC, &ramrod_flags));
11614}
11615
11616static int
11617bxe_get_cur_phy_idx(struct bxe_softc *sc)
11618{
11619    uint32_t sel_phy_idx = 0;
11620
11621    if (sc->link_params.num_phys <= 1) {
11622        return (ELINK_INT_PHY);
11623    }
11624
11625    if (sc->link_vars.link_up) {
11626        sel_phy_idx = ELINK_EXT_PHY1;
11627        /* In case the link is SERDES, check whether ELINK_EXT_PHY2 is the active PHY */
11628        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11629            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11630             ELINK_SUPPORTED_FIBRE))
11631            sel_phy_idx = ELINK_EXT_PHY2;
11632    } else {
11633        switch (elink_phy_selection(&sc->link_params)) {
11634        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11635        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11636        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11637               sel_phy_idx = ELINK_EXT_PHY1;
11638               break;
11639        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11640        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11641               sel_phy_idx = ELINK_EXT_PHY2;
11642               break;
11643        }
11644    }
11645
11646    return (sel_phy_idx);
11647}
11648
11649static int
11650bxe_get_link_cfg_idx(struct bxe_softc *sc)
11651{
11652    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11653
11654    /*
11655     * The selected activated PHY is always after swapping (in case PHY
11656     * swapping is enabled). So when swapping is enabled, we need to reverse
11657     * the configuration
11658     */
11659
11660    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11661        if (sel_phy_idx == ELINK_EXT_PHY1)
11662            sel_phy_idx = ELINK_EXT_PHY2;
11663        else if (sel_phy_idx == ELINK_EXT_PHY2)
11664            sel_phy_idx = ELINK_EXT_PHY1;
11665    }
11666
11667    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11668}
11669
11670static void
11671bxe_set_requested_fc(struct bxe_softc *sc)
11672{
11673    /*
11674     * Initialize link parameters structure variables.
11675     * It is recommended to turn off RX flow control for jumbo frames
11676     * for better performance.
11677     */
11678    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11679        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11680    } else {
11681        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11682    }
11683}
11684
11685static void
11686bxe_calc_fc_adv(struct bxe_softc *sc)
11687{
11688    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11689
11690
11691    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11692                                           ADVERTISED_Pause);
11693
11694    switch (sc->link_vars.ieee_fc &
11695            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11696
11697    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11698        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11699                                          ADVERTISED_Pause);
11700        break;
11701
11702    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11703        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11704        break;
11705
11706    default:
11707        break;
11708
11709    }
11710}
11711
11712static uint16_t
11713bxe_get_mf_speed(struct bxe_softc *sc)
11714{
11715    uint16_t line_speed = sc->link_vars.line_speed;
11716    if (IS_MF(sc)) {
11717        uint16_t maxCfg =
11718            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11719
11720        /* calculate the current MAX line speed limit for the MF devices */
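        /*
         * maxCfg appears to be a percentage of the link speed in SI mode
         * and a bandwidth value in 100 Mbps units in SD mode.
         */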
11721        if (IS_MF_SI(sc)) {
11722            line_speed = (line_speed * maxCfg) / 100;
11723        } else { /* SD mode */
11724            uint16_t vn_max_rate = maxCfg * 100;
11725
11726            if (vn_max_rate < line_speed) {
11727                line_speed = vn_max_rate;
11728            }
11729        }
11730    }
11731
11732    return (line_speed);
11733}
11734
11735static void
11736bxe_fill_report_data(struct bxe_softc            *sc,
11737                     struct bxe_link_report_data *data)
11738{
11739    uint16_t line_speed = bxe_get_mf_speed(sc);
11740
11741    memset(data, 0, sizeof(*data));
11742
11743    /* fill the report data with the effective line speed */
11744    data->line_speed = line_speed;
11745
11746    /* Link is down */
11747    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11748        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11749    }
11750
11751    /* Full DUPLEX */
11752    if (sc->link_vars.duplex == DUPLEX_FULL) {
11753        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11754    }
11755
11756    /* Rx Flow Control is ON */
11757    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11758        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11759    }
11760
11761    /* Tx Flow Control is ON */
11762    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11763        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11764    }
11765}
11766
11767/* report link status to OS, should be called under phy_lock */
11768static void
11769bxe_link_report_locked(struct bxe_softc *sc)
11770{
11771    struct bxe_link_report_data cur_data;
11772
11773    /* reread mf_cfg */
11774    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11775        bxe_read_mf_cfg(sc);
11776    }
11777
11778    /* Read the current link report info */
11779    bxe_fill_report_data(sc, &cur_data);
11780
11781    /* Don't report link down or exactly the same link status twice */
11782    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11783        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11784                      &sc->last_reported_link.link_report_flags) &&
11785         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11786                      &cur_data.link_report_flags))) {
11787        return;
11788    }
11789
11790    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11791                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11792    sc->link_cnt++;
11793
11794    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11795    /* report new link params and remember the state for the next time */
11796    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11797
11798    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11799                     &cur_data.link_report_flags)) {
11800        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11801    } else {
11802        const char *duplex;
11803        const char *flow;
11804
11805        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11806                                   &cur_data.link_report_flags)) {
11807            duplex = "full";
11808            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11809        } else {
11810            duplex = "half";
11811            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11812        }
11813
11814        /*
11815         * Handle the FC flags at the end so that only these flags can
11816         * possibly remain set. This way we can easily check whether any
11817         * FC is enabled.
11818         */
11819        if (cur_data.link_report_flags) {
11820            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11821                             &cur_data.link_report_flags) &&
11822                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11823                             &cur_data.link_report_flags)) {
11824                flow = "ON - receive & transmit";
11825            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11826                                    &cur_data.link_report_flags) &&
11827                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11828                                     &cur_data.link_report_flags)) {
11829                flow = "ON - receive";
11830            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11831                                     &cur_data.link_report_flags) &&
11832                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11833                                    &cur_data.link_report_flags)) {
11834                flow = "ON - transmit";
11835            } else {
11836                flow = "none"; /* possible? */
11837            }
11838        } else {
11839            flow = "none";
11840        }
11841
11842        if_link_state_change(sc->ifp, LINK_STATE_UP);
11843        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11844              cur_data.line_speed, duplex, flow);
11845    }
11846}
11847
11848static void
11849bxe_link_report(struct bxe_softc *sc)
11850{
11851    bxe_acquire_phy_lock(sc);
11852    bxe_link_report_locked(sc);
11853    bxe_release_phy_lock(sc);
11854}
11855
11856static void
11857bxe_link_status_update(struct bxe_softc *sc)
11858{
11859    if (sc->state != BXE_STATE_OPEN) {
11860        return;
11861    }
11862
11863    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11864        elink_link_status_update(&sc->link_params, &sc->link_vars);
11865    } else {
11866        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11867                                  ELINK_SUPPORTED_10baseT_Full |
11868                                  ELINK_SUPPORTED_100baseT_Half |
11869                                  ELINK_SUPPORTED_100baseT_Full |
11870                                  ELINK_SUPPORTED_1000baseT_Full |
11871                                  ELINK_SUPPORTED_2500baseX_Full |
11872                                  ELINK_SUPPORTED_10000baseT_Full |
11873                                  ELINK_SUPPORTED_TP |
11874                                  ELINK_SUPPORTED_FIBRE |
11875                                  ELINK_SUPPORTED_Autoneg |
11876                                  ELINK_SUPPORTED_Pause |
11877                                  ELINK_SUPPORTED_Asym_Pause);
11878        sc->port.advertising[0] = sc->port.supported[0];
11879
11880        sc->link_params.sc                = sc;
11881        sc->link_params.port              = SC_PORT(sc);
11882        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11883        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11884        sc->link_params.req_line_speed[0] = SPEED_10000;
11885        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11886        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11887
11888        if (CHIP_REV_IS_FPGA(sc)) {
11889            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11890            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11891            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11892                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11893        } else {
11894            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11895            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11896            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11897                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11898        }
11899
11900        sc->link_vars.link_up = 1;
11901
11902        sc->link_vars.duplex    = DUPLEX_FULL;
11903        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11904
11905        if (IS_PF(sc)) {
11906            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11907            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11908            bxe_link_report(sc);
11909        }
11910    }
11911
11912    if (IS_PF(sc)) {
11913        if (sc->link_vars.link_up) {
11914            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11915        } else {
11916            bxe_stats_handle(sc, STATS_EVENT_STOP);
11917        }
11918        bxe_link_report(sc);
11919    } else {
11920        bxe_link_report(sc);
11921        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11922    }
11923}
11924
11925static int
11926bxe_initial_phy_init(struct bxe_softc *sc,
11927                     int              load_mode)
11928{
11929    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11930    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11931    struct elink_params *lp = &sc->link_params;
11932
11933    bxe_set_requested_fc(sc);
11934
11935    if (CHIP_REV_IS_SLOW(sc)) {
11936        uint32_t bond = CHIP_BOND_ID(sc);
11937        uint32_t feat = 0;
11938
11939        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11940            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11941        } else if (bond & 0x4) {
11942            if (CHIP_IS_E3(sc)) {
11943                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11944            } else {
11945                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11946            }
11947        } else if (bond & 0x8) {
11948            if (CHIP_IS_E3(sc)) {
11949                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11950            } else {
11951                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11952            }
11953        }
11954
11955        /* disable EMAC for E3 and above */
11956        if (bond & 0x2) {
11957            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11958        }
11959
11960        sc->link_params.feature_config_flags |= feat;
11961    }
11962
11963    bxe_acquire_phy_lock(sc);
11964
11965    if (load_mode == LOAD_DIAG) {
11966        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11967        /* Prefer doing PHY loopback at 10G speed, if possible */
11968        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11969            if (lp->speed_cap_mask[cfg_idx] &
11970                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11971                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11972            } else {
11973                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11974            }
11975        }
11976    }
11977
11978    if (load_mode == LOAD_LOOPBACK_EXT) {
11979        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11980    }
11981
11982    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11983
11984    bxe_release_phy_lock(sc);
11985
11986    bxe_calc_fc_adv(sc);
11987
11988    if (sc->link_vars.link_up) {
11989        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11990        bxe_link_report(sc);
11991    }
11992
11993    if (!CHIP_REV_IS_SLOW(sc)) {
11994        bxe_periodic_start(sc);
11995    }
11996
11997    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11998    return (rc);
11999}
12000
12001static u_int
12002bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12003{
12004    struct ecore_mcast_list_elem *mc_mac = arg;
12005
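    /*
     * 'cnt' counts the addresses already pushed (each call returns 1),
     * so it indexes the next free slot of the preallocated array.
     */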
12006    mc_mac += cnt;
12007    mc_mac->mac = (uint8_t *)LLADDR(sdl);
12008
12009    return (1);
12010}
12011
12012static int
12013bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12014                         struct ecore_mcast_ramrod_params *p)
12015{
12016    if_t ifp = sc->ifp;
12017    int mc_count;
12018    struct ecore_mcast_list_elem *mc_mac;
12019
12020    ECORE_LIST_INIT(&p->mcast_list);
12021    p->mcast_list_len = 0;
12022
12023    /* XXXGL: multicast count may change later */
12024    mc_count = if_llmaddr_count(ifp);
12025
12026    if (!mc_count) {
12027        return (0);
12028    }
12029
12030    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12031                    (M_NOWAIT | M_ZERO));
12032    if (!mc_mac) {
12033        BLOGE(sc, "Failed to allocate temp mcast list\n");
12034        return (-1);
12035    }
12036    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12037    if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12038
12039    for (int i = 0; i < mc_count; i ++) {
12040        ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12041        BLOGD(sc, DBG_LOAD,
12042              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12043              mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12044              mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12045              mc_count);
12046    }
12047
12048    p->mcast_list_len = mc_count;
12049
12050    return (0);
12051}
12052
12053static void
12054bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12055{
12056    struct ecore_mcast_list_elem *mc_mac =
12057        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12058                               struct ecore_mcast_list_elem,
12059                               link);
12060
12061    if (mc_mac) {
12062        /* only a single free as all mc_macs are in the same heap array */
12063        free(mc_mac, M_DEVBUF);
12064    }
12065}

12066static int
12067bxe_set_mc_list(struct bxe_softc *sc)
12068{
12069    struct ecore_mcast_ramrod_params rparam = { NULL };
12070    int rc = 0;
12071
12072    rparam.mcast_obj = &sc->mcast_obj;
12073
12074    BXE_MCAST_LOCK(sc);
12075
12076    /* first, clear all configured multicast MACs */
12077    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12078    if (rc < 0) {
12079        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12080        /* Manual backport parts of FreeBSD upstream r284470. */
12081        BXE_MCAST_UNLOCK(sc);
12082        return (rc);
12083    }
12084
12085    /* configure a new MACs list */
12086    rc = bxe_init_mcast_macs_list(sc, &rparam);
12087    if (rc) {
12088        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12089        BXE_MCAST_UNLOCK(sc);
12090        return (rc);
12091    }
12092
12093    /* Now add the new MACs */
12094    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12095    if (rc < 0) {
12096        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12097    }
12098
12099    bxe_free_mcast_macs_list(&rparam);
12100
12101    BXE_MCAST_UNLOCK(sc);
12102
12103    return (rc);
12104}
12105
12106struct bxe_set_addr_ctx {
12107   struct bxe_softc *sc;
12108   unsigned long ramrod_flags;
12109   int rc;
12110};
12111
12112static u_int
12113bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12114{
12115    struct bxe_set_addr_ctx *ctx = arg;
12116    struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12117    int rc;
12118
12119    if (ctx->rc < 0)
12120        return (0);
12121
12122    rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12123                         ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12124
12125    /* do not treat adding the same MAC as an error */
12126    if (rc == -EEXIST) {
12127        BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12128    } else if (rc < 0) {
12129        BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12130        ctx->rc = rc;
12131    }
12132
12133    return (1);
12134}
12135
12136static int
12137bxe_set_uc_list(struct bxe_softc *sc)
12138{
12139    if_t ifp = sc->ifp;
12140    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12141    struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12142    int rc;
12143
12144    /* first schedule a cleanup of the old configuration */
12145    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12146    if (rc < 0) {
12147        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12148        return (rc);
12149    }
12150
12151    if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12152    if (ctx.rc < 0)
12153        return (ctx.rc);
12154
12155    /* Execute the pending commands */
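    /*
     * With RAMROD_CONT set, bxe_set_mac_one() skips the user-request
     * section, so the MAC and 'set' arguments are effectively ignored.
     */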
12156    bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12157    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12158                            ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12159}
12160
12161static void
12162bxe_set_rx_mode(struct bxe_softc *sc)
12163{
12164    if_t ifp = sc->ifp;
12165    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12166
12167    if (sc->state != BXE_STATE_OPEN) {
12168        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12169        return;
12170    }
12171
12172    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12173
12174    if (if_getflags(ifp) & IFF_PROMISC) {
12175        rx_mode = BXE_RX_MODE_PROMISC;
12176    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12177               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12178                CHIP_IS_E1(sc))) {
12179        rx_mode = BXE_RX_MODE_ALLMULTI;
12180    } else {
12181        if (IS_PF(sc)) {
12182            /* some multicasts */
12183            if (bxe_set_mc_list(sc) < 0) {
12184                rx_mode = BXE_RX_MODE_ALLMULTI;
12185            }
12186            if (bxe_set_uc_list(sc) < 0) {
12187                rx_mode = BXE_RX_MODE_PROMISC;
12188            }
12189        }
12190    }
12191
12192    sc->rx_mode = rx_mode;
12193
12194    /* schedule the rx_mode command */
12195    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12196        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12197        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12198        return;
12199    }
12200
12201    if (IS_PF(sc)) {
12202        bxe_set_storm_rx_mode(sc);
12203    }
12204}
12205
12206
12207/* update flags in shmem */
12208static void
12209bxe_update_drv_flags(struct bxe_softc *sc,
12210                     uint32_t         flags,
12211                     uint32_t         set)
12212{
12213    uint32_t drv_flags;
12214
12215    if (SHMEM2_HAS(sc, drv_flags)) {
12216        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12217        drv_flags = SHMEM2_RD(sc, drv_flags);
12218
12219        if (set) {
12220            SET_FLAGS(drv_flags, flags);
12221        } else {
12222            RESET_FLAGS(drv_flags, flags);
12223        }
12224
12225        SHMEM2_WR(sc, drv_flags, drv_flags);
12226        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12227
12228        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12229    }
12230}
12231
12232/* periodic timer callout routine, only runs when the interface is up */
12233
12234static void
12235bxe_periodic_callout_func(void *xsc)
12236{
12237    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12238    int i;
12239
12240    if (!BXE_CORE_TRYLOCK(sc)) {
12241        /* just bail and try again next time */
12242
12243        if ((sc->state == BXE_STATE_OPEN) &&
12244            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12245            /* schedule the next periodic callout */
12246            callout_reset(&sc->periodic_callout, hz,
12247                          bxe_periodic_callout_func, sc);
12248        }
12249
12250        return;
12251    }
12252
12253    if ((sc->state != BXE_STATE_OPEN) ||
12254        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12255        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12256        BXE_CORE_UNLOCK(sc);
12257        return;
12258    }
12259
12260
12261    /* Check for TX timeouts on any fastpath. */
12262    FOR_EACH_QUEUE(sc, i) {
12263        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12264            /* Ruh-Roh, chip was reset! */
12265            break;
12266        }
12267    }
12268
12269    if (!CHIP_REV_IS_SLOW(sc)) {
12270        /*
12271         * This barrier is needed to ensure the ordering between the writing
12272         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12273         * the reading here.
12274         */
12275        mb();
12276        if (sc->port.pmf) {
12277            bxe_acquire_phy_lock(sc);
12278            elink_period_func(&sc->link_params, &sc->link_vars);
12279            bxe_release_phy_lock(sc);
12280        }
12281    }
12282
12283    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12284        int mb_idx = SC_FW_MB_IDX(sc);
12285        uint32_t drv_pulse;
12286        uint32_t mcp_pulse;
12287
12288        ++sc->fw_drv_pulse_wr_seq;
12289        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12290
12291        drv_pulse = sc->fw_drv_pulse_wr_seq;
12292        bxe_drv_pulse(sc);
12293
12294        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12295                     MCP_PULSE_SEQ_MASK);
12296
12297        /*
12298         * The delta between driver pulse and mcp response should
12299         * be 1 (before mcp response) or 0 (after mcp response).
12300         */
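        /* e.g. drv_pulse 0x12 is in sync with mcp_pulse 0x12 or 0x11 */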
12301        if ((drv_pulse != mcp_pulse) &&
12302            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12303            /* someone lost a heartbeat... */
12304            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12305                  drv_pulse, mcp_pulse);
12306        }
12307    }
12308
12309    /* state is BXE_STATE_OPEN */
12310    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12311
12312    BXE_CORE_UNLOCK(sc);
12313
12314    if ((sc->state == BXE_STATE_OPEN) &&
12315        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12316        /* schedule the next periodic callout */
12317        callout_reset(&sc->periodic_callout, hz,
12318                      bxe_periodic_callout_func, sc);
12319    }
12320}
12321
12322static void
12323bxe_periodic_start(struct bxe_softc *sc)
12324{
12325    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12326    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12327}
12328
12329static void
12330bxe_periodic_stop(struct bxe_softc *sc)
12331{
12332    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12333    callout_drain(&sc->periodic_callout);
12334}
12335
12336void
12337bxe_parity_recover(struct bxe_softc *sc)
12338{
12339    uint8_t global = FALSE;
12340    uint32_t error_recovered, error_unrecovered;
12341
12342
12343    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12344        (sc->state == BXE_STATE_ERROR)) {
12345        BLOGE(sc, "RECOVERY failed, "
12346            "stack notified driver is NOT running! "
12347            "Please reboot/power cycle the system.\n");
12348        return;
12349    }
12350
12351    while (1) {
12352        BLOGD(sc, DBG_SP,
12353           "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12354            __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12355
12356        switch(sc->recovery_state) {
12357
12358        case BXE_RECOVERY_INIT:
12359            bxe_chk_parity_attn(sc, &global, FALSE);
12360
12361            if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12362                (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12363                (sc->error_status & BXE_ERR_GLOBAL)) {
12364
12365                BXE_CORE_LOCK(sc);
12366                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12367                    bxe_periodic_stop(sc);
12368                }
12369                bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12370                sc->state = BXE_STATE_ERROR;
12371                sc->recovery_state = BXE_RECOVERY_FAILED;
12372                BLOGE(sc, "No recovery attempted for error 0x%x, "
12373                    "stack notified driver is NOT running! "
12374                    "Please reboot/power cycle the system.\n",
12375                    sc->error_status);
12376                BXE_CORE_UNLOCK(sc);
12377                return;
12378            }
12379
12380
12381            /* Try to get a LEADER_LOCK HW lock */
12382            if (bxe_trylock_leader_lock(sc)) {
12383
12384                bxe_set_reset_in_progress(sc);
12385                /*
12386                 * Check if there is a global attention and if
12387                 * there was a global attention, set the global
12388                 * reset bit.
12389                 */
12390                if (global) {
12391                    bxe_set_reset_global(sc);
12392                }
12393                sc->is_leader = 1;
12394            }
12395
12396            /* stop the periodic callout if the interface is still running */
12397
12398            if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12399                bxe_periodic_stop(sc);
12400            }
12401
12402            BXE_CORE_LOCK(sc);
12403            bxe_nic_unload(sc,UNLOAD_RECOVERY, false);
12404            sc->recovery_state = BXE_RECOVERY_WAIT;
12405            BXE_CORE_UNLOCK(sc);
12406
12407            /*
12408             * Ensure "is_leader", MCP command sequence and
12409             * "recovery_state" update values are seen on other
12410             * CPUs.
12411             */
12412            mb();
12413            break;
12414        case BXE_RECOVERY_WAIT:
12415
12416            if (sc->is_leader) {
12417                int other_engine = SC_PATH(sc) ? 0 : 1;
12418                bool other_load_status =
12419                    bxe_get_load_status(sc, other_engine);
12420                bool load_status =
12421                    bxe_get_load_status(sc, SC_PATH(sc));
12422                global = bxe_reset_is_global(sc);
12423
12424                /*
12425                 * In case of a parity in a global block, let
12426                 * the first leader that performs a
12427                 * leader_reset() reset the global blocks in
12428                 * order to clear global attentions. Otherwise
12429                 * the gates will remain closed for that
12430                 * engine.
12431                 */
12432                if (load_status ||
12433                    (global && other_load_status)) {
12434                    /*
12435                     * Wait until all other functions get
12436                     * down.
12437                     */
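                    /* re-check in roughly 100 ms (hz/10 ticks) */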
12438                    taskqueue_enqueue_timeout(taskqueue_thread,
12439                        &sc->sp_err_timeout_task, hz/10);
12440                    return;
12441                } else {
12442                    /*
12443                     * If all other functions got down
12444                     * try to bring the chip back to
12445                     * normal. In any case it's an exit
12446                     * point for a leader.
12447                     */
12448                    if (bxe_leader_reset(sc)) {
12449                        BLOGE(sc, "RECOVERY failed, "
12450                            "stack notified driver is NOT running!\n");
12451                        sc->recovery_state = BXE_RECOVERY_FAILED;
12452                        sc->state = BXE_STATE_ERROR;
12453                        mb();
12454                        return;
12455                    }
12456
12457                    /*
12458                     * If we are here, it means that the
12459                     * leader reset succeeded and we don't
12460                     * want to be the leader any more. Try
12461                     * to continue as a non-leader.
12462                     */
12463                    break;
12464                }
12465
12466            } else { /* non-leader */
12467                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12468                    /*
12469                     * Try to get a LEADER_LOCK HW lock as
12470                     * long as a former leader may have
12471                     * been unloaded by the user or
12472                     * released a leadership by another
12473                     * reason.
12474                     */
12475                    if (bxe_trylock_leader_lock(sc)) {
12476                        /*
12477                         * I'm a leader now! Restart a
12478                         * switch case.
12479                         */
12480                        sc->is_leader = 1;
12481                        break;
12482                    }
12483
12484                    taskqueue_enqueue_timeout(taskqueue_thread,
12485                        &sc->sp_err_timeout_task, hz/10);
12486                    return;
12487
12488                } else {
12489                    /*
12490                     * If there was a global attention, wait
12491                     * for it to be cleared.
12492                     */
12493                    if (bxe_reset_is_global(sc)) {
12494                        taskqueue_enqueue_timeout(taskqueue_thread,
12495                            &sc->sp_err_timeout_task, hz/10);
12496                        return;
12497                     }
12498
12499                     error_recovered =
12500                         sc->eth_stats.recoverable_error;
12501                     error_unrecovered =
12502                         sc->eth_stats.unrecoverable_error;
12503                     BXE_CORE_LOCK(sc);
12504                     sc->recovery_state =
12505                         BXE_RECOVERY_NIC_LOADING;
12506                     if (bxe_nic_load(sc, LOAD_NORMAL)) {
12507                         error_unrecovered++;
12508                         sc->recovery_state = BXE_RECOVERY_FAILED;
12509                         sc->state = BXE_STATE_ERROR;
12510                         BLOGE(sc, "Recovery is NOT successful, "
12511                            " state=0x%x recovery_state=0x%x error=%x\n",
12512                            sc->state, sc->recovery_state, sc->error_status);
12513                         sc->error_status = 0;
12514                     } else {
12515                         sc->recovery_state =
12516                             BXE_RECOVERY_DONE;
12517                         error_recovered++;
12518                         BLOGI(sc, "Recovery is successful from errors %x,"
12519                            " state=0x%x"
12520                            " recovery_state=0x%x \n", sc->error_status,
12521                            sc->state, sc->recovery_state);
12522                         mb();
12523                     }
12524                     sc->error_status = 0;
12525                     BXE_CORE_UNLOCK(sc);
12526                     sc->eth_stats.recoverable_error =
12527                         error_recovered;
12528                     sc->eth_stats.unrecoverable_error =
12529                         error_unrecovered;
12530
12531                     return;
12532                }
12533            }
12534        default:
12535            return;
12536        }
12537    }
12538}

12539void
12540bxe_handle_error(struct bxe_softc *sc)
12541{
12542
12543    if (sc->recovery_state == BXE_RECOVERY_WAIT) {
12544        return;
12545    }
12546    if (sc->error_status) {
12547        if (sc->state == BXE_STATE_OPEN)  {
12548            bxe_int_disable(sc);
12549        }
12550        if (sc->link_vars.link_up) {
12551            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12552        }
12553        sc->recovery_state = BXE_RECOVERY_INIT;
12554        BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12555            sc->unit, sc->error_status, sc->recovery_state);
12556        bxe_parity_recover(sc);
12557   }
12558}
12559
12560static void
12561bxe_sp_err_timeout_task(void *arg, int pending)
12562{
12563
12564    struct bxe_softc *sc = (struct bxe_softc *)arg;
12565
12566    BLOGD(sc, DBG_SP,
12567        "%s state = 0x%x rec state=0x%x error_status=%x\n",
12568        __func__, sc->state, sc->recovery_state, sc->error_status);
12569
12570    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12571        (sc->state == BXE_STATE_ERROR)) {
12572        return;
12573    }
12574    /* take a GRC dump now if one was requested and an error is pending */
12575    if ((sc->error_status) && (sc->trigger_grcdump)) {
12576        bxe_grc_dump(sc);
12577    }
12578    if (sc->recovery_state != BXE_RECOVERY_DONE) {
12579        bxe_handle_error(sc);
12580        bxe_parity_recover(sc);
12581    } else if (sc->error_status) {
12582        bxe_handle_error(sc);
12583    }
12584
12585    return;
12586}
12587
12588/* start the controller */
12589static __noinline int
12590bxe_nic_load(struct bxe_softc *sc,
12591             int              load_mode)
12592{
12593    uint32_t val;
12594    int load_code = 0;
12595    int i, rc = 0;
12596
12597    BXE_CORE_LOCK_ASSERT(sc);
12598
12599    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12600
12601    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12602
12603    if (IS_PF(sc)) {
12604        /* must be called before memory allocation and HW init */
12605        bxe_ilt_set_info(sc);
12606    }
12607
12608    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12609
12610    bxe_set_fp_rx_buf_size(sc);
12611
12612    if (bxe_alloc_fp_buffers(sc) != 0) {
12613        BLOGE(sc, "Failed to allocate fastpath memory\n");
12614        sc->state = BXE_STATE_CLOSED;
12615        rc = ENOMEM;
12616        goto bxe_nic_load_error0;
12617    }
12618
12619    if (bxe_alloc_mem(sc) != 0) {
12620        sc->state = BXE_STATE_CLOSED;
12621        rc = ENOMEM;
12622        goto bxe_nic_load_error0;
12623    }
12624
12625    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12626        sc->state = BXE_STATE_CLOSED;
12627        rc = ENOMEM;
12628        goto bxe_nic_load_error0;
12629    }
12630
12631    if (IS_PF(sc)) {
12632        /* set pf load just before approaching the MCP */
12633        bxe_set_pf_load(sc);
12634
12635        /* if MCP exists send load request and analyze response */
12636        if (!BXE_NOMCP(sc)) {
12637            /* attempt to load pf */
12638            if (bxe_nic_load_request(sc, &load_code) != 0) {
12639                sc->state = BXE_STATE_CLOSED;
12640                rc = ENXIO;
12641                goto bxe_nic_load_error1;
12642            }
12643
12644            /* what did the MCP say? */
12645            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12646                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12647                sc->state = BXE_STATE_CLOSED;
12648                rc = ENXIO;
12649                goto bxe_nic_load_error2;
12650            }
12651        } else {
12652            BLOGI(sc, "Device has no MCP!\n");
12653            load_code = bxe_nic_load_no_mcp(sc);
12654        }
12655
12656        /* mark PMF if applicable */
12657        bxe_nic_load_pmf(sc, load_code);
12658
12659        /* Init Function state controlling object */
12660        bxe_init_func_obj(sc);
12661
12662        /* Initialize HW */
12663        if (bxe_init_hw(sc, load_code) != 0) {
12664            BLOGE(sc, "HW init failed\n");
12665            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12666            sc->state = BXE_STATE_CLOSED;
12667            rc = ENXIO;
12668            goto bxe_nic_load_error2;
12669        }
12670    }
12671
12672    /* set ALWAYS_ALIVE bit in shmem */
12673    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12674    bxe_drv_pulse(sc);
12675    sc->flags |= BXE_NO_PULSE;
12676
12677    /* attach interrupts */
12678    if (bxe_interrupt_attach(sc) != 0) {
12679        sc->state = BXE_STATE_CLOSED;
12680        rc = ENXIO;
12681        goto bxe_nic_load_error2;
12682    }
12683
12684    bxe_nic_init(sc, load_code);
12685
12686    /* Init per-function objects */
12687    if (IS_PF(sc)) {
12688        bxe_init_objs(sc);
12689        // XXX bxe_iov_nic_init(sc);
12690
12691        /* set AFEX default VLAN tag to an invalid value */
12692        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12693        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12694
12695        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12696        rc = bxe_func_start(sc);
12697        if (rc) {
12698            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12699            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12700            sc->state = BXE_STATE_ERROR;
12701            goto bxe_nic_load_error3;
12702        }
12703
12704        /* send LOAD_DONE command to MCP */
12705        if (!BXE_NOMCP(sc)) {
12706            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12707            if (!load_code) {
12708                BLOGE(sc, "MCP response failure, aborting\n");
12709                sc->state = BXE_STATE_ERROR;
12710                rc = ENXIO;
12711                goto bxe_nic_load_error3;
12712            }
12713        }
12714
12715        rc = bxe_setup_leading(sc);
12716        if (rc) {
12717            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12718            sc->state = BXE_STATE_ERROR;
12719            goto bxe_nic_load_error3;
12720        }
12721
12722        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12723            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12724            if (rc) {
12725                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12726                sc->state = BXE_STATE_ERROR;
12727                goto bxe_nic_load_error3;
12728            }
12729        }
12730
12731        rc = bxe_init_rss_pf(sc);
12732        if (rc) {
12733            BLOGE(sc, "PF RSS init failed\n");
12734            sc->state = BXE_STATE_ERROR;
12735            goto bxe_nic_load_error3;
12736        }
12737    }
12738    /* XXX VF */
12739
12740    /* now when Clients are configured we are ready to work */
12741    sc->state = BXE_STATE_OPEN;
12742
12743    /* Configure a ucast MAC */
12744    if (IS_PF(sc)) {
12745        rc = bxe_set_eth_mac(sc, TRUE);
12746    }
12747    if (rc) {
12748        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12749        sc->state = BXE_STATE_ERROR;
12750        goto bxe_nic_load_error3;
12751    }
12752
12753    if (sc->port.pmf) {
12754        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12755        if (rc) {
12756            sc->state = BXE_STATE_ERROR;
12757            goto bxe_nic_load_error3;
12758        }
12759    }
12760
12761    sc->link_params.feature_config_flags &=
12762        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12763
12764    /* start fast path */
12765
12766    /* Initialize Rx filter */
12767    bxe_set_rx_mode(sc);
12768
12769    /* start the Tx */
12770    switch (/* XXX load_mode */LOAD_OPEN) {
12771    case LOAD_NORMAL:
12772    case LOAD_OPEN:
12773        break;
12774
12775    case LOAD_DIAG:
12776    case LOAD_LOOPBACK_EXT:
12777        sc->state = BXE_STATE_DIAG;
12778        break;
12779
12780    default:
12781        break;
12782    }
12783
12784    if (sc->port.pmf) {
12785        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12786    } else {
12787        bxe_link_status_update(sc);
12788    }
12789
12790    /* start the periodic timer callout */
12791    bxe_periodic_start(sc);
12792
12793    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12794        /* mark driver is loaded in shmem2 */
12795        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12796        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12797                  (val |
12798                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12799                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12800    }
12801
12802    /* wait for all pending SP commands to complete */
12803    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12804        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12805        bxe_periodic_stop(sc);
12806        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12807        return (ENXIO);
12808    }
12809
12810    /* Tell the stack the driver is running! */
12811    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12812
12813    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12814
12815    return (0);
12816
12817bxe_nic_load_error3:
12818
12819    if (IS_PF(sc)) {
12820        bxe_int_disable_sync(sc, 1);
12821
12822        /* clean out queued objects */
12823        bxe_squeeze_objects(sc);
12824    }
12825
12826    bxe_interrupt_detach(sc);
12827
12828bxe_nic_load_error2:
12829
12830    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12831        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12832        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12833    }
12834
12835    sc->port.pmf = 0;
12836
12837bxe_nic_load_error1:
12838
12839    /* clear pf_load status, as it was already set */
12840    if (IS_PF(sc)) {
12841        bxe_clear_pf_load(sc);
12842    }
12843
12844bxe_nic_load_error0:
12845
12846    bxe_free_fw_stats_mem(sc);
12847    bxe_free_fp_buffers(sc);
12848    bxe_free_mem(sc);
12849
12850    return (rc);
12851}
12852
12853static int
12854bxe_init_locked(struct bxe_softc *sc)
12855{
12856    int other_engine = SC_PATH(sc) ? 0 : 1;
12857    uint8_t other_load_status, load_status;
12858    uint8_t global = FALSE;
12859    int rc;
12860
12861    BXE_CORE_LOCK_ASSERT(sc);
12862
12863    /* check if the driver is already running */
12864    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12865        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12866        return (0);
12867    }
12868
12869    if((sc->state == BXE_STATE_ERROR) &&
12870        (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
                  "as the previous recovery failed. "
                  "Reboot/Power-cycle the system\n");
12874        return (ENXIO);
12875    }
12876
12877
12878    bxe_set_power_state(sc, PCI_PM_D0);
12879
    /*
     * If a parity error occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for the PF driver.
     */
12886    if (IS_PF(sc)) {
12887        other_load_status = bxe_get_load_status(sc, other_engine);
12888        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12889
12890        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12891            bxe_chk_parity_attn(sc, &global, TRUE)) {
12892            do {
12893                /*
12894                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless of whether it will be this
                 * function that completes the recovery or not.
12897                 */
12898                if (global) {
12899                    bxe_set_reset_global(sc);
12900                }
12901
12902                /*
12903                 * Only the first function on the current engine should try
12904                 * to recover in open. In case of attentions in global blocks
12905                 * only the first in the chip should try to recover.
12906                 */
12907                if ((!load_status && (!global || !other_load_status)) &&
12908                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12909                    BLOGI(sc, "Recovered during init\n");
12910                    break;
12911                }
12912
12913                /* recovery has failed... */
12914                bxe_set_power_state(sc, PCI_PM_D3hot);
12915                sc->recovery_state = BXE_RECOVERY_FAILED;
12916
12917                BLOGE(sc, "Recovery flow hasn't properly "
12918                          "completed yet, try again later. "
12919                          "If you still see this message after a "
                          "few retries then a power cycle is required.\n");
12921
12922                rc = ENXIO;
12923                goto bxe_init_locked_done;
12924            } while (0);
12925        }
12926    }
12927
12928    sc->recovery_state = BXE_RECOVERY_DONE;
12929
12930    rc = bxe_nic_load(sc, LOAD_OPEN);
12931
12932bxe_init_locked_done:
12933
12934    if (rc) {
12935        /* Tell the stack the driver is NOT running! */
12936        BLOGE(sc, "Initialization failed, "
12937                  "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12939    }
12940
12941    return (rc);
12942}
12943
12944static int
12945bxe_stop_locked(struct bxe_softc *sc)
12946{
12947    BXE_CORE_LOCK_ASSERT(sc);
12948    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12949}
12950
12951/*
12952 * Handles controller initialization when called from an unlocked routine.
12953 * ifconfig calls this function.
12954 *
12955 * Returns:
12956 *   void
12957 */
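/*
 * Rough call path for reference (assuming the standard ifnet plumbing):
 *   "ifconfig bxeX up" -> if_init() -> bxe_init() -> bxe_init_locked()
 *     -> bxe_nic_load(sc, LOAD_OPEN)
 */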
12958static void
12959bxe_init(void *xsc)
12960{
12961    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12962
12963    BXE_CORE_LOCK(sc);
12964    bxe_init_locked(sc);
12965    BXE_CORE_UNLOCK(sc);
12966}
12967
12968static int
12969bxe_init_ifnet(struct bxe_softc *sc)
12970{
12971    if_t ifp;
12972    int capabilities;
12973
12974    /* ifconfig entrypoint for media type/status reporting */
12975    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12976                 bxe_ifmedia_update,
12977                 bxe_ifmedia_status);
12978
12979    /* set the default interface values */
12980    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12981    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12982    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12983
12984    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
    BLOGI(sc, "IFMEDIA flags: %x\n", sc->ifmedia.ifm_media);
12986
12987    /* allocate the ifnet structure */
12988    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12989        BLOGE(sc, "Interface allocation failed!\n");
12990        return (ENXIO);
12991    }
12992
12993    if_setsoftc(ifp, sc);
12994    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12995    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12996    if_setioctlfn(ifp, bxe_ioctl);
12997    if_setstartfn(ifp, bxe_tx_start);
12998    if_setgetcounterfn(ifp, bxe_get_counter);
12999    if_settransmitfn(ifp, bxe_tx_mq_start);
13000    if_setqflushfn(ifp, bxe_mq_flush);
13001    if_setinitfn(ifp, bxe_init);
13002    if_setmtu(ifp, sc->mtu);
13003    if_sethwassist(ifp, (CSUM_IP      |
13004                        CSUM_TCP      |
13005                        CSUM_UDP      |
13006                        CSUM_TSO      |
13007                        CSUM_TCP_IPV6 |
13008                        CSUM_UDP_IPV6));
13009
13010    capabilities =
13011        (IFCAP_VLAN_MTU       |
13012         IFCAP_VLAN_HWTAGGING |
13013         IFCAP_VLAN_HWTSO     |
13014         IFCAP_VLAN_HWFILTER  |
13015         IFCAP_VLAN_HWCSUM    |
13016         IFCAP_HWCSUM         |
13017         IFCAP_JUMBO_MTU      |
13018         IFCAP_LRO            |
13019         IFCAP_TSO4           |
13020         IFCAP_TSO6           |
13021         IFCAP_WOL_MAGIC);
13022    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13023    if_setcapenable(ifp, if_getcapabilities(ifp));
13024    if_setbaudrate(ifp, IF_Gbps(10));
13025/* XXX */
13026    if_setsendqlen(ifp, sc->tx_ring_size);
13027    if_setsendqready(ifp);
13028/* XXX */
13029
13030    sc->ifp = ifp;
13031
13032    /* attach to the Ethernet interface list */
13033    ether_ifattach(ifp, sc->link_params.mac_addr);
13034
13035    /* Attach driver debugnet methods. */
13036    DEBUGNET_SET(ifp, bxe);
13037
13038    return (0);
13039}
13040
13041static void
13042bxe_deallocate_bars(struct bxe_softc *sc)
13043{
13044    int i;
13045
13046    for (i = 0; i < MAX_BARS; i++) {
13047        if (sc->bar[i].resource != NULL) {
13048            bus_release_resource(sc->dev,
13049                                 SYS_RES_MEMORY,
13050                                 sc->bar[i].rid,
13051                                 sc->bar[i].resource);
13052            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13053                  i, PCIR_BAR(i));
13054        }
13055    }
13056}
13057
13058static int
13059bxe_allocate_bars(struct bxe_softc *sc)
13060{
13061    u_int flags;
13062    int i;
13063
13064    memset(sc->bar, 0, sizeof(sc->bar));
13065
13066    for (i = 0; i < MAX_BARS; i++) {
13067
13068        /* memory resources reside at BARs 0, 2, 4 */
13069        /* Run `pciconf -lb` to see mappings */
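        /*
         * Hypothetical `pciconf -lb` excerpt (addresses and sizes are
         * illustrative only, not taken from real hardware):
         *   bxe0@pci0:4:0:0: class=0x020000 ...
         *       bar   [10] = type Memory, range 64, base 0xd0000000, size 8388608, enabled
         *       bar   [18] = type Memory, range 64, base 0xd0800000, size 524288, enabled
         */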
13070        if ((i != 0) && (i != 2) && (i != 4)) {
13071            continue;
13072        }
13073
13074        sc->bar[i].rid = PCIR_BAR(i);
13075
13076        flags = RF_ACTIVE;
13077        if (i == 0) {
13078            flags |= RF_SHAREABLE;
13079        }
13080
13081        if ((sc->bar[i].resource =
13082             bus_alloc_resource_any(sc->dev,
13083                                    SYS_RES_MEMORY,
13084                                    &sc->bar[i].rid,
13085                                    flags)) == NULL) {
            /* report the failure so the caller can fail the attach */
            return (ENXIO);
13087        }
13088
13089        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13090        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13091        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13092
13093        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13094              i, PCIR_BAR(i),
13095              rman_get_start(sc->bar[i].resource),
13096              rman_get_end(sc->bar[i].resource),
13097              rman_get_size(sc->bar[i].resource),
13098              (uintmax_t)sc->bar[i].kva);
13099    }
13100
13101    return (0);
13102}
13103
13104static void
13105bxe_get_function_num(struct bxe_softc *sc)
13106{
13107    uint32_t val = 0;
13108
13109    /*
13110     * Read the ME register to get the function number. The ME register
13111     * holds the relative-function number and absolute-function number. The
13112     * absolute-function number appears only in E2 and above. Before that
13113     * these bits always contained zero, therefore we cannot blindly use them.
13114     */
13115
13116    val = REG_RD(sc, BAR_ME_REGISTER);
13117
13118    sc->pfunc_rel =
13119        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13120    sc->path_id =
13121        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13122
13123    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13124        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13125    } else {
13126        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13127    }
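
    /*
     * Worked example (illustrative values): in 4-port mode, pfunc_rel=1 and
     * path_id=1 yield pfunc_abs = (1 << 1) | 1 = 3; in 2-port mode the same
     * inputs yield pfunc_abs = 1 | 1 = 1.
     */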
13128
13129    BLOGD(sc, DBG_LOAD,
13130          "Relative function %d, Absolute function %d, Path %d\n",
13131          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13132}
13133
13134static uint32_t
13135bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13136{
13137    uint32_t shmem2_size;
13138    uint32_t offset;
13139    uint32_t mf_cfg_offset_value;
13140
13141    /* Non 57712 */
13142    offset = (SHMEM_RD(sc, func_mb) +
13143              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
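
    /*
     * Descriptive note: by default the MF config block is assumed to follow
     * the MAX_FUNC_NUM per-function mailboxes in shmem. On 57712 and newer
     * the firmware may publish an explicit address via shmem2 (mf_cfg_addr),
     * which overrides this default below.
     */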
13144
13145    /* 57712 plus */
13146    if (sc->devinfo.shmem2_base != 0) {
13147        shmem2_size = SHMEM2_RD(sc, size);
13148        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13149            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13150            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13151                offset = mf_cfg_offset_value;
13152            }
13153        }
13154    }
13155
13156    return (offset);
13157}
13158
13159static uint32_t
13160bxe_pcie_capability_read(struct bxe_softc *sc,
13161                         int    reg,
13162                         int    width)
13163{
13164    int pcie_reg;
13165
13166    /* ensure PCIe capability is enabled */
13167    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13168        if (pcie_reg != 0) {
13169            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13170            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13171        }
13172    }
13173
13174    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13175
13176    return (0);
13177}
13178
13179static uint8_t
13180bxe_is_pcie_pending(struct bxe_softc *sc)
13181{
13182    return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13183            PCIEM_STA_TRANSACTION_PND);
13184}
13185
13186/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
13189 * best to walk the list rather than make assumptions.
13190 */
13191static void
13192bxe_probe_pci_caps(struct bxe_softc *sc)
13193{
13194    uint16_t link_status;
13195    int reg;
13196
13197    /* check if PCI Power Management is enabled */
13198    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13199        if (reg != 0) {
13200            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13201
13202            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13203            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13204        }
13205    }
13206
13207    link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13208
13209    /* handle PCIe 2.0 workarounds for 57710 */
13210    if (CHIP_IS_E1(sc)) {
13211        /* workaround for 57710 errata E4_57710_27462 */
13212        sc->devinfo.pcie_link_speed =
13213            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13214
13215        /* workaround for 57710 errata E4_57710_27488 */
13216        sc->devinfo.pcie_link_width =
13217            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13218        if (sc->devinfo.pcie_link_speed > 1) {
13219            sc->devinfo.pcie_link_width =
13220                ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13221        }
13222    } else {
13223        sc->devinfo.pcie_link_speed =
13224            (link_status & PCIEM_LINK_STA_SPEED);
13225        sc->devinfo.pcie_link_width =
13226            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13227    }
13228
13229    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13230          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13231
13232    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13233    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13234
13235    /* check if MSI capability is enabled */
13236    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13237        if (reg != 0) {
13238            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13239
13240            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13241            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13242        }
13243    }
13244
13245    /* check if MSI-X capability is enabled */
13246    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13247        if (reg != 0) {
13248            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13249
13250            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13251            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13252        }
13253    }
13254}
13255
13256static int
13257bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13258{
13259    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13260    uint32_t val;
13261
13262    /* get the outer vlan if we're in switch-dependent mode */
13263
13264    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13265    mf_info->ext_id = (uint16_t)val;
13266
13267    mf_info->multi_vnics_mode = 1;
13268
13269    if (!VALID_OVLAN(mf_info->ext_id)) {
13270        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13271        return (1);
13272    }
13273
13274    /* get the capabilities */
13275    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13276        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13277        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13278    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13279               FUNC_MF_CFG_PROTOCOL_FCOE) {
13280        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13281    } else {
13282        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13283    }
13284
13285    mf_info->vnics_per_port =
13286        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13287
13288    return (0);
13289}
13290
13291static uint32_t
13292bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13293{
13294    uint32_t retval = 0;
13295    uint32_t val;
13296
13297    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13298
13299    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13300        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13301            retval |= MF_PROTO_SUPPORT_ETHERNET;
13302        }
13303        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13304            retval |= MF_PROTO_SUPPORT_ISCSI;
13305        }
13306        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13307            retval |= MF_PROTO_SUPPORT_FCOE;
13308        }
13309    }
13310
13311    return (retval);
13312}
13313
13314static int
13315bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13316{
13317    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13318    uint32_t val;
13319
13320    /*
13321     * There is no outer vlan if we're in switch-independent mode.
13322     * If the mac is valid then assume multi-function.
13323     */
13324
13325    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13326
13327    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13328
13329    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13330
13331    mf_info->vnics_per_port =
13332        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13333
13334    return (0);
13335}
13336
13337static int
13338bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13339{
13340    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13341    uint32_t e1hov_tag;
13342    uint32_t func_config;
13343    uint32_t niv_config;
13344
13345    mf_info->multi_vnics_mode = 1;
13346
13347    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13348    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13349    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13350
13351    mf_info->ext_id =
13352        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13353                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13354
13355    mf_info->default_vlan =
13356        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13357                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13358
13359    mf_info->niv_allowed_priorities =
13360        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13361                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13362
13363    mf_info->niv_default_cos =
13364        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13365                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13366
13367    mf_info->afex_vlan_mode =
13368        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13369         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13370
13371    mf_info->niv_mba_enabled =
13372        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13373         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13374
13375    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13376
13377    mf_info->vnics_per_port =
13378        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13379
13380    return (0);
13381}
13382
13383static int
13384bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13385{
13386    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13387    uint32_t mf_cfg1;
13388    uint32_t mf_cfg2;
13389    uint32_t ovlan1;
13390    uint32_t ovlan2;
13391    uint8_t i, j;
13392
13393    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13394          SC_PORT(sc));
13395    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13396          mf_info->mf_config[SC_VN(sc)]);
13397    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13398          mf_info->multi_vnics_mode);
13399    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13400          mf_info->vnics_per_port);
13401    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13402          mf_info->ext_id);
13403    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13404          mf_info->min_bw[0], mf_info->min_bw[1],
13405          mf_info->min_bw[2], mf_info->min_bw[3]);
13406    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13407          mf_info->max_bw[0], mf_info->max_bw[1],
13408          mf_info->max_bw[2], mf_info->max_bw[3]);
13409    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13410          sc->mac_addr_str);
13411
13412    /* various MF mode sanity checks... */
13413
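    /*
     * Summary of the checks below: the enumerated function must not be
     * hidden; vnics_per_port > 1 requires multi_vnics_mode; and in SD mode
     * every non-hidden function needs a valid ovlan, distinct from the
     * ovlans of the other functions on the same port.
     */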
13414    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13415        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13416              SC_PORT(sc));
13417        return (1);
13418    }
13419
13420    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13421        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13422              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13423        return (1);
13424    }
13425
13426    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13427        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13428        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13429            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13430                  SC_VN(sc), OVLAN(sc));
13431            return (1);
13432        }
13433
13434        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13435            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13436                  mf_info->multi_vnics_mode, OVLAN(sc));
13437            return (1);
13438        }
13439
13440        /*
         * Verify all functions are either in MF or SF mode. If MF, make
         * sure that all non-hidden functions have a valid ovlan. If SF,
         * make sure that all non-hidden functions have an invalid ovlan.
13444         */
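        /*
         * Illustrative example (hypothetical tags): two non-hidden functions
         * on a port in MF-SD mode must both carry valid outer VLANs, e.g.
         * 100 and 101; in SF mode both must report the default (invalid)
         * ovlan instead.
         */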
13445        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13446            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13447            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13448            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13449                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13450                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13451                BLOGE(sc, "mf_mode=SD function %d MF config "
13452                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13453                      i, mf_info->multi_vnics_mode, ovlan1);
13454                return (1);
13455            }
13456        }
13457
13458        /* Verify all funcs on the same port each have a different ovlan. */
13459        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13460            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13461            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13462            /* iterate from the next function on the port to the max func */
13463            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13464                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13465                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13466                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13467                    VALID_OVLAN(ovlan1) &&
13468                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13469                    VALID_OVLAN(ovlan2) &&
13470                    (ovlan1 == ovlan2)) {
13471                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13472                              "have the same ovlan (%d)\n",
13473                          i, j, ovlan1);
13474                    return (1);
13475                }
13476            }
13477        }
13478    } /* MULTI_FUNCTION_SD */
13479
13480    return (0);
13481}
13482
13483static int
13484bxe_get_mf_cfg_info(struct bxe_softc *sc)
13485{
13486    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13487    uint32_t val, mac_upper;
13488    uint8_t i, vnic;
13489
13490    /* initialize mf_info defaults */
13491    mf_info->vnics_per_port   = 1;
13492    mf_info->multi_vnics_mode = FALSE;
13493    mf_info->path_has_ovlan   = FALSE;
13494    mf_info->mf_mode          = SINGLE_FUNCTION;
13495
13496    if (!CHIP_IS_MF_CAP(sc)) {
13497        return (0);
13498    }
13499
13500    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13501        BLOGE(sc, "Invalid mf_cfg_base!\n");
13502        return (1);
13503    }
13504
13505    /* get the MF mode (switch dependent / independent / single-function) */
13506
13507    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13508
13509    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13510    {
13511    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13512
13513        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13514
13515        /* check for legal upper mac bytes */
13516        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13517            mf_info->mf_mode = MULTI_FUNCTION_SI;
13518        } else {
13519            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13520        }
13521
13522        break;
13523
13524    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13525    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13526
13527        /* get outer vlan configuration */
13528        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13529
13530        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13531            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13532            mf_info->mf_mode = MULTI_FUNCTION_SD;
13533        } else {
13534            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13535        }
13536
13537        break;
13538
13539    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13540
13541        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13542        return (0);
13543
13544    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13545
13546        /*
13547         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13548         * and the MAC address is valid.
13549         */
13550        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13551
13552        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13553            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13554            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13555        } else {
13556            BLOGE(sc, "Invalid config for AFEX mode\n");
13557        }
13558
13559        break;
13560
13561    default:
13562
13563        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13564              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13565
13566        return (1);
13567    }
13568
13569    /* set path mf_mode (which could be different than function mf_mode) */
13570    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13571        mf_info->path_has_ovlan = TRUE;
13572    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * Decide on the path multi-vnics mode. If we're not in MF mode and
         * we're in 4-port mode, it is enough to check vnic-0 of the other
         * port on the same path.
         */
13578        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13579            uint8_t other_port = !(PORT_ID(sc) & 1);
13580            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
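
            /*
             * Worked example (illustrative): path 0, port 0 -> other_port = 1,
             * abs_func_other_port = 0 + (2 * 1) = 2, i.e. vnic-0 of the other
             * port on this path.
             */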
13581
13582            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13583
13584            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13585        }
13586    }
13587
13588    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13589        /* invalid MF config */
13590        if (SC_VN(sc) >= 1) {
13591            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13592            return (1);
13593        }
13594
13595        return (0);
13596    }
13597
13598    /* get the MF configuration */
13599    mf_info->mf_config[SC_VN(sc)] =
13600        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13601
13602    switch(mf_info->mf_mode)
13603    {
13604    case MULTI_FUNCTION_SD:
13605
13606        bxe_get_shmem_mf_cfg_info_sd(sc);
13607        break;
13608
13609    case MULTI_FUNCTION_SI:
13610
13611        bxe_get_shmem_mf_cfg_info_si(sc);
13612        break;
13613
13614    case MULTI_FUNCTION_AFEX:
13615
13616        bxe_get_shmem_mf_cfg_info_niv(sc);
13617        break;
13618
13619    default:
13620
13621        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13622              mf_info->mf_mode);
13623        return (1);
13624    }
13625
13626    /* get the congestion management parameters */
13627
13628    vnic = 0;
13629    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13630        /* get min/max bw */
13631        val = MFCFG_RD(sc, func_mf_config[i].config);
13632        mf_info->min_bw[vnic] =
13633            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13634        mf_info->max_bw[vnic] =
13635            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13636        vnic++;
13637    }
13638
13639    return (bxe_check_valid_mf_cfg(sc));
13640}
13641
13642static int
13643bxe_get_shmem_info(struct bxe_softc *sc)
13644{
13645    int port;
13646    uint32_t mac_hi, mac_lo, val;
13647
13648    port = SC_PORT(sc);
13649    mac_hi = mac_lo = 0;
13650
13651    sc->link_params.sc   = sc;
13652    sc->link_params.port = port;
13653
13654    /* get the hardware config info */
13655    sc->devinfo.hw_config =
13656        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13657    sc->devinfo.hw_config2 =
13658        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13659
13660    sc->link_params.hw_led_mode =
13661        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13662         SHARED_HW_CFG_LED_MODE_SHIFT);
13663
13664    /* get the port feature config */
13665    sc->port.config =
13666        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13667
13668    /* get the link params */
13669    sc->link_params.speed_cap_mask[0] =
13670        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13671    sc->link_params.speed_cap_mask[1] =
13672        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13673
13674    /* get the lane config */
13675    sc->link_params.lane_config =
13676        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13677
13678    /* get the link config */
13679    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13680    sc->port.link_config[ELINK_INT_PHY] = val;
13681    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13682    sc->port.link_config[ELINK_EXT_PHY1] =
13683        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13684
13685    /* get the override preemphasis flag and enable it or turn it off */
13686    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13687    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13688        sc->link_params.feature_config_flags |=
13689            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13690    } else {
13691        sc->link_params.feature_config_flags &=
13692            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13693    }
13694
13695    /* get the initial value of the link params */
13696    sc->link_params.multi_phy_config =
13697        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13698
13699    /* get external phy info */
13700    sc->port.ext_phy_config =
13701        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13702
13703    /* get the multifunction configuration */
13704    bxe_get_mf_cfg_info(sc);
13705
13706    /* get the mac address */
13707    if (IS_MF(sc)) {
13708        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13709        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13710    } else {
13711        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13712        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13713    }
13714
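    /*
     * mac_upper holds the two most significant MAC bytes and mac_lower the
     * remaining four. Illustrative values: mac_hi=0x00000102 and
     * mac_lo=0x03040506 produce 01:02:03:04:05:06 below.
     */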
13715    if ((mac_lo == 0) && (mac_hi == 0)) {
13716        *sc->mac_addr_str = 0;
13717        BLOGE(sc, "No Ethernet address programmed!\n");
13718    } else {
13719        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13720        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13721        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13722        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13723        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13724        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13725        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13726                 "%02x:%02x:%02x:%02x:%02x:%02x",
13727                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13728                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13729                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13730        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13731    }
13732
13733    return (0);
13734}
13735
13736static void
13737bxe_get_tunable_params(struct bxe_softc *sc)
13738{
13739    /* sanity checks */
13740
13741    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13742        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13743        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13744        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13745        bxe_interrupt_mode = INTR_MODE_MSIX;
13746    }
13747
13748    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13749        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13750        bxe_queue_count = 0;
13751    }
13752
13753    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13754        if (bxe_max_rx_bufs == 0) {
13755            bxe_max_rx_bufs = RX_BD_USABLE;
13756        } else {
13757            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13758            bxe_max_rx_bufs = 2048;
13759        }
13760    }
13761
13762    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13763        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13764        bxe_hc_rx_ticks = 25;
13765    }
13766
13767    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13768        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13769        bxe_hc_tx_ticks = 50;
13770    }
13771
13772    if (bxe_max_aggregation_size == 0) {
13773        bxe_max_aggregation_size = TPA_AGG_SIZE;
13774    }
13775
13776    if (bxe_max_aggregation_size > 0xffff) {
13777        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13778              bxe_max_aggregation_size);
13779        bxe_max_aggregation_size = TPA_AGG_SIZE;
13780    }
13781
13782    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13783        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13784        bxe_mrrs = -1;
13785    }
13786
13787    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13788        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13789        bxe_autogreeen = 0;
13790    }
13791
13792    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13793        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13794        bxe_udp_rss = 0;
13795    }
13796
13797    /* pull in user settings */
13798
13799    sc->interrupt_mode       = bxe_interrupt_mode;
13800    sc->max_rx_bufs          = bxe_max_rx_bufs;
13801    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13802    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13803    sc->max_aggregation_size = bxe_max_aggregation_size;
13804    sc->mrrs                 = bxe_mrrs;
13805    sc->autogreeen           = bxe_autogreeen;
13806    sc->udp_rss              = bxe_udp_rss;
13807
13808    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13809        sc->num_queues = 1;
13810    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13811        sc->num_queues =
13812            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13813                MAX_RSS_CHAINS);
13814        if (sc->num_queues > mp_ncpus) {
13815            sc->num_queues = mp_ncpus;
13816        }
13817    }
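
    /*
     * Illustrative example: with MSI-X on an 8-CPU system and bxe_queue_count
     * left at 0, num_queues = min(8, MAX_RSS_CHAINS), clamped to mp_ncpus.
     */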
13818
13819    BLOGD(sc, DBG_LOAD,
13820          "User Config: "
13821          "debug=0x%lx "
13822          "interrupt_mode=%d "
13823          "queue_count=%d "
13824          "hc_rx_ticks=%d "
13825          "hc_tx_ticks=%d "
13826          "rx_budget=%d "
13827          "max_aggregation_size=%d "
13828          "mrrs=%d "
13829          "autogreeen=%d "
13830          "udp_rss=%d\n",
13831          bxe_debug,
13832          sc->interrupt_mode,
13833          sc->num_queues,
13834          sc->hc_rx_ticks,
13835          sc->hc_tx_ticks,
13836          bxe_rx_budget,
13837          sc->max_aggregation_size,
13838          sc->mrrs,
13839          sc->autogreeen,
13840          sc->udp_rss);
13841}
13842
13843static int
13844bxe_media_detect(struct bxe_softc *sc)
13845{
13846    int port_type;
13847    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13848
13849    switch (sc->link_params.phy[phy_idx].media_type) {
13850    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13851    case ELINK_ETH_PHY_XFP_FIBER:
13852        BLOGI(sc, "Found 10Gb Fiber media.\n");
13853        sc->media = IFM_10G_SR;
13854        port_type = PORT_FIBRE;
13855        break;
13856    case ELINK_ETH_PHY_SFP_1G_FIBER:
13857        BLOGI(sc, "Found 1Gb Fiber media.\n");
13858        sc->media = IFM_1000_SX;
13859        port_type = PORT_FIBRE;
13860        break;
13861    case ELINK_ETH_PHY_KR:
13862    case ELINK_ETH_PHY_CX4:
13863        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13864        sc->media = IFM_10G_CX4;
13865        port_type = PORT_FIBRE;
13866        break;
13867    case ELINK_ETH_PHY_DA_TWINAX:
13868        BLOGI(sc, "Found 10Gb Twinax media.\n");
13869        sc->media = IFM_10G_TWINAX;
13870        port_type = PORT_DA;
13871        break;
13872    case ELINK_ETH_PHY_BASE_T:
13873        if (sc->link_params.speed_cap_mask[0] &
13874            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13875            BLOGI(sc, "Found 10GBase-T media.\n");
13876            sc->media = IFM_10G_T;
13877            port_type = PORT_TP;
13878        } else {
13879            BLOGI(sc, "Found 1000Base-T media.\n");
13880            sc->media = IFM_1000_T;
13881            port_type = PORT_TP;
13882        }
13883        break;
13884    case ELINK_ETH_PHY_NOT_PRESENT:
13885        BLOGI(sc, "Media not present.\n");
13886        sc->media = 0;
13887        port_type = PORT_OTHER;
13888        break;
13889    case ELINK_ETH_PHY_UNSPECIFIED:
13890    default:
13891        BLOGI(sc, "Unknown media!\n");
13892        sc->media = 0;
13893        port_type = PORT_OTHER;
13894        break;
13895    }
13896    return port_type;
13897}
13898
13899#define GET_FIELD(value, fname)                     \
13900    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13901#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13902#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
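
/*
 * Descriptive note: each IGU CAM entry carries a valid bit plus the owning
 * function ID and vector number; IGU_FID()/IGU_VEC() extract the latter two
 * for bxe_get_igu_cam_info() below (vector 0 maps to the default status
 * block).
 */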
13903
13904static int
13905bxe_get_igu_cam_info(struct bxe_softc *sc)
13906{
13907    int pfid = SC_FUNC(sc);
13908    int igu_sb_id;
13909    uint32_t val;
13910    uint8_t fid, igu_sb_cnt = 0;
13911
13912    sc->igu_base_sb = 0xff;
13913
13914    if (CHIP_INT_MODE_IS_BC(sc)) {
13915        int vn = SC_VN(sc);
13916        igu_sb_cnt = sc->igu_sb_cnt;
13917        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13918                           FP_SB_MAX_E1x);
13919        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13920                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13921        return (0);
13922    }
13923
13924    /* IGU in normal mode - read CAM */
13925    for (igu_sb_id = 0;
13926         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13927         igu_sb_id++) {
13928        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13929        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13930            continue;
13931        }
13932        fid = IGU_FID(val);
13933        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13934            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13935                continue;
13936            }
13937            if (IGU_VEC(val) == 0) {
13938                /* default status block */
13939                sc->igu_dsb_id = igu_sb_id;
13940            } else {
13941                if (sc->igu_base_sb == 0xff) {
13942                    sc->igu_base_sb = igu_sb_id;
13943                }
13944                igu_sb_cnt++;
13945            }
13946        }
13947    }
13948
    /*
     * Due to the new PF resource allocation in MFW T7.4 and above, the
     * number of CAM entries may not be equal to the value advertised in the
     * PCI configuration space. The driver should use the minimum of the two
     * as the actual status block count.
     */
13955    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13956
13957    if (igu_sb_cnt == 0) {
13958        BLOGE(sc, "CAM configuration error\n");
13959        return (-1);
13960    }
13961
13962    return (0);
13963}
13964
13965/*
13966 * Gather various information from the device config space, the device itself,
13967 * shmem, and the user input.
13968 */
13969static int
13970bxe_get_device_info(struct bxe_softc *sc)
13971{
13972    uint32_t val;
13973    int rc;
13974
13975    /* Get the data for the device */
13976    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13977    sc->devinfo.device_id    = pci_get_device(sc->dev);
13978    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13979    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13980
13981    /* get the chip revision (chip metal comes from pci config space) */
13982    sc->devinfo.chip_id     =
13983    sc->link_params.chip_id =
13984        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13985         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13986         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13987         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13988
13989    /* force 57811 according to MISC register */
13990    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13991        if (CHIP_IS_57810(sc)) {
13992            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13993                                   (sc->devinfo.chip_id & 0x0000ffff));
13994        } else if (CHIP_IS_57810_MF(sc)) {
13995            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13996                                   (sc->devinfo.chip_id & 0x0000ffff));
13997        }
13998        sc->devinfo.chip_id |= 0x1;
13999    }
14000
14001    BLOGD(sc, DBG_LOAD,
14002          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14003          sc->devinfo.chip_id,
14004          ((sc->devinfo.chip_id >> 16) & 0xffff),
14005          ((sc->devinfo.chip_id >> 12) & 0xf),
14006          ((sc->devinfo.chip_id >>  4) & 0xff),
14007          ((sc->devinfo.chip_id >>  0) & 0xf));
14008
14009    val = (REG_RD(sc, 0x2874) & 0x55);
14010    if ((sc->devinfo.chip_id & 0x1) ||
14011        (CHIP_IS_E1(sc) && val) ||
14012        (CHIP_IS_E1H(sc) && (val == 0x55))) {
14013        sc->flags |= BXE_ONE_PORT_FLAG;
14014        BLOGD(sc, DBG_LOAD, "single port device\n");
14015    }
14016
14017    /* set the doorbell size */
14018    sc->doorbell_size = (1 << BXE_DB_SHIFT);
14019
14020    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
14022    if (CHIP_IS_E2E3(sc)) {
14023        /*
14024         * Read port4mode_en_ovwr[0]:
14025         *   If 1, four port mode is in port4mode_en_ovwr[1].
14026         *   If 0, four port mode is in port4mode_en[0].
14027         */
14028        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14029        if (val & 1) {
14030            val = ((val >> 1) & 1);
14031        } else {
14032            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14033        }
14034
14035        sc->devinfo.chip_port_mode =
14036            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14037
14038        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14039    }
14040
14041    /* get the function and path info for the device */
14042    bxe_get_function_num(sc);
14043
14044    /* get the shared memory base address */
14045    sc->devinfo.shmem_base     =
14046    sc->link_params.shmem_base =
14047        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14048    sc->devinfo.shmem2_base =
14049        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14050                                  MISC_REG_GENERIC_CR_0));
14051
14052    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14053          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14054
14055    if (!sc->devinfo.shmem_base) {
14056        /* this should ONLY prevent upcoming shmem reads */
14057        BLOGI(sc, "MCP not active\n");
14058        sc->flags |= BXE_NO_MCP_FLAG;
14059        return (0);
14060    }
14061
14062    /* make sure the shared memory contents are valid */
14063    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14064    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14065        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14066        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14067        return (0);
14068    }
14069    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14070
14071    /* get the bootcode version */
14072    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14073    snprintf(sc->devinfo.bc_ver_str,
14074             sizeof(sc->devinfo.bc_ver_str),
14075             "%d.%d.%d",
14076             ((sc->devinfo.bc_ver >> 24) & 0xff),
14077             ((sc->devinfo.bc_ver >> 16) & 0xff),
14078             ((sc->devinfo.bc_ver >>  8) & 0xff));
14079    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14080
14081    /* get the bootcode shmem address */
14082    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14084
14085    /* clean indirect addresses as they're not used */
14086    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14087    if (IS_PF(sc)) {
14088        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14089        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14090        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14091        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14092        if (CHIP_IS_E1x(sc)) {
14093            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14094            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14095            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14096            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14097        }
14098
14099        /*
14100         * Enable internal target-read (in case we are probed after PF
14101         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up.
14103         */
14104        if (!CHIP_IS_E1x(sc)) {
14105            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14106        }
14107    }
14108
14109    /* get the nvram size */
14110    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14111    sc->devinfo.flash_size =
14112        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14113    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14114
    /* get PCI capabilities */
14116    bxe_probe_pci_caps(sc);
14117
14118    bxe_set_power_state(sc, PCI_PM_D0);
14119
14120    /* get various configuration parameters from shmem */
14121    bxe_get_shmem_info(sc);
14122
14123    if (sc->devinfo.pcie_msix_cap_reg != 0) {
14124        val = pci_read_config(sc->dev,
14125                              (sc->devinfo.pcie_msix_cap_reg +
14126                               PCIR_MSIX_CTRL),
14127                              2);
14128        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14129    } else {
14130        sc->igu_sb_cnt = 1;
14131    }
14132
14133    sc->igu_base_addr = BAR_IGU_INTMEM;
14134
14135    /* initialize IGU parameters */
14136    if (CHIP_IS_E1x(sc)) {
14137        sc->devinfo.int_block = INT_BLOCK_HC;
14138        sc->igu_dsb_id = DEF_SB_IGU_ID;
14139        sc->igu_base_sb = 0;
14140    } else {
14141        sc->devinfo.int_block = INT_BLOCK_IGU;
14142
        /* do not allow device reset during IGU info processing */
14144        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14145
14146        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14147
14148        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14149            int tout = 5000;
14150
14151            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14152
14153            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14154            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14155            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14156
14157            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14158                tout--;
14159                DELAY(1000);
14160            }
14161
14162            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14163                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14164                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14165                return (-1);
14166            }
14167        }
14168
14169        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14170            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14171            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14172        } else {
14173            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14174        }
14175
14176        rc = bxe_get_igu_cam_info(sc);
14177
14178        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14179
14180        if (rc) {
14181            return (rc);
14182        }
14183    }
14184
14185    /*
14186     * Get base FW non-default (fast path) status block ID. This value is
14187     * used to initialize the fw_sb_id saved on the fp/queue structure to
14188     * determine the id used by the FW.
14189     */
14190    if (CHIP_IS_E1x(sc)) {
14191        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14192    } else {
14193        /*
14194         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14195         * the same queue are indicated on the same IGU SB). So we prefer
14196         * FW and IGU SBs to be the same value.
14197         */
14198        sc->base_fw_ndsb = sc->igu_base_sb;
14199    }
14200
14201    BLOGD(sc, DBG_LOAD,
14202          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14203          sc->igu_dsb_id, sc->igu_base_sb,
14204          sc->igu_sb_cnt, sc->base_fw_ndsb);
14205
14206    elink_phy_probe(&sc->link_params);
14207
14208    return (0);
14209}
14210
14211static void
14212bxe_link_settings_supported(struct bxe_softc *sc,
14213                            uint32_t         switch_cfg)
14214{
14215    uint32_t cfg_size = 0;
14216    uint32_t idx;
14217    uint8_t port = SC_PORT(sc);
14218
14219    /* aggregation of supported attributes of all external phys */
14220    sc->port.supported[0] = 0;
14221    sc->port.supported[1] = 0;
14222
14223    switch (sc->link_params.num_phys) {
14224    case 1:
14225        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14226        cfg_size = 1;
14227        break;
14228    case 2:
14229        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14230        cfg_size = 1;
14231        break;
14232    case 3:
14233        if (sc->link_params.multi_phy_config &
14234            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14235            sc->port.supported[1] =
14236                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14237            sc->port.supported[0] =
14238                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14239        } else {
14240            sc->port.supported[0] =
14241                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14242            sc->port.supported[1] =
14243                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14244        }
14245        cfg_size = 2;
14246        break;
14247    }
14248
14249    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14250        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14251              SHMEM_RD(sc,
14252                       dev_info.port_hw_config[port].external_phy_config),
14253              SHMEM_RD(sc,
14254                       dev_info.port_hw_config[port].external_phy_config2));
14255        return;
14256    }
14257
14258    if (CHIP_IS_E3(sc))
14259        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14260    else {
14261        switch (switch_cfg) {
14262        case ELINK_SWITCH_CFG_1G:
14263            sc->port.phy_addr =
14264                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14265            break;
14266        case ELINK_SWITCH_CFG_10G:
14267            sc->port.phy_addr =
14268                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14269            break;
14270        default:
14271            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14272                  sc->port.link_config[0]);
14273            return;
14274        }
14275    }
14276
14277    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14278
14279    /* mask what we support according to speed_cap_mask per configuration */
14280    for (idx = 0; idx < cfg_size; idx++) {
14281        if (!(sc->link_params.speed_cap_mask[idx] &
14282              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14283            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14284        }
14285
14286        if (!(sc->link_params.speed_cap_mask[idx] &
14287              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14288            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14289        }
14290
14291        if (!(sc->link_params.speed_cap_mask[idx] &
14292              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14293            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14294        }
14295
14296        if (!(sc->link_params.speed_cap_mask[idx] &
14297              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14298            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14299        }
14300
14301        if (!(sc->link_params.speed_cap_mask[idx] &
14302              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14303            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14304        }
14305
14306        if (!(sc->link_params.speed_cap_mask[idx] &
14307              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14308            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14309        }
14310
14311        if (!(sc->link_params.speed_cap_mask[idx] &
14312              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14313            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14314        }
14315
14316        if (!(sc->link_params.speed_cap_mask[idx] &
14317              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14318            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14319        }
14320    }
14321
14322    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14323          sc->port.supported[0], sc->port.supported[1]);
    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
                   sc->port.supported[0], sc->port.supported[1]);
14326}
14327
14328static void
14329bxe_link_settings_requested(struct bxe_softc *sc)
14330{
14331    uint32_t link_config;
14332    uint32_t idx;
14333    uint32_t cfg_size = 0;
14334
14335    sc->port.advertising[0] = 0;
14336    sc->port.advertising[1] = 0;
14337
14338    switch (sc->link_params.num_phys) {
14339    case 1:
14340    case 2:
14341        cfg_size = 1;
14342        break;
14343    case 3:
14344        cfg_size = 2;
14345        break;
14346    }
14347
14348    for (idx = 0; idx < cfg_size; idx++) {
14349        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14350        link_config = sc->port.link_config[idx];
14351
14352        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14353        case PORT_FEATURE_LINK_SPEED_AUTO:
14354            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14355                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14356                sc->port.advertising[idx] |= sc->port.supported[idx];
                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                    sc->port.advertising[idx] |=
                        (ELINK_SUPPORTED_100baseT_Half |
                         ELINK_SUPPORTED_100baseT_Full);
                }
14362            } else {
14363                /* force 10G, no AN */
14364                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14365                sc->port.advertising[idx] |=
14366                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14367                continue;
14368            }
14369            break;
14370
14371        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14372            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14373                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14374                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14375                                              ADVERTISED_TP);
14376            } else {
14377                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14378                          "speed_cap_mask=0x%08x\n",
14379                      link_config, sc->link_params.speed_cap_mask[idx]);
14380                return;
14381            }
14382            break;
14383
14384        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14385            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14386                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14387                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14388                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14389                                              ADVERTISED_TP);
                ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
                               sc->link_params.req_duplex[idx]);
14392            } else {
14393                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14394                          "speed_cap_mask=0x%08x\n",
14395                      link_config, sc->link_params.speed_cap_mask[idx]);
14396                return;
14397            }
14398            break;
14399
14400        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14401            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14402                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14403                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14404                                              ADVERTISED_TP);
14405            } else {
14406                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14407                          "speed_cap_mask=0x%08x\n",
14408                      link_config, sc->link_params.speed_cap_mask[idx]);
14409                return;
14410            }
14411            break;
14412
14413        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14414            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14415                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14416                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14417                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14418                                              ADVERTISED_TP);
14419            } else {
14420                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14421                          "speed_cap_mask=0x%08x\n",
14422                      link_config, sc->link_params.speed_cap_mask[idx]);
14423                return;
14424            }
14425            break;
14426
14427        case PORT_FEATURE_LINK_SPEED_1G:
14428            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14429                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14430                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14431                                              ADVERTISED_TP);
14432            } else {
14433                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14434                          "speed_cap_mask=0x%08x\n",
14435                      link_config, sc->link_params.speed_cap_mask[idx]);
14436                return;
14437            }
14438            break;
14439
14440        case PORT_FEATURE_LINK_SPEED_2_5G:
14441            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14442                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14443                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14444                                              ADVERTISED_TP);
14445            } else {
14446                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14447                          "speed_cap_mask=0x%08x\n",
14448                      link_config, sc->link_params.speed_cap_mask[idx]);
14449                return;
14450            }
14451            break;
14452
14453        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14454            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14455                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14456                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14457                                              ADVERTISED_FIBRE);
14458            } else {
14459                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14460                          "speed_cap_mask=0x%08x\n",
14461                      link_config, sc->link_params.speed_cap_mask[idx]);
14462                return;
14463            }
14464            break;
14465
14466        case PORT_FEATURE_LINK_SPEED_20G:
14467            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14468            break;
14469
14470        default:
14471            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14472                      "speed_cap_mask=0x%08x\n",
14473                  link_config, sc->link_params.speed_cap_mask[idx]);
14474            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14475            sc->port.advertising[idx] = sc->port.supported[idx];
14476            break;
14477        }
14478
14479        sc->link_params.req_flow_ctrl[idx] =
14480            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14481
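        /*
         * If flow control is left to AUTO but this PHY cannot autonegotiate,
         * fall back to no flow control; otherwise derive the requested
         * setting via bxe_set_requested_fc().
         */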
14482        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14483            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14484                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14485            } else {
14486                bxe_set_requested_fc(sc);
14487            }
14488        }
14489
14490        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14491                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14492              sc->link_params.req_line_speed[idx],
14493              sc->link_params.req_duplex[idx],
14494              sc->link_params.req_flow_ctrl[idx],
14495              sc->port.advertising[idx]);
        ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
                           "advertising=0x%x\n",
                       sc->link_params.req_line_speed[idx],
                       sc->link_params.req_duplex[idx],
                       sc->port.advertising[idx]);
14501    }
14502}
14503
14504static void
14505bxe_get_phy_info(struct bxe_softc *sc)
14506{
14507    uint8_t port = SC_PORT(sc);
14508    uint32_t config = sc->port.config;
14509    uint32_t eee_mode;
14510
14511    /* shmem data already read in bxe_get_shmem_info() */
14512
    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
                       "link_config0=0x%08x\n",
                   sc->link_params.lane_config,
                   sc->link_params.speed_cap_mask[0],
                   sc->port.link_config[0]);

14520    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14521    bxe_link_settings_requested(sc);
14522
14523    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14524        sc->link_params.feature_config_flags |=
14525            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14526    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14527        sc->link_params.feature_config_flags &=
14528            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14529    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14530        sc->link_params.feature_config_flags |=
14531            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14532    }
14533
14534    /* configure link feature according to nvram value */
14535    eee_mode =
14536        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14537          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14538         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14539    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14540        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14541                                    ELINK_EEE_MODE_ENABLE_LPI |
14542                                    ELINK_EEE_MODE_OUTPUT_TIME);
14543    } else {
14544        sc->link_params.eee_mode = 0;
14545    }
14546
14547    /* get the media type */
14548    bxe_media_detect(sc);
    ELINK_DEBUG_P1(sc, "detected media type %d\n", sc->media);
14550}
14551
14552static void
14553bxe_get_params(struct bxe_softc *sc)
14554{
14555    /* get user tunable params */
14556    bxe_get_tunable_params(sc);
14557
14558    /* select the RX and TX ring sizes */
14559    sc->tx_ring_size = TX_BD_USABLE;
14560    sc->rx_ring_size = RX_BD_USABLE;
14561
14562    /* XXX disable WoL */
14563    sc->wol = 0;
14564}
14565
14566static void
14567bxe_set_modes_bitmap(struct bxe_softc *sc)
14568{
14569    uint32_t flags = 0;
14570
14571    if (CHIP_REV_IS_FPGA(sc)) {
14572        SET_FLAGS(flags, MODE_FPGA);
14573    } else if (CHIP_REV_IS_EMUL(sc)) {
14574        SET_FLAGS(flags, MODE_EMUL);
14575    } else {
14576        SET_FLAGS(flags, MODE_ASIC);
14577    }
14578
14579    if (CHIP_IS_MODE_4_PORT(sc)) {
14580        SET_FLAGS(flags, MODE_PORT4);
14581    } else {
14582        SET_FLAGS(flags, MODE_PORT2);
14583    }
14584
14585    if (CHIP_IS_E2(sc)) {
14586        SET_FLAGS(flags, MODE_E2);
14587    } else if (CHIP_IS_E3(sc)) {
14588        SET_FLAGS(flags, MODE_E3);
14589        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14590            SET_FLAGS(flags, MODE_E3_A0);
14591        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14592            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14593        }
14594    }
14595
14596    if (IS_MF(sc)) {
14597        SET_FLAGS(flags, MODE_MF);
14598        switch (sc->devinfo.mf_info.mf_mode) {
14599        case MULTI_FUNCTION_SD:
14600            SET_FLAGS(flags, MODE_MF_SD);
14601            break;
14602        case MULTI_FUNCTION_SI:
14603            SET_FLAGS(flags, MODE_MF_SI);
14604            break;
14605        case MULTI_FUNCTION_AFEX:
14606            SET_FLAGS(flags, MODE_MF_AFEX);
14607            break;
14608        }
14609    } else {
14610        SET_FLAGS(flags, MODE_SF);
14611    }
14612
14613#if defined(__LITTLE_ENDIAN)
14614    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14615#else /* __BIG_ENDIAN */
14616    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14617#endif
14618
14619    INIT_MODE_FLAGS(sc) = flags;
14620}
14621
14622static int
14623bxe_alloc_hsi_mem(struct bxe_softc *sc)
14624{
14625    struct bxe_fastpath *fp;
14626    bus_addr_t busaddr;
14627    int max_agg_queues;
14628    int max_segments;
14629    bus_size_t max_size;
14630    bus_size_t max_seg_size;
14631    char buf[32];
14632    int rc;
14633    int i, j;
14634
    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14636
14637    /* allocate the parent bus DMA tag */
14638    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14639                            1,                        /* alignment */
14640                            0,                        /* boundary limit */
14641                            BUS_SPACE_MAXADDR,        /* restricted low */
14642                            BUS_SPACE_MAXADDR,        /* restricted hi */
14643                            NULL,                     /* addr filter() */
14644                            NULL,                     /* addr filter() arg */
14645                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14646                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14647                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14648                            0,                        /* flags */
14649                            NULL,                     /* lock() */
14650                            NULL,                     /* lock() arg */
14651                            &sc->parent_dma_tag);     /* returned dma tag */
14652    if (rc != 0) {
14653        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14654        return (1);
14655    }
14656
14657    /************************/
14658    /* DEFAULT STATUS BLOCK */
14659    /************************/
14660
14661    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14662                      &sc->def_sb_dma, "default status block") != 0) {
14663        /* XXX */
14664        bus_dma_tag_destroy(sc->parent_dma_tag);
14665        return (1);
14666    }
14667
14668    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14669
14670    /***************/
14671    /* EVENT QUEUE */
14672    /***************/
14673
14674    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14675                      &sc->eq_dma, "event queue") != 0) {
14676        /* XXX */
14677        bxe_dma_free(sc, &sc->def_sb_dma);
14678        sc->def_sb = NULL;
14679        bus_dma_tag_destroy(sc->parent_dma_tag);
14680        return (1);
14681    }
14682
    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14684
14685    /*************/
14686    /* SLOW PATH */
14687    /*************/
14688
14689    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14690                      &sc->sp_dma, "slow path") != 0) {
14691        /* XXX */
14692        bxe_dma_free(sc, &sc->eq_dma);
14693        sc->eq = NULL;
14694        bxe_dma_free(sc, &sc->def_sb_dma);
14695        sc->def_sb = NULL;
14696        bus_dma_tag_destroy(sc->parent_dma_tag);
14697        return (1);
14698    }
14699
14700    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14701
14702    /*******************/
14703    /* SLOW PATH QUEUE */
14704    /*******************/
14705
14706    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14707                      &sc->spq_dma, "slow path queue") != 0) {
14708        /* XXX */
14709        bxe_dma_free(sc, &sc->sp_dma);
14710        sc->sp = NULL;
14711        bxe_dma_free(sc, &sc->eq_dma);
14712        sc->eq = NULL;
14713        bxe_dma_free(sc, &sc->def_sb_dma);
14714        sc->def_sb = NULL;
14715        bus_dma_tag_destroy(sc->parent_dma_tag);
14716        return (1);
14717    }
14718
14719    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14720
14721    /***************************/
14722    /* FW DECOMPRESSION BUFFER */
14723    /***************************/
14724
14725    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14726                      "fw decompression buffer") != 0) {
14727        /* XXX */
14728        bxe_dma_free(sc, &sc->spq_dma);
14729        sc->spq = NULL;
14730        bxe_dma_free(sc, &sc->sp_dma);
14731        sc->sp = NULL;
14732        bxe_dma_free(sc, &sc->eq_dma);
14733        sc->eq = NULL;
14734        bxe_dma_free(sc, &sc->def_sb_dma);
14735        sc->def_sb = NULL;
14736        bus_dma_tag_destroy(sc->parent_dma_tag);
14737        return (1);
14738    }
14739
14740    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14741
14742    if ((sc->gz_strm =
14743         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14744        /* XXX */
14745        bxe_dma_free(sc, &sc->gz_buf_dma);
14746        sc->gz_buf = NULL;
14747        bxe_dma_free(sc, &sc->spq_dma);
14748        sc->spq = NULL;
14749        bxe_dma_free(sc, &sc->sp_dma);
14750        sc->sp = NULL;
14751        bxe_dma_free(sc, &sc->eq_dma);
14752        sc->eq = NULL;
14753        bxe_dma_free(sc, &sc->def_sb_dma);
14754        sc->def_sb = NULL;
14755        bus_dma_tag_destroy(sc->parent_dma_tag);
14756        return (1);
14757    }
14758
14759    /*************/
14760    /* FASTPATHS */
14761    /*************/
14762
14763    /* allocate DMA memory for each fastpath structure */
14764    for (i = 0; i < sc->num_queues; i++) {
14765        fp = &sc->fp[i];
14766        fp->sc    = sc;
14767        fp->index = i;
14768
14769        /*******************/
14770        /* FP STATUS BLOCK */
14771        /*******************/
14772
14773        snprintf(buf, sizeof(buf), "fp %d status block", i);
14774        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14775                          &fp->sb_dma, buf) != 0) {
14776            /* XXX unwind and free previous fastpath allocations */
14777            BLOGE(sc, "Failed to alloc %s\n", buf);
14778            return (1);
14779        } else {
14780            if (CHIP_IS_E2E3(sc)) {
14781                fp->status_block.e2_sb =
14782                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14783            } else {
14784                fp->status_block.e1x_sb =
14785                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14786            }
14787        }
14788
14789        /******************/
14790        /* FP TX BD CHAIN */
14791        /******************/
14792
14793        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14794        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14795                          &fp->tx_dma, buf) != 0) {
14796            /* XXX unwind and free previous fastpath allocations */
14797            BLOGE(sc, "Failed to alloc %s\n", buf);
14798            return (1);
14799        } else {
14800            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14801        }
14802
14803        /* link together the tx bd chain pages */
14804        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14805            /* index into the tx bd chain array to last entry per page */
14806            struct eth_tx_next_bd *tx_next_bd =
14807                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14808            /* point to the next page and wrap from last page */
14809            busaddr = (fp->tx_dma.paddr +
14810                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14811            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14812            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14813        }
14814
14815        /******************/
14816        /* FP RX BD CHAIN */
14817        /******************/
14818
14819        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14820        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14821                          &fp->rx_dma, buf) != 0) {
14822            /* XXX unwind and free previous fastpath allocations */
14823            BLOGE(sc, "Failed to alloc %s\n", buf);
14824            return (1);
14825        } else {
14826            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14827        }
14828
14829        /* link together the rx bd chain pages */
14830        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14831            /* index into the rx bd chain array to last entry per page */
14832            struct eth_rx_bd *rx_bd =
14833                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14834            /* point to the next page and wrap from last page */
14835            busaddr = (fp->rx_dma.paddr +
14836                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14837            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14838            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14839        }
14840
14841        /*******************/
14842        /* FP RX RCQ CHAIN */
14843        /*******************/
14844
14845        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14846        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14847                          &fp->rcq_dma, buf) != 0) {
14848            /* XXX unwind and free previous fastpath allocations */
14849            BLOGE(sc, "Failed to alloc %s\n", buf);
14850            return (1);
14851        } else {
14852            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14853        }
14854
14855        /* link together the rcq chain pages */
14856        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14857            /* index into the rcq chain array to last entry per page */
14858            struct eth_rx_cqe_next_page *rx_cqe_next =
14859                (struct eth_rx_cqe_next_page *)
14860                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14861            /* point to the next page and wrap from last page */
14862            busaddr = (fp->rcq_dma.paddr +
14863                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14864            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14865            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14866        }
14867
14868        /*******************/
14869        /* FP RX SGE CHAIN */
14870        /*******************/
14871
14872        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14873        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14874                          &fp->rx_sge_dma, buf) != 0) {
14875            /* XXX unwind and free previous fastpath allocations */
14876            BLOGE(sc, "Failed to alloc %s\n", buf);
14877            return (1);
14878        } else {
14879            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14880        }
14881
14882        /* link together the sge chain pages */
14883        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
            /* index into the rx sge chain array to last entry per page */
14885            struct eth_rx_sge *rx_sge =
14886                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14887            /* point to the next page and wrap from last page */
14888            busaddr = (fp->rx_sge_dma.paddr +
14889                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14890            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14891            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14892        }
14893
14894        /***********************/
14895        /* FP TX MBUF DMA MAPS */
14896        /***********************/
14897
14898        /* set required sizes before mapping to conserve resources */
14899        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14900            max_size     = BXE_TSO_MAX_SIZE;
14901            max_segments = BXE_TSO_MAX_SEGMENTS;
14902            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14903        } else {
14904            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14905            max_segments = BXE_MAX_SEGMENTS;
14906            max_seg_size = MCLBYTES;
14907        }
14908
14909        /* create a dma tag for the tx mbufs */
14910        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14911                                1,                  /* alignment */
14912                                0,                  /* boundary limit */
14913                                BUS_SPACE_MAXADDR,  /* restricted low */
14914                                BUS_SPACE_MAXADDR,  /* restricted hi */
14915                                NULL,               /* addr filter() */
14916                                NULL,               /* addr filter() arg */
14917                                max_size,           /* max map size */
14918                                max_segments,       /* num discontinuous */
14919                                max_seg_size,       /* max seg size */
14920                                0,                  /* flags */
14921                                NULL,               /* lock() */
14922                                NULL,               /* lock() arg */
14923                                &fp->tx_mbuf_tag);  /* returned dma tag */
14924        if (rc != 0) {
14925            /* XXX unwind and free previous fastpath allocations */
14926            BLOGE(sc, "Failed to create dma tag for "
14927                      "'fp %d tx mbufs' (%d)\n", i, rc);
14928            return (1);
14929        }
14930
14931        /* create dma maps for each of the tx mbuf clusters */
14932        for (j = 0; j < TX_BD_TOTAL; j++) {
            rc = bus_dmamap_create(fp->tx_mbuf_tag,
                                   BUS_DMA_NOWAIT,
                                   &fp->tx_mbuf_chain[j].m_map);
            if (rc != 0) {
14936                /* XXX unwind and free previous fastpath allocations */
14937                BLOGE(sc, "Failed to create dma map for "
14938                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14939                return (1);
14940            }
14941        }
14942
14943        /***********************/
14944        /* FP RX MBUF DMA MAPS */
14945        /***********************/
14946
14947        /* create a dma tag for the rx mbufs */
14948        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14949                                1,                  /* alignment */
14950                                0,                  /* boundary limit */
14951                                BUS_SPACE_MAXADDR,  /* restricted low */
14952                                BUS_SPACE_MAXADDR,  /* restricted hi */
14953                                NULL,               /* addr filter() */
14954                                NULL,               /* addr filter() arg */
14955                                MJUM9BYTES,         /* max map size */
14956                                1,                  /* num discontinuous */
14957                                MJUM9BYTES,         /* max seg size */
14958                                0,                  /* flags */
14959                                NULL,               /* lock() */
14960                                NULL,               /* lock() arg */
14961                                &fp->rx_mbuf_tag);  /* returned dma tag */
14962        if (rc != 0) {
14963            /* XXX unwind and free previous fastpath allocations */
14964            BLOGE(sc, "Failed to create dma tag for "
14965                      "'fp %d rx mbufs' (%d)\n", i, rc);
14966            return (1);
14967        }
14968
14969        /* create dma maps for each of the rx mbuf clusters */
14970        for (j = 0; j < RX_BD_TOTAL; j++) {
            rc = bus_dmamap_create(fp->rx_mbuf_tag,
                                   BUS_DMA_NOWAIT,
                                   &fp->rx_mbuf_chain[j].m_map);
            if (rc != 0) {
14974                /* XXX unwind and free previous fastpath allocations */
14975                BLOGE(sc, "Failed to create dma map for "
14976                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14977                return (1);
14978            }
14979        }
14980
14981        /* create dma map for the spare rx mbuf cluster */
        rc = bus_dmamap_create(fp->rx_mbuf_tag,
                               BUS_DMA_NOWAIT,
                               &fp->rx_mbuf_spare_map);
        if (rc != 0) {
14985            /* XXX unwind and free previous fastpath allocations */
14986            BLOGE(sc, "Failed to create dma map for "
14987                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14988            return (1);
14989        }
14990
14991        /***************************/
14992        /* FP RX SGE MBUF DMA MAPS */
14993        /***************************/
14994
14995        /* create a dma tag for the rx sge mbufs */
14996        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14997                                1,                  /* alignment */
14998                                0,                  /* boundary limit */
14999                                BUS_SPACE_MAXADDR,  /* restricted low */
15000                                BUS_SPACE_MAXADDR,  /* restricted hi */
15001                                NULL,               /* addr filter() */
15002                                NULL,               /* addr filter() arg */
15003                                BCM_PAGE_SIZE,      /* max map size */
15004                                1,                  /* num discontinuous */
15005                                BCM_PAGE_SIZE,      /* max seg size */
15006                                0,                  /* flags */
15007                                NULL,               /* lock() */
15008                                NULL,               /* lock() arg */
15009                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
15010        if (rc != 0) {
15011            /* XXX unwind and free previous fastpath allocations */
15012            BLOGE(sc, "Failed to create dma tag for "
15013                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
15014            return (1);
15015        }
15016
15017        /* create dma maps for the rx sge mbuf clusters */
15018        for (j = 0; j < RX_SGE_TOTAL; j++) {
            rc = bus_dmamap_create(fp->rx_sge_mbuf_tag,
                                   BUS_DMA_NOWAIT,
                                   &fp->rx_sge_mbuf_chain[j].m_map);
            if (rc != 0) {
15022                /* XXX unwind and free previous fastpath allocations */
15023                BLOGE(sc, "Failed to create dma map for "
15024                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15025                return (1);
15026            }
15027        }
15028
15029        /* create dma map for the spare rx sge mbuf cluster */
        rc = bus_dmamap_create(fp->rx_sge_mbuf_tag,
                               BUS_DMA_NOWAIT,
                               &fp->rx_sge_mbuf_spare_map);
        if (rc != 0) {
15033            /* XXX unwind and free previous fastpath allocations */
15034            BLOGE(sc, "Failed to create dma map for "
15035                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15036            return (1);
15037        }
15038
15039        /***************************/
15040        /* FP RX TPA MBUF DMA MAPS */
15041        /***************************/
15042
15043        /* create dma maps for the rx tpa mbuf clusters */
15044        max_agg_queues = MAX_AGG_QS(sc);
15045
15046        for (j = 0; j < max_agg_queues; j++) {
            rc = bus_dmamap_create(fp->rx_mbuf_tag,
                                   BUS_DMA_NOWAIT,
                                   &fp->rx_tpa_info[j].bd.m_map);
            if (rc != 0) {
15050                /* XXX unwind and free previous fastpath allocations */
15051                BLOGE(sc, "Failed to create dma map for "
15052                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15053                return (1);
15054            }
15055        }
15056
15057        /* create dma map for the spare rx tpa mbuf cluster */
        rc = bus_dmamap_create(fp->rx_mbuf_tag,
                               BUS_DMA_NOWAIT,
                               &fp->rx_tpa_info_mbuf_spare_map);
        if (rc != 0) {
15061            /* XXX unwind and free previous fastpath allocations */
15062            BLOGE(sc, "Failed to create dma map for "
15063                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15064            return (1);
15065        }
15066
15067        bxe_init_sge_ring_bit_mask(fp);
15068    }
15069
15070    return (0);
15071}
15072
15073static void
15074bxe_free_hsi_mem(struct bxe_softc *sc)
15075{
15076    struct bxe_fastpath *fp;
15077    int max_agg_queues;
15078    int i, j;
15079
15080    if (sc->parent_dma_tag == NULL) {
15081        return; /* assume nothing was allocated */
15082    }
15083
15084    for (i = 0; i < sc->num_queues; i++) {
15085        fp = &sc->fp[i];
15086
15087        /*******************/
15088        /* FP STATUS BLOCK */
15089        /*******************/
15090
15091        bxe_dma_free(sc, &fp->sb_dma);
15092        memset(&fp->status_block, 0, sizeof(fp->status_block));
15093
15094        /******************/
15095        /* FP TX BD CHAIN */
15096        /******************/
15097
15098        bxe_dma_free(sc, &fp->tx_dma);
15099        fp->tx_chain = NULL;
15100
15101        /******************/
15102        /* FP RX BD CHAIN */
15103        /******************/
15104
15105        bxe_dma_free(sc, &fp->rx_dma);
15106        fp->rx_chain = NULL;
15107
15108        /*******************/
15109        /* FP RX RCQ CHAIN */
15110        /*******************/
15111
15112        bxe_dma_free(sc, &fp->rcq_dma);
15113        fp->rcq_chain = NULL;
15114
15115        /*******************/
15116        /* FP RX SGE CHAIN */
15117        /*******************/
15118
15119        bxe_dma_free(sc, &fp->rx_sge_dma);
15120        fp->rx_sge_chain = NULL;
15121
15122        /***********************/
15123        /* FP TX MBUF DMA MAPS */
15124        /***********************/
15125
15126        if (fp->tx_mbuf_tag != NULL) {
15127            for (j = 0; j < TX_BD_TOTAL; j++) {
15128                if (fp->tx_mbuf_chain[j].m_map != NULL) {
15129                    bus_dmamap_unload(fp->tx_mbuf_tag,
15130                                      fp->tx_mbuf_chain[j].m_map);
15131                    bus_dmamap_destroy(fp->tx_mbuf_tag,
15132                                       fp->tx_mbuf_chain[j].m_map);
15133                }
15134            }
15135
15136            bus_dma_tag_destroy(fp->tx_mbuf_tag);
15137            fp->tx_mbuf_tag = NULL;
15138        }
15139
15140        /***********************/
15141        /* FP RX MBUF DMA MAPS */
15142        /***********************/
15143
15144        if (fp->rx_mbuf_tag != NULL) {
15145            for (j = 0; j < RX_BD_TOTAL; j++) {
15146                if (fp->rx_mbuf_chain[j].m_map != NULL) {
15147                    bus_dmamap_unload(fp->rx_mbuf_tag,
15148                                      fp->rx_mbuf_chain[j].m_map);
15149                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15150                                       fp->rx_mbuf_chain[j].m_map);
15151                }
15152            }
15153
15154            if (fp->rx_mbuf_spare_map != NULL) {
15155                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15156                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15157            }
15158
15159            /***************************/
15160            /* FP RX TPA MBUF DMA MAPS */
15161            /***************************/
15162
15163            max_agg_queues = MAX_AGG_QS(sc);
15164
15165            for (j = 0; j < max_agg_queues; j++) {
15166                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15167                    bus_dmamap_unload(fp->rx_mbuf_tag,
15168                                      fp->rx_tpa_info[j].bd.m_map);
15169                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15170                                       fp->rx_tpa_info[j].bd.m_map);
15171                }
15172            }
15173
15174            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15175                bus_dmamap_unload(fp->rx_mbuf_tag,
15176                                  fp->rx_tpa_info_mbuf_spare_map);
15177                bus_dmamap_destroy(fp->rx_mbuf_tag,
15178                                   fp->rx_tpa_info_mbuf_spare_map);
15179            }
15180
15181            bus_dma_tag_destroy(fp->rx_mbuf_tag);
15182            fp->rx_mbuf_tag = NULL;
15183        }
15184
15185        /***************************/
15186        /* FP RX SGE MBUF DMA MAPS */
15187        /***************************/
15188
15189        if (fp->rx_sge_mbuf_tag != NULL) {
15190            for (j = 0; j < RX_SGE_TOTAL; j++) {
15191                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15192                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15193                                      fp->rx_sge_mbuf_chain[j].m_map);
15194                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15195                                       fp->rx_sge_mbuf_chain[j].m_map);
15196                }
15197            }
15198
15199            if (fp->rx_sge_mbuf_spare_map != NULL) {
15200                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15201                                  fp->rx_sge_mbuf_spare_map);
15202                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15203                                   fp->rx_sge_mbuf_spare_map);
15204            }
15205
15206            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15207            fp->rx_sge_mbuf_tag = NULL;
15208        }
15209    }
15210
15211    /***************************/
15212    /* FW DECOMPRESSION BUFFER */
15213    /***************************/
15214
15215    bxe_dma_free(sc, &sc->gz_buf_dma);
15216    sc->gz_buf = NULL;
15217    free(sc->gz_strm, M_DEVBUF);
15218    sc->gz_strm = NULL;
15219
15220    /*******************/
15221    /* SLOW PATH QUEUE */
15222    /*******************/
15223
15224    bxe_dma_free(sc, &sc->spq_dma);
15225    sc->spq = NULL;
15226
15227    /*************/
15228    /* SLOW PATH */
15229    /*************/
15230
15231    bxe_dma_free(sc, &sc->sp_dma);
15232    sc->sp = NULL;
15233
15234    /***************/
15235    /* EVENT QUEUE */
15236    /***************/
15237
15238    bxe_dma_free(sc, &sc->eq_dma);
15239    sc->eq = NULL;
15240
15241    /************************/
15242    /* DEFAULT STATUS BLOCK */
15243    /************************/
15244
15245    bxe_dma_free(sc, &sc->def_sb_dma);
15246    sc->def_sb = NULL;
15247
15248    bus_dma_tag_destroy(sc->parent_dma_tag);
15249    sc->parent_dma_tag = NULL;
15250}
15251
/*
 * A DMAE transaction from a previous driver may have been in flight when the
 * pre-boot stage ended and boot began. This would invalidate the addresses of
 * the transaction, setting the was-error bit in the PGLUE_B block and causing
 * all hw-to-host PCIe transactions to time out. If this happened, clear the
 * interrupt which detected it from the pglueb, along with the was-done bit.
 */
15259static void
15260bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15261{
15262    uint32_t val;
15263
15264    if (!CHIP_IS_E1x(sc)) {
15265        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15266        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15267            BLOGD(sc, DBG_LOAD,
                  "Clearing 'was-error' bit that was set in pglueb\n");
15269            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15270        }
15271    }
15272}
15273
15274static int
15275bxe_prev_mcp_done(struct bxe_softc *sc)
15276{
15277    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15278                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15279    if (!rc) {
15280        BLOGE(sc, "MCP response failure, aborting\n");
15281        return (-1);
15282    }
15283
15284    return (0);
15285}
15286
15287static struct bxe_prev_list_node *
15288bxe_prev_path_get_entry(struct bxe_softc *sc)
15289{
15290    struct bxe_prev_list_node *tmp;
15291
15292    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15293        if ((sc->pcie_bus == tmp->bus) &&
15294            (sc->pcie_device == tmp->slot) &&
15295            (SC_PATH(sc) == tmp->path)) {
15296            return (tmp);
15297        }
15298    }
15299
15300    return (NULL);
15301}
15302
15303static uint8_t
15304bxe_prev_is_path_marked(struct bxe_softc *sc)
15305{
15306    struct bxe_prev_list_node *tmp;
15307    int rc = FALSE;
15308
15309    mtx_lock(&bxe_prev_mtx);
15310
15311    tmp = bxe_prev_path_get_entry(sc);
15312    if (tmp) {
15313        if (tmp->aer) {
15314            BLOGD(sc, DBG_LOAD,
15315                  "Path %d/%d/%d was marked by AER\n",
15316                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15317        } else {
15318            rc = TRUE;
15319            BLOGD(sc, DBG_LOAD,
15320                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15321                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15322        }
15323    }
15324
15325    mtx_unlock(&bxe_prev_mtx);
15326
15327    return (rc);
15328}
15329
15330static int
15331bxe_prev_mark_path(struct bxe_softc *sc,
15332                   uint8_t          after_undi)
15333{
15334    struct bxe_prev_list_node *tmp;
15335
15336    mtx_lock(&bxe_prev_mtx);
15337
15338    /* Check whether the entry for this path already exists */
15339    tmp = bxe_prev_path_get_entry(sc);
15340    if (tmp) {
15341        if (!tmp->aer) {
15342            BLOGD(sc, DBG_LOAD,
15343                  "Re-marking AER in path %d/%d/%d\n",
15344                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15345        } else {
15346            BLOGD(sc, DBG_LOAD,
15347                  "Removing AER indication from path %d/%d/%d\n",
15348                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15349            tmp->aer = 0;
15350        }
15351
15352        mtx_unlock(&bxe_prev_mtx);
15353        return (0);
15354    }
15355
15356    mtx_unlock(&bxe_prev_mtx);
15357
15358    /* Create an entry for this path and add it */
15359    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15360                 (M_NOWAIT | M_ZERO));
15361    if (!tmp) {
15362        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15363        return (-1);
15364    }
15365
15366    tmp->bus  = sc->pcie_bus;
15367    tmp->slot = sc->pcie_device;
15368    tmp->path = SC_PATH(sc);
15369    tmp->aer  = 0;
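    /* record, as a per-port bit, whether an UNDI driver was found on this port */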
15370    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15371
15372    mtx_lock(&bxe_prev_mtx);
15373
15374    BLOGD(sc, DBG_LOAD,
15375          "Marked path %d/%d/%d - finished previous unload\n",
15376          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15377    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15378
15379    mtx_unlock(&bxe_prev_mtx);
15380
15381    return (0);
15382}
15383
15384static int
15385bxe_do_flr(struct bxe_softc *sc)
15386{
15387    int i;
15388
15389    /* only E2 and onwards support FLR */
15390    if (CHIP_IS_E1x(sc)) {
15391        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15392        return (-1);
15393    }
15394
15395    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15396    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15397        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15398              sc->devinfo.bc_ver);
15399        return (-1);
15400    }
15401
15402    /* Wait for Transaction Pending bit clean */
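    /* up to 4 polls, backing off 100ms, 200ms, then 400ms between attempts */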
15403    for (i = 0; i < 4; i++) {
15404        if (i) {
15405            DELAY(((1 << (i - 1)) * 100) * 1000);
15406        }
15407
15408        if (!bxe_is_pcie_pending(sc)) {
15409            goto clear;
15410        }
15411    }
15412
15413    BLOGE(sc, "PCIE transaction is not cleared, "
15414              "proceeding with reset anyway\n");
15415
15416clear:
15417
15418    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15419    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15420
15421    return (0);
15422}
15423
15424struct bxe_mac_vals {
15425    uint32_t xmac_addr;
15426    uint32_t xmac_val;
15427    uint32_t emac_addr;
15428    uint32_t emac_val;
15429    uint32_t umac_addr;
15430    uint32_t umac_val;
15431    uint32_t bmac_addr;
15432    uint32_t bmac_val[2];
15433};
15434
15435static void
15436bxe_prev_unload_close_mac(struct bxe_softc *sc,
15437                          struct bxe_mac_vals *vals)
15438{
15439    uint32_t val, base_addr, offset, mask, reset_reg;
15440    uint8_t mac_stopped = FALSE;
15441    uint8_t port = SC_PORT(sc);
15442    uint32_t wb_data[2];
15443
15444    /* reset addresses as they also mark which values were changed */
15445    vals->bmac_addr = 0;
15446    vals->umac_addr = 0;
15447    vals->xmac_addr = 0;
15448    vals->emac_addr = 0;
15449
15450    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15451
15452    if (!CHIP_IS_E3(sc)) {
15453        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15454        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15455        if ((mask & reset_reg) && val) {
15456            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15457            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15458                                    : NIG_REG_INGRESS_BMAC0_MEM;
15459            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15460                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15461
15462            /*
15463             * use rd/wr since we cannot use dmae. This is safe
15464             * since MCP won't access the bus due to the request
15465             * to unload, and no function on the path can be
15466             * loaded at this time.
15467             */
15468            wb_data[0] = REG_RD(sc, base_addr + offset);
15469            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15470            vals->bmac_addr = base_addr + offset;
15471            vals->bmac_val[0] = wb_data[0];
15472            vals->bmac_val[1] = wb_data[1];
15473            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15474            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15475            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15476        }
15477
15478        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15479        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15480        vals->emac_val = REG_RD(sc, vals->emac_addr);
15481        REG_WR(sc, vals->emac_addr, 0);
15482        mac_stopped = TRUE;
15483    } else {
15484        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15485            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15486            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15487            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15488            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15489            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15490            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15491            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15492            REG_WR(sc, vals->xmac_addr, 0);
15493            mac_stopped = TRUE;
15494        }
15495
15496        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15497        if (mask & reset_reg) {
15498            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15499            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15500            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15501            vals->umac_val = REG_RD(sc, vals->umac_addr);
15502            REG_WR(sc, vals->umac_addr, 0);
15503            mac_stopped = TRUE;
15504        }
15505    }
15506
15507    if (mac_stopped) {
15508        DELAY(20000);
15509    }
15510}
15511
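/*
 * The per-port UNDI producer word lives in TSTORM internal memory; it packs
 * the BD producer in bits 31:16 and the RCQ producer in bits 15:0.
 */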
15512#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15513#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15514#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15515#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15516
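/*
 * Advance the UNDI BD and RCQ producers by 'inc'. This is presumably done so
 * that rings left armed by a previously loaded UNDI/pre-boot driver keep
 * consuming BRB blocks while bxe_prev_unload_common() waits for the BRB to
 * drain.
 */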
15517static void
15518bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15519                         uint8_t          port,
15520                         uint8_t          inc)
15521{
15522    uint16_t rcq, bd;
15523    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15524
15525    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15526    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15527
15528    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15529    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15530
15531    BLOGD(sc, DBG_LOAD,
15532          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15533          port, bd, rcq);
15534}
15535
15536static int
15537bxe_prev_unload_common(struct bxe_softc *sc)
15538{
15539    uint32_t reset_reg, tmp_reg = 0, rc;
15540    uint8_t prev_undi = FALSE;
15541    struct bxe_mac_vals mac_vals;
15542    uint32_t timer_count = 1000;
15543    uint32_t prev_brb;
15544
    /*
     * It is possible that a previous function received the 'common' answer
     * but has not loaded yet, creating a scenario where multiple functions
     * receive 'common' on the same path.
     */
15550    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15551
15552    memset(&mac_vals, 0, sizeof(mac_vals));
15553
15554    if (bxe_prev_is_path_marked(sc)) {
15555        return (bxe_prev_mcp_done(sc));
15556    }
15557
15558    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15559
15560    /* Reset should be performed after BRB is emptied */
15561    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15562        /* Close the MAC Rx to prevent BRB from filling up */
15563        bxe_prev_unload_close_mac(sc, &mac_vals);
15564
15565        /* close LLH filters towards the BRB */
15566        elink_set_rx_filter(&sc->link_params, 0);
15567
15568        /*
15569         * Check if the UNDI driver was previously loaded.
15570         * UNDI driver initializes CID offset for normal bell to 0x7
15571         */
15572        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15573            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15574            if (tmp_reg == 0x7) {
15575                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15576                prev_undi = TRUE;
15577                /* clear the UNDI indication */
15578                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15579                /* clear possible idle check errors */
15580                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15581            }
15582        }
15583
15584        /* wait until BRB is empty */
15585        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15586        while (timer_count) {
15587            prev_brb = tmp_reg;
15588
15589            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15590            if (!tmp_reg) {
15591                break;
15592            }
15593
15594            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15595
15596            /* reset timer as long as BRB actually gets emptied */
15597            if (prev_brb > tmp_reg) {
15598                timer_count = 1000;
15599            } else {
15600                timer_count--;
15601            }
15602
15603            /* If UNDI resides in memory, manually increment it */
15604            if (prev_undi) {
15605                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15606            }
15607
15608            DELAY(10);
15609        }
15610
15611        if (!timer_count) {
15612            BLOGE(sc, "Failed to empty BRB\n");
15613        }
15614    }
15615
15616    /* No packets are in the pipeline, path is ready for reset */
15617    bxe_reset_common(sc);
15618
15619    if (mac_vals.xmac_addr) {
15620        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15621    }
15622    if (mac_vals.umac_addr) {
15623        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15624    }
15625    if (mac_vals.emac_addr) {
15626        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15627    }
15628    if (mac_vals.bmac_addr) {
15629        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15630        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15631    }
15632
15633    rc = bxe_prev_mark_path(sc, prev_undi);
15634    if (rc) {
15635        bxe_prev_mcp_done(sc);
15636        return (rc);
15637    }
15638
15639    return (bxe_prev_mcp_done(sc));
15640}
15641
15642static int
15643bxe_prev_unload_uncommon(struct bxe_softc *sc)
15644{
15645    int rc;
15646
15647    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15648
15649    /* Test if previous unload process was already finished for this path */
15650    if (bxe_prev_is_path_marked(sc)) {
15651        return (bxe_prev_mcp_done(sc));
15652    }
15653
15654    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15655
15656    /*
15657     * If function has FLR capabilities, and existing FW version matches
15658     * the one required, then FLR will be sufficient to clean any residue
15659     * left by previous driver
15660     */
15661    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15662    if (!rc) {
15663        /* fw version is good */
15664        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15665        rc = bxe_do_flr(sc);
15666    }
15667
15668    if (!rc) {
15669        /* FLR was performed */
15670        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15671        return (0);
15672    }
15673
15674    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15675
    /* Close the MCP request, return failure */
15677    rc = bxe_prev_mcp_done(sc);
15678    if (!rc) {
15679        rc = BXE_PREV_WAIT_NEEDED;
15680    }
15681
15682    return (rc);
15683}
15684
15685static int
15686bxe_prev_unload(struct bxe_softc *sc)
15687{
15688    int time_counter = 10;
15689    uint32_t fw, hw_lock_reg, hw_lock_val;
15690    uint32_t rc = 0;
15691
15692    /*
15693     * Clear HW from errors which may have resulted from an interrupted
15694     * DMAE transaction.
15695     */
15696    bxe_prev_interrupted_dmae(sc);
15697
15698    /* Release previously held locks */
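    /*
     * Each PF has its own driver control register, spaced 8 bytes apart:
     * PFs 0-5 index from MISC_REG_DRIVER_CONTROL_1, PFs 6-7 from
     * MISC_REG_DRIVER_CONTROL_7.
     */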
15699    hw_lock_reg =
15700        (SC_FUNC(sc) <= 5) ?
15701            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15702            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15703
15704    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15705    if (hw_lock_val) {
15706        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15707            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15708            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15709                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15710        }
15711        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15712        REG_WR(sc, hw_lock_reg, 0xffffffff);
15713    } else {
15714        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15715    }
15716
15717    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15718        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15719        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15720    }
15721
15722    do {
15723        /* Lock MCP using an unload request */
15724        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15725        if (!fw) {
15726            BLOGE(sc, "MCP response failure, aborting\n");
15727            rc = -1;
15728            break;
15729        }
15730
15731        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15732            rc = bxe_prev_unload_common(sc);
15733            break;
15734        }
15735
        /* non-common reply from MCP might require looping */
15737        rc = bxe_prev_unload_uncommon(sc);
15738        if (rc != BXE_PREV_WAIT_NEEDED) {
15739            break;
15740        }
15741
15742        DELAY(20000);
15743    } while (--time_counter);
15744
15745    if (!time_counter || rc) {
15746        BLOGE(sc, "Failed to unload previous driver!"
15747            " time_counter %d rc %d\n", time_counter, rc);
15748        rc = -1;
15749    }
15750
15751    return (rc);
15752}
15753
15754void
15755bxe_dcbx_set_state(struct bxe_softc *sc,
15756                   uint8_t          dcb_on,
15757                   uint32_t         dcbx_enabled)
15758{
15759    if (!CHIP_IS_E1x(sc)) {
15760        sc->dcb_state = dcb_on;
15761        sc->dcbx_enabled = dcbx_enabled;
15762    } else {
15763        sc->dcb_state = FALSE;
15764        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15765    }
15766    BLOGD(sc, DBG_LOAD,
15767          "DCB state [%s:%s]\n",
15768          dcb_on ? "ON" : "OFF",
15769          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15770          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15771          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15772          "on-chip with negotiation" : "invalid");
15773}
15774
15775/* must be called after sriov-enable */
15776static int
15777bxe_set_qm_cid_count(struct bxe_softc *sc)
15778{
15779    int cid_count = BXE_L2_MAX_CID(sc);
15780
15781    if (IS_SRIOV(sc)) {
15782        cid_count += BXE_VF_CIDS;
15783    }
15784
15785    if (CNIC_SUPPORT(sc)) {
15786        cid_count += CNIC_CID_MAX;
15787    }
15788
15789    return (roundup(cid_count, QM_CID_ROUND));
15790}
15791
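/*
 * Map each packet priority to a class of service. pri_map packs one 4-bit
 * CoS value per priority (nibble 'pri' holds the CoS for priority 'pri');
 * any CoS at or above sc->max_cos falls back to CoS 0.
 */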
15792static void
15793bxe_init_multi_cos(struct bxe_softc *sc)
15794{
15795    int pri, cos;
15796
15797    uint32_t pri_map = 0; /* XXX change to user config */
15798
15799    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15800        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15801        if (cos < sc->max_cos) {
15802            sc->prio_to_cos[pri] = cos;
15803        } else {
15804            BLOGW(sc, "Invalid COS %d for priority %d "
15805                      "(max COS is %d), setting to 0\n",
15806                  cos, pri, (sc->max_cos - 1));
15807            sc->prio_to_cos[pri] = 0;
15808        }
15809    }
15810}
15811
15812static int
15813bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15814{
15815    struct bxe_softc *sc;
15816    int error, result;
15817
15818    result = 0;
15819    error = sysctl_handle_int(oidp, &result, 0, req);
15820
15821    if (error || !req->newptr) {
15822        return (error);
15823    }
15824
15825    if (result == 1) {
15826        uint32_t  temp;
15827        sc = (struct bxe_softc *)arg1;
15828
15829        BLOGI(sc, "... dumping driver state ...\n");
15830        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15831        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15832    }
15833
15834    return (error);
15835}
15836
15837static int
15838bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15839{
15840    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15841    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15842    uint32_t *offset;
15843    uint64_t value = 0;
15844    int index = (int)arg2;
15845
15846    if (index >= BXE_NUM_ETH_STATS) {
15847        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15848        return (-1);
15849    }
15850
15851    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15852
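    /*
     * Each stat is stored as either one or two consecutive 32-bit words.
     * For 8-byte stats HILO_U64() combines the two words, with the first
     * word presumably holding the upper 32 bits.
     */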
15853    switch (bxe_eth_stats_arr[index].size) {
15854    case 4:
15855        value = (uint64_t)*offset;
15856        break;
15857    case 8:
15858        value = HILO_U64(*offset, *(offset + 1));
15859        break;
15860    default:
15861        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15862              index, bxe_eth_stats_arr[index].size);
15863        return (-1);
15864    }
15865
15866    return (sysctl_handle_64(oidp, &value, 0, req));
15867}
15868
15869static int
15870bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15871{
15872    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15873    uint32_t *eth_stats;
15874    uint32_t *offset;
15875    uint64_t value = 0;
15876    uint32_t q_stat = (uint32_t)arg2;
15877    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15878    uint32_t index = (q_stat & 0xffff);
15879
15880    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15881
15882    if (index >= BXE_NUM_ETH_Q_STATS) {
15883        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15884        return (-1);
15885    }
15886
15887    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15888
15889    switch (bxe_eth_q_stats_arr[index].size) {
15890    case 4:
15891        value = (uint64_t)*offset;
15892        break;
15893    case 8:
15894        value = HILO_U64(*offset, *(offset + 1));
15895        break;
15896    default:
15897        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15898              index, bxe_eth_q_stats_arr[index].size);
15899        return (-1);
15900    }
15901
15902    return (sysctl_handle_64(oidp, &value, 0, req));
15903}
15904
15905static void bxe_force_link_reset(struct bxe_softc *sc)
15906{
15907
15908        bxe_acquire_phy_lock(sc);
15909        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15910        bxe_release_phy_lock(sc);
15911}
15912
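/*
 * Sysctl handler for the pause_param knob. The user value (0-8, see the
 * description string in bxe_add_sysctls()) is shifted into the
 * PORT_FEATURE_FLOW_CONTROL field and translated into elink req_flow_ctrl
 * flags; bit 0x400 of the shifted result is treated as the autoneg flow
 * control request and is checked against ELINK_SUPPORTED_Autoneg below.
 * On a PF with the interface running, the PHY is reset and re-initialized
 * so the new flow control setting takes effect.
 */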
15913static int
15914bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15915{
15916        struct bxe_softc *sc = (struct bxe_softc *)arg1;
15917        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15918        int rc = 0;
15919        int error;
15920        int result;
15921
15922
15923        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15924
15925        if (error || !req->newptr) {
15926                return (error);
15927        }
15928        if ((sc->bxe_pause_param < 0) ||  (sc->bxe_pause_param > 8)) {
                BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n", sc->bxe_pause_param);
15930                sc->bxe_pause_param = 8;
15931        }
15932
15933        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15934
15935
        if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
                BLOGW(sc, "Autoneg not supported, pause_param %d\n", sc->bxe_pause_param);
                return -EINVAL;
        }
15940
        if (IS_MF(sc))
                return 0;

        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
        if (result & ELINK_FLOW_CTRL_RX)
                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;

        if (result & ELINK_FLOW_CTRL_TX)
                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;

        if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;

        if (result & 0x400) {
15953                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15954                        sc->link_params.req_flow_ctrl[cfg_idx] =
15955                                ELINK_FLOW_CTRL_AUTO;
15956                }
15957                sc->link_params.req_fc_auto_adv = 0;
15958                if (result & ELINK_FLOW_CTRL_RX)
15959                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15960
15961                if (result & ELINK_FLOW_CTRL_TX)
15962                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15963                if (!sc->link_params.req_fc_auto_adv)
15964                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15965        }
        if (IS_PF(sc)) {
                if (sc->link_vars.link_up) {
                        bxe_stats_handle(sc, STATS_EVENT_STOP);
                }
                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                        bxe_force_link_reset(sc);
                        bxe_acquire_phy_lock(sc);

                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);

                        bxe_release_phy_lock(sc);

                        bxe_calc_fc_adv(sc);
                }
        }
15981        return rc;
15982}
15983
15984
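/*
 * Register the driver's sysctl tree. For example (assuming unit 0), this
 * creates read-only nodes such as dev.bxe.0.bc_version and per-queue
 * statistics under dev.bxe.0.queue.<n>.<stat_name>, plus read-write knobs
 * like dev.bxe.0.debug, dev.bxe.0.rx_budget and dev.bxe.0.pause_param.
 */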
15985static void
15986bxe_add_sysctls(struct bxe_softc *sc)
15987{
15988    struct sysctl_ctx_list *ctx;
15989    struct sysctl_oid_list *children;
15990    struct sysctl_oid *queue_top, *queue;
15991    struct sysctl_oid_list *queue_top_children, *queue_children;
15992    char queue_num_buf[32];
15993    uint32_t q_stat;
15994    int i, j;
15995
15996    ctx = device_get_sysctl_ctx(sc->dev);
15997    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15998
15999    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16000                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16001                      "version");
16002
16003    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16004             BCM_5710_FW_MAJOR_VERSION,
16005             BCM_5710_FW_MINOR_VERSION,
16006             BCM_5710_FW_REVISION_VERSION,
16007             BCM_5710_FW_ENGINEERING_VERSION);
16008
16009    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16010        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16011         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16012         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16013         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16014                                                                "Unknown"));
16015    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16016                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16017                    "multifunction vnics per port");
16018
16019    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16020        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16021         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16022         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16023                                              "???GT/s"),
16024        sc->devinfo.pcie_link_width);
16025
16026    sc->debug = bxe_debug;
16027
16028    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16029                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16030                      "bootcode version");
16031    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16032                      CTLFLAG_RD, sc->fw_ver_str, 0,
16033                      "firmware version");
16034    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16035                      CTLFLAG_RD, sc->mf_mode_str, 0,
16036                      "multifunction mode");
16037    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16038                      CTLFLAG_RD, sc->mac_addr_str, 0,
16039                      "mac address");
16040    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16041                      CTLFLAG_RD, sc->pci_link_str, 0,
16042                      "pci link status");
16043    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16044                    CTLFLAG_RW, &sc->debug,
16045                    "debug logging mode");
16046
16047    sc->trigger_grcdump = 0;
16048    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16049                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
16050                   "trigger grcdump should be invoked"
16051                   "  before collecting grcdump");
16052
16053    sc->grcdump_started = 0;
16054    sc->grcdump_done = 0;
16055    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16056                   CTLFLAG_RD, &sc->grcdump_done, 0,
16057                   "set by driver when grcdump is done");
16058
16059    sc->rx_budget = bxe_rx_budget;
16060    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16061                    CTLFLAG_RW, &sc->rx_budget, 0,
16062                    "rx processing budget");
16063
16064    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16065        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16066        bxe_sysctl_pauseparam, "IU",
16067        "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16068
16069
16070    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16071        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16072        bxe_sysctl_state, "IU", "dump driver state");
16073
16074    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16075        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16076            bxe_eth_stats_arr[i].string,
16077            CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16078            bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16079    }
16080
16081    /* add a new parent node for all queues "dev.bxe.#.queue" */
16082    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16083        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16084    queue_top_children = SYSCTL_CHILDREN(queue_top);
16085
16086    for (i = 0; i < sc->num_queues; i++) {
16087        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16088        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16089        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16090            queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16091        queue_children = SYSCTL_CHILDREN(queue);
16092
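        /*
         * Encode the queue index in the upper 16 bits and the stat index
         * in the lower 16 bits; bxe_sysctl_eth_q_stat() decodes this
         * value from arg2.
         */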
16093        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16094            q_stat = ((i << 16) | j);
16095            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16096                 bxe_eth_q_stats_arr[j].string,
16097                 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16098                 bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16099        }
16100    }
16101}
16102
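/*
 * Allocate a software transmit ring (buf_ring(9)) for each fastpath queue,
 * protected by that queue's TX mutex.
 */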
16103static int
16104bxe_alloc_buf_rings(struct bxe_softc *sc)
16105{
16106    int i;
16107    struct bxe_fastpath *fp;
16108
16109    for (i = 0; i < sc->num_queues; i++) {
16110
16111        fp = &sc->fp[i];
16112
16113        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16114                                   M_NOWAIT, &fp->tx_mtx);
16115        if (fp->tx_br == NULL)
16116            return (-1);
16117    }
16118
16119    return (0);
16120}
16121
16122static void
16123bxe_free_buf_rings(struct bxe_softc *sc)
16124{
16125    int i;
16126    struct bxe_fastpath *fp;
16127
16128    for (i = 0; i < sc->num_queues; i++) {
16129
16130        fp = &sc->fp[i];
16131
16132        if (fp->tx_br) {
16133            buf_ring_free(fp->tx_br, M_DEVBUF);
16134            fp->tx_br = NULL;
16135        }
16136    }
16137}
16138
16139static void
16140bxe_init_fp_mutexs(struct bxe_softc *sc)
16141{
16142    int i;
16143    struct bxe_fastpath *fp;
16144
16145    for (i = 0; i < sc->num_queues; i++) {
16146
16147        fp = &sc->fp[i];
16148
16149        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16150            "bxe%d_fp%d_tx_lock", sc->unit, i);
16151        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16152
16153        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16154            "bxe%d_fp%d_rx_lock", sc->unit, i);
16155        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16156    }
16157}
16158
16159static void
16160bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16161{
16162    int i;
16163    struct bxe_fastpath *fp;
16164
16165    for (i = 0; i < sc->num_queues; i++) {
16166
16167        fp = &sc->fp[i];
16168
16169        if (mtx_initialized(&fp->tx_mtx)) {
16170            mtx_destroy(&fp->tx_mtx);
16171        }
16172
16173        if (mtx_initialized(&fp->rx_mtx)) {
16174            mtx_destroy(&fp->rx_mtx);
16175        }
16176    }
16177}
16178
16179
16180/*
16181 * Device attach function.
16182 *
16183 * Allocates device resources, performs secondary chip identification, and
16184 * initializes driver instance variables. This function is called from driver
16185 * load after a successful probe.
16186 *
16187 * Returns:
16188 *   0 = Success, >0 = Failure
16189 */
16190static int
16191bxe_attach(device_t dev)
16192{
16193    struct bxe_softc *sc;
16194
16195    sc = device_get_softc(dev);
16196
16197    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16198
16199    sc->state = BXE_STATE_CLOSED;
16200
16201    sc->dev  = dev;
16202    sc->unit = device_get_unit(dev);
16203
16204    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16205
16206    sc->pcie_bus    = pci_get_bus(dev);
16207    sc->pcie_device = pci_get_slot(dev);
16208    sc->pcie_func   = pci_get_function(dev);
16209
16210    /* enable bus master capability */
16211    pci_enable_busmaster(dev);
16212
16213    /* get the BARs */
16214    if (bxe_allocate_bars(sc) != 0) {
16215        return (ENXIO);
16216    }
16217
16218    /* initialize the mutexes */
16219    bxe_init_mutexes(sc);
16220
16221    /* prepare the periodic callout */
16222    callout_init(&sc->periodic_callout, 1);
16223
16224    /* prepare the chip taskqueue */
16225    sc->chip_tq_flags = CHIP_TQ_NONE;
16226    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16227             "bxe%d_chip_tq", sc->unit);
16228    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16229    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16230                                   taskqueue_thread_enqueue,
16231                                   &sc->chip_tq);
16232    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16233                            "%s", sc->chip_tq_name);
16234
16235    TIMEOUT_TASK_INIT(taskqueue_thread,
16236        &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task,  sc);
16237
16238
16239    /* get device info and set params */
16240    if (bxe_get_device_info(sc) != 0) {
16241        BLOGE(sc, "getting device info\n");
16242        bxe_deallocate_bars(sc);
16243        pci_disable_busmaster(dev);
16244        return (ENXIO);
16245    }
16246
16247    /* get final misc params */
16248    bxe_get_params(sc);
16249
16250    /* set the default MTU (changed via ifconfig) */
16251    sc->mtu = ETHERMTU;
16252
16253    bxe_set_modes_bitmap(sc);
16254
16255    /* XXX
16256     * If in AFEX mode and the function is configured for FCoE
16257     * then bail... no L2 allowed.
16258     */
16259
16260    /* get phy settings from shmem and 'and' against admin settings */
16261    bxe_get_phy_info(sc);
16262
16263    /* initialize the FreeBSD ifnet interface */
16264    if (bxe_init_ifnet(sc) != 0) {
16265        bxe_release_mutexes(sc);
16266        bxe_deallocate_bars(sc);
16267        pci_disable_busmaster(dev);
16268        return (ENXIO);
16269    }
16270
16271    if (bxe_add_cdev(sc) != 0) {
16272        if (sc->ifp != NULL) {
16273            ether_ifdetach(sc->ifp);
16274        }
16275        ifmedia_removeall(&sc->ifmedia);
16276        bxe_release_mutexes(sc);
16277        bxe_deallocate_bars(sc);
16278        pci_disable_busmaster(dev);
16279        return (ENXIO);
16280    }
16281
16282    /* allocate device interrupts */
16283    if (bxe_interrupt_alloc(sc) != 0) {
16284        bxe_del_cdev(sc);
16285        if (sc->ifp != NULL) {
16286            ether_ifdetach(sc->ifp);
16287        }
16288        ifmedia_removeall(&sc->ifmedia);
16289        bxe_release_mutexes(sc);
16290        bxe_deallocate_bars(sc);
16291        pci_disable_busmaster(dev);
16292        return (ENXIO);
16293    }
16294
16295    bxe_init_fp_mutexs(sc);
16296
16297    if (bxe_alloc_buf_rings(sc) != 0) {
        bxe_free_buf_rings(sc);
16299        bxe_interrupt_free(sc);
16300        bxe_del_cdev(sc);
16301        if (sc->ifp != NULL) {
16302            ether_ifdetach(sc->ifp);
16303        }
16304        ifmedia_removeall(&sc->ifmedia);
16305        bxe_release_mutexes(sc);
16306        bxe_deallocate_bars(sc);
16307        pci_disable_busmaster(dev);
16308        return (ENXIO);
16309    }
16310
16311    /* allocate ilt */
16312    if (bxe_alloc_ilt_mem(sc) != 0) {
        bxe_free_buf_rings(sc);
16314        bxe_interrupt_free(sc);
16315        bxe_del_cdev(sc);
16316        if (sc->ifp != NULL) {
16317            ether_ifdetach(sc->ifp);
16318        }
16319        ifmedia_removeall(&sc->ifmedia);
16320        bxe_release_mutexes(sc);
16321        bxe_deallocate_bars(sc);
16322        pci_disable_busmaster(dev);
16323        return (ENXIO);
16324    }
16325
16326    /* allocate the host hardware/software hsi structures */
16327    if (bxe_alloc_hsi_mem(sc) != 0) {
16328        bxe_free_ilt_mem(sc);
        bxe_free_buf_rings(sc);
16330        bxe_interrupt_free(sc);
16331        bxe_del_cdev(sc);
16332        if (sc->ifp != NULL) {
16333            ether_ifdetach(sc->ifp);
16334        }
16335        ifmedia_removeall(&sc->ifmedia);
16336        bxe_release_mutexes(sc);
16337        bxe_deallocate_bars(sc);
16338        pci_disable_busmaster(dev);
16339        return (ENXIO);
16340    }
16341
16342    /* need to reset chip if UNDI was active */
16343    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16344        /* init fw_seq */
16345        sc->fw_seq =
16346            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16347             DRV_MSG_SEQ_NUMBER_MASK);
16348        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16349        bxe_prev_unload(sc);
16350    }
16351
16352#if 1
16353    /* XXX */
16354    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16355#else
16356    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16357        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16358        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16359        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16360        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16361        bxe_dcbx_init_params(sc);
16362    } else {
16363        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16364    }
16365#endif
16366
16367    /* calculate qm_cid_count */
16368    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16369    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16370
16371    sc->max_cos = 1;
16372    bxe_init_multi_cos(sc);
16373
16374    bxe_add_sysctls(sc);
16375
16376    return (0);
16377}
16378
16379/*
16380 * Device detach function.
16381 *
16382 * Stops the controller, resets the controller, and releases resources.
16383 *
16384 * Returns:
16385 *   0 = Success, >0 = Failure
16386 */
16387static int
16388bxe_detach(device_t dev)
16389{
16390    struct bxe_softc *sc;
16391    if_t ifp;
16392
16393    sc = device_get_softc(dev);
16394
16395    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16396
16397    ifp = sc->ifp;
16398    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16399        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16400        return(EBUSY);
16401    }
16402
16403    bxe_del_cdev(sc);
16404
16405    /* stop the periodic callout */
16406    bxe_periodic_stop(sc);
16407
16408    /* stop the chip taskqueue */
16409    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16410    if (sc->chip_tq) {
16411        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16412        taskqueue_free(sc->chip_tq);
16413        sc->chip_tq = NULL;
16414        taskqueue_drain_timeout(taskqueue_thread,
16415            &sc->sp_err_timeout_task);
16416    }
16417
16418    /* stop and reset the controller if it was open */
16419    if (sc->state != BXE_STATE_CLOSED) {
16420        BXE_CORE_LOCK(sc);
16421        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16422        sc->state = BXE_STATE_DISABLED;
16423        BXE_CORE_UNLOCK(sc);
16424    }
16425
16426    /* release the network interface */
16427    if (ifp != NULL) {
16428        ether_ifdetach(ifp);
16429    }
16430    ifmedia_removeall(&sc->ifmedia);
16431
16432    /* XXX do the following based on driver state... */
16433
16434    /* free the host hardware/software hsi structures */
16435    bxe_free_hsi_mem(sc);
16436
16437    /* free ilt */
16438    bxe_free_ilt_mem(sc);
16439
16440    bxe_free_buf_rings(sc);
16441
16442    /* release the interrupts */
16443    bxe_interrupt_free(sc);
16444
    /* Release the mutexes */
16446    bxe_destroy_fp_mutexs(sc);
16447    bxe_release_mutexes(sc);
16448
16449
16450    /* Release the PCIe BAR mapped memory */
16451    bxe_deallocate_bars(sc);
16452
16453    /* Release the FreeBSD interface. */
16454    if (sc->ifp != NULL) {
16455        if_free(sc->ifp);
16456    }
16457
16458    pci_disable_busmaster(dev);
16459
16460    return (0);
16461}
16462
16463/*
16464 * Device shutdown function.
16465 *
16466 * Stops and resets the controller.
16467 *
16468 * Returns:
 *   0 = Success
16470 */
16471static int
16472bxe_shutdown(device_t dev)
16473{
16474    struct bxe_softc *sc;
16475
16476    sc = device_get_softc(dev);
16477
16478    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16479
16480    /* stop the periodic callout */
16481    bxe_periodic_stop(sc);
16482
16483    if (sc->state != BXE_STATE_CLOSED) {
        BXE_CORE_LOCK(sc);
        bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
        BXE_CORE_UNLOCK(sc);
16487    }
16488
16489    return (0);
16490}
16491
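/*
 * Acknowledge a status block through the IGU. Each status block owns an
 * 8-byte command slot, so the ack address is the IGU base plus
 * (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8.
 */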
16492void
16493bxe_igu_ack_sb(struct bxe_softc *sc,
16494               uint8_t          igu_sb_id,
16495               uint8_t          segment,
16496               uint16_t         index,
16497               uint8_t          op,
16498               uint8_t          update)
16499{
16500    uint32_t igu_addr = sc->igu_base_addr;
16501    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16502    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16503}
16504
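/*
 * Issue a status block cleanup command to the IGU through the GRC command
 * interface (data register first, then control register) and poll the
 * per-SB cleanup bit until it is set or the retry count expires. Not
 * supported when the IGU is in backward-compatible mode.
 */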
16505static void
16506bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16507                     uint8_t          func,
16508                     uint8_t          idu_sb_id,
16509                     uint8_t          is_pf)
16510{
16511    uint32_t data, ctl, cnt = 100;
16512    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16513    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16514    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16515    uint32_t sb_bit =  1 << (idu_sb_id%32);
16516    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16517    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16518
16519    /* Not supported in BC mode */
16520    if (CHIP_INT_MODE_IS_BC(sc)) {
16521        return;
16522    }
16523
16524    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16525             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16526            IGU_REGULAR_CLEANUP_SET |
16527            IGU_REGULAR_BCLEANUP);
16528
16529    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16530           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16531           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16532
16533    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16534            data, igu_addr_data);
16535    REG_WR(sc, igu_addr_data, data);
16536
16537    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16538                      BUS_SPACE_BARRIER_WRITE);
16539    mb();
16540
16541    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16542            ctl, igu_addr_ctl);
16543    REG_WR(sc, igu_addr_ctl, ctl);
16544
16545    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16546                      BUS_SPACE_BARRIER_WRITE);
16547    mb();
16548
16549    /* wait for clean up to finish */
16550    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16551        DELAY(20000);
16552    }
16553
16554    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16555        BLOGD(sc, DBG_LOAD,
16556              "Unable to finish IGU cleanup: "
16557              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16558              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16559    }
16560}
16561
16562static void
16563bxe_igu_clear_sb(struct bxe_softc *sc,
16564                 uint8_t          idu_sb_id)
16565{
16566    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16567}
16568
16569
16570
16571
16572
16573
16574
16575/*******************/
16576/* ECORE CALLBACKS */
16577/*******************/
16578
16579static void
16580bxe_reset_common(struct bxe_softc *sc)
16581{
16582    uint32_t val = 0x1400;
16583
16584    /* reset_common */
16585    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16586
16587    if (CHIP_IS_E3(sc)) {
16588        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16589        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16590    }
16591
16592    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16593}
16594
16595static void
16596bxe_common_init_phy(struct bxe_softc *sc)
16597{
16598    uint32_t shmem_base[2];
16599    uint32_t shmem2_base[2];
16600
16601    /* Avoid common init in case MFW supports LFA */
16602    if (SHMEM2_RD(sc, size) >
16603        (uint32_t)offsetof(struct shmem2_region,
16604                           lfa_host_addr[SC_PORT(sc)])) {
16605        return;
16606    }
16607
16608    shmem_base[0]  = sc->devinfo.shmem_base;
16609    shmem2_base[0] = sc->devinfo.shmem2_base;
16610
16611    if (!CHIP_IS_E1x(sc)) {
16612        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16613        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16614    }
16615
16616    bxe_acquire_phy_lock(sc);
16617    elink_common_init_phy(sc, shmem_base, shmem2_base,
16618                          sc->devinfo.chip_id, 0);
16619    bxe_release_phy_lock(sc);
16620}
16621
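/*
 * Disable this PF in the IGU, PGLUE_B and CFC so the device stops serving
 * requests on its behalf; used while pretending to be the other functions
 * during the common init phase.
 */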
16622static void
16623bxe_pf_disable(struct bxe_softc *sc)
16624{
16625    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16626
16627    val &= ~IGU_PF_CONF_FUNC_EN;
16628
16629    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16630    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16631    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16632}
16633
16634static void
16635bxe_init_pxp(struct bxe_softc *sc)
16636{
16637    uint16_t devctl;
16638    int r_order, w_order;
16639
16640    devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16641
16642    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16643
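    /*
     * Max payload size and max read request size are encoded in the PCIe
     * device control register as powers of two, i.e. 128 << w_order and
     * 128 << r_order bytes respectively.
     */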
16644    w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16645
16646    if (sc->mrrs == -1) {
16647        r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16648    } else {
16649        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16650        r_order = sc->mrrs;
16651    }
16652
16653    ecore_init_pxp_arb(sc, r_order, w_order);
16654}
16655
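/*
 * The pretend register for absolute function N lives at the function 0
 * base address plus N times the spacing between the F0 and F1 registers.
 */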
16656static uint32_t
16657bxe_get_pretend_reg(struct bxe_softc *sc)
16658{
16659    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16660    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16661    return (base + (SC_ABS_FUNC(sc)) * stride);
16662}
16663
16664/*
16665 * Called only on E1H or E2.
16666 * When pretending to be PF, the pretend value is the function number 0..7.
16667 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16668 * combination.
16669 */
16670static int
16671bxe_pretend_func(struct bxe_softc *sc,
16672                 uint16_t         pretend_func_val)
16673{
16674    uint32_t pretend_reg;
16675
16676    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16677        return (-1);
16678    }
16679
16680    /* get my own pretend register */
16681    pretend_reg = bxe_get_pretend_reg(sc);
16682    REG_WR(sc, pretend_reg, pretend_func_val);
16683    REG_RD(sc, pretend_reg);
16684    return (0);
16685}
16686
16687static void
16688bxe_iov_init_dmae(struct bxe_softc *sc)
16689{
16690    return;
16691}
16692
16693static void
16694bxe_iov_init_dq(struct bxe_softc *sc)
16695{
16696    return;
16697}
16698
16699/* send a NIG loopback debug packet */
16700static void
16701bxe_lb_pckt(struct bxe_softc *sc)
16702{
16703    uint32_t wb_write[3];
16704
16705    /* Ethernet source and destination addresses */
16706    wb_write[0] = 0x55555555;
16707    wb_write[1] = 0x55555555;
16708    wb_write[2] = 0x20;     /* SOP */
16709    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16710
16711    /* NON-IP protocol */
16712    wb_write[0] = 0x09000000;
16713    wb_write[1] = 0x55555555;
16714    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16715    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16716}
16717
16718/*
16719 * Some of the internal memories are not directly readable from the driver.
16720 * To test them we send debug packets.
16721 */
16722static int
16723bxe_int_mem_test(struct bxe_softc *sc)
16724{
16725    int factor;
16726    int count, i;
16727    uint32_t val = 0;
16728
16729    if (CHIP_REV_IS_FPGA(sc)) {
16730        factor = 120;
16731    } else if (CHIP_REV_IS_EMUL(sc)) {
16732        factor = 200;
16733    } else {
16734        factor = 1;
16735    }
16736
16737    /* disable inputs of parser neighbor blocks */
16738    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16739    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16740    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16741    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16742
16743    /*  write 0 to parser credits for CFC search request */
16744    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16745
16746    /* send Ethernet packet */
16747    bxe_lb_pckt(sc);
16748
16749    /* TODO do i reset NIG statistic? */
16750    /* Wait until NIG register shows 1 packet of size 0x10 */
16751    count = 1000 * factor;
16752    while (count) {
16753        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16754        val = *BXE_SP(sc, wb_data[0]);
16755        if (val == 0x10) {
16756            break;
16757        }
16758
16759        DELAY(10000);
16760        count--;
16761    }
16762
16763    if (val != 0x10) {
16764        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16765        return (-1);
16766    }
16767
16768    /* wait until PRS register shows 1 packet */
16769    count = (1000 * factor);
16770    while (count) {
16771        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16772        if (val == 1) {
16773            break;
16774        }
16775
16776        DELAY(10000);
16777        count--;
16778    }
16779
16780    if (val != 0x1) {
16781        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16782        return (-2);
16783    }
16784
16785    /* Reset and init BRB, PRS */
16786    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16787    DELAY(50000);
16788    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16789    DELAY(50000);
16790    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16791    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16792
16793    /* Disable inputs of parser neighbor blocks */
16794    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16795    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16796    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16797    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16798
16799    /* Write 0 to parser credits for CFC search request */
16800    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16801
16802    /* send 10 Ethernet packets */
16803    for (i = 0; i < 10; i++) {
16804        bxe_lb_pckt(sc);
16805    }
16806
16807    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16808    count = (1000 * factor);
16809    while (count) {
16810        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16811        val = *BXE_SP(sc, wb_data[0]);
16812        if (val == 0xb0) {
16813            break;
16814        }
16815
16816        DELAY(10000);
16817        count--;
16818    }
16819
16820    if (val != 0xb0) {
16821        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16822        return (-3);
16823    }
16824
16825    /* Wait until PRS register shows 2 packets */
16826    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16827    if (val != 2) {
16828        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16829    }
16830
16831    /* Write 1 to parser credits for CFC search request */
16832    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16833
16834    /* Wait until PRS register shows 3 packets */
16835    DELAY(10000 * factor);
16836
    /* The PRS register should now show 3 packets */
16838    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16839    if (val != 3) {
16840        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16841    }
16842
16843    /* clear NIG EOP FIFO */
16844    for (i = 0; i < 11; i++) {
16845        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16846    }
16847
16848    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16849    if (val != 1) {
16850        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16851        return (-4);
16852    }
16853
16854    /* Reset and init BRB, PRS, NIG */
16855    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16856    DELAY(50000);
16857    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16858    DELAY(50000);
16859    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16860    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16861    if (!CNIC_SUPPORT(sc)) {
16862        /* set NIC mode */
16863        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16864    }
16865
16866    /* Enable inputs of parser neighbor blocks */
16867    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16868    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16869    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16870    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16871
16872    return (0);
16873}
16874
16875static void
16876bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16877{
16878    int is_required;
16879    uint32_t val;
16880    int port;
16881
16882    is_required = 0;
16883    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16884           SHARED_HW_CFG_FAN_FAILURE_MASK);
16885
16886    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16887        is_required = 1;
16888    }
16889    /*
16890     * The fan failure mechanism is usually related to the PHY type since
16891     * the power consumption of the board is affected by the PHY. Currently,
16892     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16893     */
16894    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16895        for (port = PORT_0; port < PORT_MAX; port++) {
16896            is_required |= elink_fan_failure_det_req(sc,
16897                                                     sc->devinfo.shmem_base,
16898                                                     sc->devinfo.shmem2_base,
16899                                                     port);
16900        }
16901    }
16902
16903    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16904
16905    if (is_required == 0) {
16906        return;
16907    }
16908
16909    /* Fan failure is indicated by SPIO 5 */
16910    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16911
16912    /* set to active low mode */
16913    val = REG_RD(sc, MISC_REG_SPIO_INT);
16914    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16915    REG_WR(sc, MISC_REG_SPIO_INT, val);
16916
16917    /* enable interrupt to signal the IGU */
16918    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16919    val |= MISC_SPIO_SPIO5;
16920    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16921}
16922
16923static void
16924bxe_enable_blocks_attention(struct bxe_softc *sc)
16925{
16926    uint32_t val;
16927
16928    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16929    if (!CHIP_IS_E1x(sc)) {
16930        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16931    } else {
16932        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16933    }
16934    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16935    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16936    /*
16937     * mask read length error interrupts in brb for parser
16938     * (parsing unit and 'checksum and crc' unit)
16939     * these errors are legal (PU reads fixed length and CAC can cause
16940     * read length error on truncated packets)
16941     */
16942    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16943    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16944    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16945    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16946    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16947    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16948/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16949/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16950    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16951    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16952    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16953/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16954/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16955    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16956    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16957    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16958    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16959/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16960/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16961
16962    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16963           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16964           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16965    if (!CHIP_IS_E1x(sc)) {
16966        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16967                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16968    }
16969    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16970
16971    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16972    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16973    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16974/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16975
16976    if (!CHIP_IS_E1x(sc)) {
16977        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16978        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16979    }
16980
16981    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16982    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16983/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16984    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16985}
16986
16987/**
16988 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16989 *
16990 * @sc:     driver handle
16991 */
16992static int
16993bxe_init_hw_common(struct bxe_softc *sc)
16994{
16995    uint8_t abs_func_id;
16996    uint32_t val;
16997
16998    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16999          SC_ABS_FUNC(sc));
17000
17001    /*
17002     * take the RESET lock to protect undi_unload flow from accessing
17003     * registers while we are resetting the chip
17004     */
17005    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17006
17007    bxe_reset_common(sc);
17008
17009    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17010
17011    val = 0xfffc;
17012    if (CHIP_IS_E3(sc)) {
17013        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17014        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17015    }
17016
17017    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17018
17019    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17020
17021    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17022    BLOGD(sc, DBG_LOAD, "after misc block init\n");
17023
17024    if (!CHIP_IS_E1x(sc)) {
17025        /*
17026         * 4-port mode or 2-port mode we need to turn off master-enable for
17027         * everyone. After that we turn it back on for self. So, we disregard
         * multi-function, and always disable all functions on the given path;
         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17030         */
17031        for (abs_func_id = SC_PATH(sc);
17032             abs_func_id < (E2_FUNC_MAX * 2);
17033             abs_func_id += 2) {
17034            if (abs_func_id == SC_ABS_FUNC(sc)) {
17035                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17036                continue;
17037            }
17038
17039            bxe_pretend_func(sc, abs_func_id);
17040
17041            /* clear pf enable */
17042            bxe_pf_disable(sc);
17043
17044            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17045        }
17046    }
17047
17048    BLOGD(sc, DBG_LOAD, "after pf disable\n");
17049
17050    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17051
17052    if (CHIP_IS_E1(sc)) {
17053        /*
17054         * enable HW interrupt from PXP on USDM overflow
17055         * bit 16 on INT_MASK_0
17056         */
17057        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17058    }
17059
17060    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17061    bxe_init_pxp(sc);
17062
17063#ifdef __BIG_ENDIAN
17064    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17065    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17066    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17067    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17068    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17069    /* make sure this value is 0 */
17070    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17071
17072    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17073    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17074    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17075    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17076    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17077#endif
17078
17079    ecore_ilt_init_page_size(sc, INITOP_SET);
17080
17081    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17082        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17083    }
17084
    /* let the HW do its magic... */
17086    DELAY(100000);
17087
17088    /* finish PXP init */
17089    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17090    if (val != 1) {
17091        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17092            val);
17093        return (-1);
17094    }
17095    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17096    if (val != 1) {
17097        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17098        return (-1);
17099    }
17100
17101    BLOGD(sc, DBG_LOAD, "after pxp init\n");
17102
17103    /*
17104     * Timer bug workaround for E2 only. We need to set the entire ILT to have
17105     * entries with value "0" and valid bit on. This needs to be done by the
17106     * first PF that is loaded in a path (i.e. common phase)
17107     */
17108    if (!CHIP_IS_E1x(sc)) {
17109/*
17110 * In E2 there is a bug in the timers block that can cause function 6 / 7
17111 * (i.e. vnic3) to start even if it is marked as "scan-off".
17112 * This occurs when a different function (func2,3) is being marked
17113 * as "scan-off". Real-life scenario for example: if a driver is being
17114 * load-unloaded while func6,7 are down. This will cause the timer to access
17115 * the ilt, translate to a logical address and send a request to read/write.
17116 * Since the ilt for the function that is down is not valid, this will cause
17117 * a translation error which is unrecoverable.
17118 * The Workaround is intended to make sure that when this happens nothing
17119 * fatal will occur. The workaround:
17120 *  1.  First PF driver which loads on a path will:
17121 *      a.  After taking the chip out of reset, by using pretend,
17122 *          it will write "0" to the following registers of
17123 *          the other vnics.
17124 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17125 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17126 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17127 *          And for itself it will write '1' to
17128 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17129 *          dmae-operations (writing to pram for example.)
17130 *          note: can be done for only function 6,7 but cleaner this
17131 *            way.
17132 *      b.  Write zero+valid to the entire ILT.
17133 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17134 *          VNIC3 (of that port). The range allocated will be the
17135 *          entire ILT. This is needed to prevent  ILT range error.
17136 *  2.  Any PF driver load flow:
17137 *      a.  ILT update with the physical addresses of the allocated
17138 *          logical pages.
17139 *      b.  Wait 20msec. - note that this timeout is needed to make
17140 *          sure there are no requests in one of the PXP internal
17141 *          queues with "old" ILT addresses.
17142 *      c.  PF enable in the PGLC.
17143 *      d.  Clear the was_error of the PF in the PGLC. (could have
17144 *          occurred while driver was down)
17145 *      e.  PF enable in the CFC (WEAK + STRONG)
17146 *      f.  Timers scan enable
17147 *  3.  PF driver unload flow:
17148 *      a.  Clear the Timers scan_en.
17149 *      b.  Polling for scan_on=0 for that PF.
17150 *      c.  Clear the PF enable bit in the PXP.
17151 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17152 *      e.  Write zero+valid to all ILT entries (The valid bit must
17153 *          stay set)
17154 *      f.  If this is VNIC 3 of a port then also init
17155 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17156 *          to the last entry in the ILT.
17157 *
17158 *      Notes:
17159 *      Currently the PF error in the PGLC is non recoverable.
 *      In the future there will be a recovery routine for this error.
17161 *      Currently attention is masked.
17162 *      Having an MCP lock on the load/unload process does not guarantee that
17163 *      there is no Timer disable during Func6/7 enable. This is because the
17164 *      Timers scan is currently being cleared by the MCP on FLR.
17165 *      Step 2.d can be done only for PF6/7 and the driver can also check if
17166 *      there is error before clearing it. But the flow above is simpler and
17167 *      more general.
17168 *      All ILT entries are written by zero+valid and not just PF6/7
17169 *      ILT entries since in the future the ILT entries allocation for
17170 *      PF-s might be dynamic.
17171 */
17172        struct ilt_client_info ilt_cli;
17173        struct ecore_ilt ilt;
17174
17175        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17176        memset(&ilt, 0, sizeof(struct ecore_ilt));
17177
17178        /* initialize dummy TM client */
17179        ilt_cli.start      = 0;
17180        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17181        ilt_cli.client_num = ILT_CLIENT_TM;
17182
17183        /*
17184         * Step 1: set zeroes to all ilt page entries with valid bit on
17185         * Step 2: set the timers first/last ilt entry to point
17186         * to the entire range to prevent ILT range error for 3rd/4th
17187         * vnic (this code assumes existence of the vnic)
17188         *
17189         * both steps performed by call to ecore_ilt_client_init_op()
17190         * with dummy TM client
17191         *
17192         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17193         * and his brother are split registers
17194         */
17195
17196        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17197        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17198        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17199
17200        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17201        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17202        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17203    }
17204
17205    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17206    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17207
17208    if (!CHIP_IS_E1x(sc)) {
17209        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17210                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17211
17212        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17213        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17214
        /* let the HW do its magic... */
17216        do {
17217            DELAY(200000);
17218            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17219        } while (factor-- && (val != 1));
17220
17221        if (val != 1) {
17222            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17223            return (-1);
17224        }
17225    }
17226
17227    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17228
17229    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17230
17231    bxe_iov_init_dmae(sc);
17232
17233    /* clean the DMAE memory */
17234    sc->dmae_ready = 1;
17235    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17236
17237    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17238
17239    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17240
17241    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17242
17243    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17244
17245    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17246    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17247    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17248    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17249
17250    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17251
17252    /* QM queues pointers table */
17253    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17254
17255    /* soft reset pulse */
17256    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17257    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17258
17259    if (CNIC_SUPPORT(sc))
17260        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17261
17262    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17263    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17264    if (!CHIP_REV_IS_SLOW(sc)) {
17265        /* enable hw interrupt from doorbell Q */
17266        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17267    }
17268
17269    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17270
17271    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17272    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17273
17274    if (!CHIP_IS_E1(sc)) {
17275        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17276    }
17277
17278    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17279        if (IS_MF_AFEX(sc)) {
17280            /*
17281             * configure that AFEX and VLAN headers must be
17282             * received in AFEX mode
17283             */
17284            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17285            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17286            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17287            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17288            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17289        } else {
17290            /*
17291             * Bit-map indicating which L2 hdrs may appear
17292             * after the basic Ethernet header
17293             */
17294            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17295                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17296        }
17297    }
17298
17299    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17300    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17301    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17302    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17303
17304    if (!CHIP_IS_E1x(sc)) {
17305        /* reset VFC memories */
17306        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17307               VFC_MEMORIES_RST_REG_CAM_RST |
17308               VFC_MEMORIES_RST_REG_RAM_RST);
17309        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17310               VFC_MEMORIES_RST_REG_CAM_RST |
17311               VFC_MEMORIES_RST_REG_RAM_RST);
17312
17313        DELAY(20000);
17314    }
17315
17316    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17317    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17318    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17319    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17320
17321    /* sync semi rtc */
17322    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17323           0x80000000);
17324    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17325           0x80000000);
17326
17327    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17328    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17329    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17330
17331    if (!CHIP_IS_E1x(sc)) {
17332        if (IS_MF_AFEX(sc)) {
17333            /*
17334             * configure that AFEX and VLAN headers must be
17335             * sent in AFEX mode
17336             */
17337            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17338            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17339            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17340            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17341            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17342        } else {
17343            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17344                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17345        }
17346    }
17347
17348    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17349
17350    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17351
17352    if (CNIC_SUPPORT(sc)) {
17353        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17354        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17355        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17356        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17357        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17358        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17359        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17360        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17361        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17362        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17363    }
17364    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17365
17366    if (sizeof(union cdu_context) != 1024) {
17367        /* we currently assume that a context is 1024 bytes */
17368        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17369              (long)sizeof(union cdu_context));
17370    }
17371
17372    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17373    val = (4 << 24) + (0 << 12) + 1024;
17374    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17375
17376    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17377
17378    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17379    /* enable context validation interrupt from CFC */
17380    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17381
17382    /* set the thresholds to prevent CFC/CDU race */
17383    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17384    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17385
17386    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17387        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17388    }
17389
17390    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17391    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17392
17393    /* Reset PCIE errors for debug */
17394    REG_WR(sc, 0x2814, 0xffffffff);
17395    REG_WR(sc, 0x3820, 0xffffffff);
17396
17397    if (!CHIP_IS_E1x(sc)) {
17398        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17399               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17400                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17401        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17402               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17403                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17404                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17405        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17406               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17407                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17408                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17409    }
17410
17411    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17412
17413    if (!CHIP_IS_E1(sc)) {
        /* in E3 this is done in the per-port section */
17415        if (!CHIP_IS_E3(sc))
17416            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17417    }
17418
17419    if (CHIP_IS_E1H(sc)) {
17420        /* not applicable for E2 (and above ...) */
17421        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17422    }
17423
17424    if (CHIP_REV_IS_SLOW(sc)) {
17425        DELAY(200000);
17426    }
17427
17428    /* finish CFC init */
17429    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17430    if (val != 1) {
17431        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17432        return (-1);
17433    }
17434    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17435    if (val != 1) {
17436        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17437        return (-1);
17438    }
17439    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17440    if (val != 1) {
17441        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17442        return (-1);
17443    }
17444    REG_WR(sc, CFC_REG_DEBUG0, 0);
17445
17446    if (CHIP_IS_E1(sc)) {
        /* read the NIG statistic to see if this is our first load since power-up */
17448        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17449        val = *BXE_SP(sc, wb_data[0]);
17450
17451        /* do internal memory self test */
17452        if ((val == 0) && bxe_int_mem_test(sc)) {
17453            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17454            return (-1);
17455        }
17456    }
17457
17458    bxe_setup_fan_failure_detection(sc);
17459
17460    /* clear PXP2 attentions */
17461    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17462
17463    bxe_enable_blocks_attention(sc);
17464
17465    if (!CHIP_REV_IS_SLOW(sc)) {
17466        ecore_enable_blocks_parity(sc);
17467    }
17468
17469    if (!BXE_NOMCP(sc)) {
17470        if (CHIP_IS_E1x(sc)) {
17471            bxe_common_init_phy(sc);
17472        }
17473    }
17474
17475    return (0);
17476}
17477
17478/**
17479 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17480 *
17481 * @sc:     driver handle
17482 */
17483static int
17484bxe_init_hw_common_chip(struct bxe_softc *sc)
17485{
17486    int rc = bxe_init_hw_common(sc);
17487
17488    if (rc) {
17489        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17490        return (rc);
17491    }
17492
17493    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17494    if (!BXE_NOMCP(sc)) {
17495        bxe_common_init_phy(sc);
17496    }
17497
17498    return (0);
17499}
17500
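/**
 * bxe_init_hw_port - init HW at the PORT phase.
 *
 * @sc:     driver handle
 *
 * Initializes the per-port blocks: BRB pause thresholds, parser header
 * configuration, PBF credits (E1x), AEU attention masks and NIG
 * classification for the port owned by this function.
 */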
17501static int
17502bxe_init_hw_port(struct bxe_softc *sc)
17503{
17504    int port = SC_PORT(sc);
17505    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17506    uint32_t low, high;
17507    uint32_t val;
17508
17509    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17510
17511    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17512
17513    ecore_init_block(sc, BLOCK_MISC, init_phase);
17514    ecore_init_block(sc, BLOCK_PXP, init_phase);
17515    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17516
    /*
     * Timers bug workaround: the common phase disables the pf_master bit
     * in PGLUE, so we need to re-enable it here before any DMAE access is
     * attempted. The enable-master is therefore done manually in the port
     * phase (it also happens again in the function phase).
     */
17523    if (!CHIP_IS_E1x(sc)) {
17524        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17525    }
17526
17527    ecore_init_block(sc, BLOCK_ATC, init_phase);
17528    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17529    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17530    ecore_init_block(sc, BLOCK_QM, init_phase);
17531
17532    ecore_init_block(sc, BLOCK_TCM, init_phase);
17533    ecore_init_block(sc, BLOCK_UCM, init_phase);
17534    ecore_init_block(sc, BLOCK_CCM, init_phase);
17535    ecore_init_block(sc, BLOCK_XCM, init_phase);
17536
17537    /* QM cid (connection) count */
17538    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17539
17540    if (CNIC_SUPPORT(sc)) {
17541        ecore_init_block(sc, BLOCK_TM, init_phase);
17542        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17543        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17544    }
17545
17546    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17547
17548    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17549
17550    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17551        if (IS_MF(sc)) {
17552            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17553        } else if (sc->mtu > 4096) {
17554            if (BXE_ONE_PORT(sc)) {
17555                low = 160;
17556            } else {
17557                val = sc->mtu;
17558                /* (24*1024 + val*4)/256 */
17559                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17560            }
17561        } else {
17562            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17563        }
17564        high = (low + 56); /* 14*1024/256 */
17565        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17566        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17567    }
17568
17569    if (CHIP_IS_MODE_4_PORT(sc)) {
17570        REG_WR(sc, SC_PORT(sc) ?
17571               BRB1_REG_MAC_GUARANTIED_1 :
17572               BRB1_REG_MAC_GUARANTIED_0, 40);
17573    }
17574
17575    ecore_init_block(sc, BLOCK_PRS, init_phase);
17576    if (CHIP_IS_E3B0(sc)) {
17577        if (IS_MF_AFEX(sc)) {
17578            /* configure headers for AFEX mode */
17579            REG_WR(sc, SC_PORT(sc) ?
17580                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17581                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17582            REG_WR(sc, SC_PORT(sc) ?
17583                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17584                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17585            REG_WR(sc, SC_PORT(sc) ?
17586                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17587                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17588        } else {
            /* Ovlan headers exist only in multi-function +
             * switch-dependent mode; in switch-independent mode
             * there are no ovlan headers.
             */
17593            REG_WR(sc, SC_PORT(sc) ?
17594                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17595                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17596                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17597        }
17598    }
17599
17600    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17601    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17602    ecore_init_block(sc, BLOCK_USDM, init_phase);
17603    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17604
17605    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17606    ecore_init_block(sc, BLOCK_USEM, init_phase);
17607    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17608    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17609
17610    ecore_init_block(sc, BLOCK_UPB, init_phase);
17611    ecore_init_block(sc, BLOCK_XPB, init_phase);
17612
17613    ecore_init_block(sc, BLOCK_PBF, init_phase);
17614
17615    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE for MTU 9000 */
17617        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17618
17619        /* update threshold */
17620        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17621        /* update init credit */
17622        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17623
17624        /* probe changes */
17625        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17626        DELAY(50);
17627        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17628    }
17629
17630    if (CNIC_SUPPORT(sc)) {
17631        ecore_init_block(sc, BLOCK_SRC, init_phase);
17632    }
17633
17634    ecore_init_block(sc, BLOCK_CDU, init_phase);
17635    ecore_init_block(sc, BLOCK_CFC, init_phase);
17636
17637    if (CHIP_IS_E1(sc)) {
17638        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17639        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17640    }
17641    ecore_init_block(sc, BLOCK_HC, init_phase);
17642
17643    ecore_init_block(sc, BLOCK_IGU, init_phase);
17644
17645    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17646    /* init aeu_mask_attn_func_0/1:
17647     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17648     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17649     *             bits 4-7 are used for "per vn group attention" */
17650    val = IS_MF(sc) ? 0xF7 : 0x7;
17651    /* Enable DCBX attention for all but E1 */
17652    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17653    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17654
17655    ecore_init_block(sc, BLOCK_NIG, init_phase);
17656
17657    if (!CHIP_IS_E1x(sc)) {
17658        /* Bit-map indicating which L2 hdrs may appear after the
17659         * basic Ethernet header
17660         */
17661        if (IS_MF_AFEX(sc)) {
17662            REG_WR(sc, SC_PORT(sc) ?
17663                   NIG_REG_P1_HDRS_AFTER_BASIC :
17664                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17665        } else {
17666            REG_WR(sc, SC_PORT(sc) ?
17667                   NIG_REG_P1_HDRS_AFTER_BASIC :
17668                   NIG_REG_P0_HDRS_AFTER_BASIC,
17669                   IS_MF_SD(sc) ? 7 : 6);
17670        }
17671
17672        if (CHIP_IS_E3(sc)) {
17673            REG_WR(sc, SC_PORT(sc) ?
17674                   NIG_REG_LLH1_MF_MODE :
17675                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17676        }
17677    }
17678    if (!CHIP_IS_E3(sc)) {
17679        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17680    }
17681
17682    if (!CHIP_IS_E1(sc)) {
17683        /* 0x2 disable mf_ov, 0x1 enable */
17684        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17685               (IS_MF_SD(sc) ? 0x1 : 0x2));
17686
17687        if (!CHIP_IS_E1x(sc)) {
17688            val = 0;
17689            switch (sc->devinfo.mf_info.mf_mode) {
17690            case MULTI_FUNCTION_SD:
17691                val = 1;
17692                break;
17693            case MULTI_FUNCTION_SI:
17694            case MULTI_FUNCTION_AFEX:
17695                val = 2;
17696                break;
17697            }
17698
17699            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17700                        NIG_REG_LLH0_CLS_TYPE), val);
17701        }
17702        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17703        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17704        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17705    }
17706
17707    /* If SPIO5 is set to generate interrupts, enable it for this port */
17708    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17709    if (val & MISC_SPIO_SPIO5) {
17710        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17711                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17712        val = REG_RD(sc, reg_addr);
17713        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17714        REG_WR(sc, reg_addr, val);
17715    }
17716
17717    return (0);
17718}
17719
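/*
 * Poll a register until it reads 'expected' or 'poll_count' intervals of
 * FLR_WAIT_INTERVAL usecs have elapsed. Returns the last value read.
 */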
17720static uint32_t
17721bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17722                       uint32_t         reg,
17723                       uint32_t         expected,
17724                       uint32_t         poll_count)
17725{
17726    uint32_t cur_cnt = poll_count;
17727    uint32_t val;
17728
17729    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17730        DELAY(FLR_WAIT_INTERVAL);
17731    }
17732
17733    return (val);
17734}
17735
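/*
 * Poll a HW usage counter until it drops to zero. Returns 1 (logging
 * 'msg' and the residual count) on timeout, 0 on success.
 */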
17736static int
17737bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17738                              uint32_t         reg,
17739                              char             *msg,
17740                              uint32_t         poll_cnt)
17741{
17742    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17743
17744    if (val != 0) {
17745        BLOGE(sc, "%s usage count=%d\n", msg, val);
17746        return (1);
17747    }
17748
17749    return (0);
17750}
17751
17752/* Common routines with VF FLR cleanup */
17753static uint32_t
17754bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17755{
17756    /* adjust polling timeout */
17757    if (CHIP_REV_IS_EMUL(sc)) {
17758        return (FLR_POLL_CNT * 2000);
17759    }
17760
17761    if (CHIP_REV_IS_FPGA(sc)) {
17762        return (FLR_POLL_CNT * 120);
17763    }
17764
17765    return (FLR_POLL_CNT);
17766}
17767
17768static int
17769bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17770                           uint32_t         poll_cnt)
17771{
17772    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17773    if (bxe_flr_clnup_poll_hw_counter(sc,
17774                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17775                                      "CFC PF usage counter timed out",
17776                                      poll_cnt)) {
17777        return (1);
17778    }
17779
17780    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17781    if (bxe_flr_clnup_poll_hw_counter(sc,
17782                                      DORQ_REG_PF_USAGE_CNT,
17783                                      "DQ PF usage counter timed out",
17784                                      poll_cnt)) {
17785        return (1);
17786    }
17787
17788    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17789    if (bxe_flr_clnup_poll_hw_counter(sc,
17790                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17791                                      "QM PF usage counter timed out",
17792                                      poll_cnt)) {
17793        return (1);
17794    }
17795
17796    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17797    if (bxe_flr_clnup_poll_hw_counter(sc,
17798                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17799                                      "Timers VNIC usage counter timed out",
17800                                      poll_cnt)) {
17801        return (1);
17802    }
17803
17804    if (bxe_flr_clnup_poll_hw_counter(sc,
17805                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17806                                      "Timers NUM_SCANS usage counter timed out",
17807                                      poll_cnt)) {
17808        return (1);
17809    }
17810
    /* Wait for the DMAE PF usage counter to zero */
17812    if (bxe_flr_clnup_poll_hw_counter(sc,
17813                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      "DMAE command register timed out",
17815                                      poll_cnt)) {
17816        return (1);
17817    }
17818
17819    return (0);
17820}
17821
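/*
 * Helpers for building the SDM "operation generator" command word used
 * to trigger the FW final cleanup aggregated interrupt.
 */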
17822#define OP_GEN_PARAM(param)                                            \
17823    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17824#define OP_GEN_TYPE(type)                                           \
17825    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17826#define OP_GEN_AGG_VECT(index)                                             \
17827    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17828
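/*
 * Issue the FW final cleanup command for 'clnup_func' through the XSDM
 * operation generator and wait for the CSTORM completion flag. Panics if
 * the completion does not arrive within the poll budget.
 */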
17829static int
17830bxe_send_final_clnup(struct bxe_softc *sc,
17831                     uint8_t          clnup_func,
17832                     uint32_t         poll_cnt)
17833{
17834    uint32_t op_gen_command = 0;
17835    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17836                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17837    int ret = 0;
17838
17839    if (REG_RD(sc, comp_addr)) {
17840        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17841        return (1);
17842    }
17843
17844    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17845    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17846    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17847    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17848
17849    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17850    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17851
17852    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17853        BLOGE(sc, "FW final cleanup did not succeed\n");
17854        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17855              (REG_RD(sc, comp_addr)));
17856        bxe_panic(sc, ("FLR cleanup failed\n"));
17857        return (1);
17858    }
17859
    /* Zero the completion for the next FLR */
17861    REG_WR(sc, comp_addr, 0);
17862
17863    return (ret);
17864}
17865
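/*
 * Wait for the PBF tx buffer of port/queue 'pN' to drain: poll until the
 * credit count returns to its initial value or enough credits have been
 * freed to cover what was outstanding when polling started.
 */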
17866static void
17867bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17868                       struct pbf_pN_buf_regs *regs,
17869                       uint32_t               poll_count)
17870{
17871    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17872    uint32_t cur_cnt = poll_count;
17873
17874    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17875    crd = crd_start = REG_RD(sc, regs->crd);
17876    init_crd = REG_RD(sc, regs->init_crd);
17877
17878    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17879    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17880    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17881
17882    while ((crd != init_crd) &&
17883           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17884            (init_crd - crd_start))) {
17885        if (cur_cnt--) {
17886            DELAY(FLR_WAIT_INTERVAL);
17887            crd = REG_RD(sc, regs->crd);
17888            crd_freed = REG_RD(sc, regs->crd_freed);
17889        } else {
17890            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17891            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17892            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17893            break;
17894        }
17895    }
17896
17897    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17898          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17899}
17900
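/*
 * Wait for the PBF command queue of port/queue 'pN' to drain: poll until
 * the occupancy drops to zero or enough lines have been freed to cover
 * what was occupied when polling started.
 */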
17901static void
17902bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17903                       struct pbf_pN_cmd_regs *regs,
17904                       uint32_t               poll_count)
17905{
17906    uint32_t occup, to_free, freed, freed_start;
17907    uint32_t cur_cnt = poll_count;
17908
17909    occup = to_free = REG_RD(sc, regs->lines_occup);
17910    freed = freed_start = REG_RD(sc, regs->lines_freed);
17911
17912    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17913    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17914
17915    while (occup &&
17916           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17917        if (cur_cnt--) {
17918            DELAY(FLR_WAIT_INTERVAL);
17919            occup = REG_RD(sc, regs->lines_occup);
17920            freed = REG_RD(sc, regs->lines_freed);
17921        } else {
17922            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17923            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17924            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17925            break;
17926        }
17927    }
17928
17929    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17930          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17931}
17932
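/*
 * Verify that the PBF command queues and tx buffers of queues P0, P1 and
 * P4 (LB) have been flushed, using the E3B0 or pre-E3B0 register layout
 * as appropriate.
 */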
17933static void
17934bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17935{
17936    struct pbf_pN_cmd_regs cmd_regs[] = {
17937        {0, (CHIP_IS_E3B0(sc)) ?
17938            PBF_REG_TQ_OCCUPANCY_Q0 :
17939            PBF_REG_P0_TQ_OCCUPANCY,
17940            (CHIP_IS_E3B0(sc)) ?
17941            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17942            PBF_REG_P0_TQ_LINES_FREED_CNT},
17943        {1, (CHIP_IS_E3B0(sc)) ?
17944            PBF_REG_TQ_OCCUPANCY_Q1 :
17945            PBF_REG_P1_TQ_OCCUPANCY,
17946            (CHIP_IS_E3B0(sc)) ?
17947            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17948            PBF_REG_P1_TQ_LINES_FREED_CNT},
17949        {4, (CHIP_IS_E3B0(sc)) ?
17950            PBF_REG_TQ_OCCUPANCY_LB_Q :
17951            PBF_REG_P4_TQ_OCCUPANCY,
17952            (CHIP_IS_E3B0(sc)) ?
17953            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17954            PBF_REG_P4_TQ_LINES_FREED_CNT}
17955    };
17956
17957    struct pbf_pN_buf_regs buf_regs[] = {
17958        {0, (CHIP_IS_E3B0(sc)) ?
17959            PBF_REG_INIT_CRD_Q0 :
17960            PBF_REG_P0_INIT_CRD ,
17961            (CHIP_IS_E3B0(sc)) ?
17962            PBF_REG_CREDIT_Q0 :
17963            PBF_REG_P0_CREDIT,
17964            (CHIP_IS_E3B0(sc)) ?
17965            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17966            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17967        {1, (CHIP_IS_E3B0(sc)) ?
17968            PBF_REG_INIT_CRD_Q1 :
17969            PBF_REG_P1_INIT_CRD,
17970            (CHIP_IS_E3B0(sc)) ?
17971            PBF_REG_CREDIT_Q1 :
17972            PBF_REG_P1_CREDIT,
17973            (CHIP_IS_E3B0(sc)) ?
17974            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17975            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17976        {4, (CHIP_IS_E3B0(sc)) ?
17977            PBF_REG_INIT_CRD_LB_Q :
17978            PBF_REG_P4_INIT_CRD,
17979            (CHIP_IS_E3B0(sc)) ?
17980            PBF_REG_CREDIT_LB_Q :
17981            PBF_REG_P4_CREDIT,
17982            (CHIP_IS_E3B0(sc)) ?
17983            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17984            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17985    };
17986
17987    int i;
17988
17989    /* Verify the command queues are flushed P0, P1, P4 */
17990    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17991        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17992    }
17993
17994    /* Verify the transmission buffers are flushed P0, P1, P4 */
17995    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17996        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17997    }
17998}
17999
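/* Log (debug level) the per-PF enable/disable status registers. */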
18000static void
18001bxe_hw_enable_status(struct bxe_softc *sc)
18002{
18003    uint32_t val;
18004
18005    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18006    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18007
18008    val = REG_RD(sc, PBF_REG_DISABLE_PF);
18009    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18010
18011    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18012    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18013
18014    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18015    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18016
18017    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18018    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18019
18020    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18021    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18022
18023    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18024    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18025
18026    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18027    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18028}
18029
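/*
 * PF FLR cleanup, invoked from bxe_init_hw_func() before the rest of the
 * function-phase HW init: re-enable target reads, wait for the HW usage
 * counters to reach zero, send the FW final cleanup command, verify the
 * PBF tx path is flushed and no PCIe transactions are pending, then
 * re-enable the PF master bit.
 */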
18030static int
18031bxe_pf_flr_clnup(struct bxe_softc *sc)
18032{
18033    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18034
18035    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18036
18037    /* Re-enable PF target read access */
18038    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18039
18040    /* Poll HW usage counters */
18041    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18042    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18043        return (-1);
18044    }
18045
18046    /* Zero the igu 'trailing edge' and 'leading edge' */
18047
18048    /* Send the FW cleanup command */
18049    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18050        return (-1);
18051    }
18052
18053    /* ATC cleanup */
18054
18055    /* Verify TX hw is flushed */
18056    bxe_tx_hw_flushed(sc, poll_cnt);
18057
18058    /* Wait 100ms (not adjusted according to platform) */
18059    DELAY(100000);
18060
18061    /* Verify no pending pci transactions */
18062    if (bxe_is_pcie_pending(sc)) {
18063        BLOGE(sc, "PCIE Transactions still pending\n");
18064    }
18065
18066    /* Debug */
18067    bxe_hw_enable_status(sc);
18068
    /*
     * Master enable - WB DMAE writes are performed before this register
     * is re-initialized as part of the regular function init, so enable
     * it here.
     */
18073    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18074
18075    return (0);
18076}
18077
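/**
 * bxe_init_hw_func - init HW at the function (PF) phase.
 *
 * @sc:     driver handle
 *
 * Runs the FLR cleanup (E2 and newer), programs the CDU ILT lines for
 * the L2 contexts, initializes the per-function blocks, sets up the
 * HC/IGU producers for this function and probes the PHY.
 */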
18078static int
18079bxe_init_hw_func(struct bxe_softc *sc)
18080{
18081    int port = SC_PORT(sc);
18082    int func = SC_FUNC(sc);
18083    int init_phase = PHASE_PF0 + func;
18084    struct ecore_ilt *ilt = sc->ilt;
18085    uint16_t cdu_ilt_start;
18086    uint32_t addr, val;
18087    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18088    int i, main_mem_width, rc;
18089
18090    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18091
18092    /* FLR cleanup */
18093    if (!CHIP_IS_E1x(sc)) {
18094        rc = bxe_pf_flr_clnup(sc);
18095        if (rc) {
18096            BLOGE(sc, "FLR cleanup failed!\n");
18097            // XXX bxe_fw_dump(sc);
18098            // XXX bxe_idle_chk(sc);
18099            return (rc);
18100        }
18101    }
18102
18103    /* set MSI reconfigure capability */
18104    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18105        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18106        val = REG_RD(sc, addr);
18107        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18108        REG_WR(sc, addr, val);
18109    }
18110
18111    ecore_init_block(sc, BLOCK_PXP, init_phase);
18112    ecore_init_block(sc, BLOCK_PXP2, init_phase);
18113
18114    ilt = sc->ilt;
18115    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18116
18117    for (i = 0; i < L2_ILT_LINES(sc); i++) {
18118        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18119        ilt->lines[cdu_ilt_start + i].page_mapping =
18120            sc->context[i].vcxt_dma.paddr;
18121        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18122    }
18123    ecore_ilt_init_op(sc, INITOP_SET);
18124
18125    /* Set NIC mode */
18126    REG_WR(sc, PRS_REG_NIC_MODE, 1);
18127    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18128
18129    if (!CHIP_IS_E1x(sc)) {
18130        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18131
18132        /* Turn on a single ISR mode in IGU if driver is going to use
18133         * INT#x or MSI
18134         */
18135        if (sc->interrupt_mode != INTR_MODE_MSIX) {
18136            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18137        }
18138
        /*
         * Timers bug workaround: function init part.
         * Wait 20 msec after initializing the ILT to make sure there are
         * no requests in any of the PXP internal queues with "old" ILT
         * addresses.
         */
18145        DELAY(20000);
18146
        /*
         * Master enable - WB DMAE writes are performed before this
         * register is re-initialized as part of the regular function
         * init, so enable it here.
         */
18152        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18153        /* Enable the function in IGU */
18154        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18155    }
18156
18157    sc->dmae_ready = 1;
18158
18159    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18160
18161    if (!CHIP_IS_E1x(sc))
18162        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18163
18164    ecore_init_block(sc, BLOCK_ATC, init_phase);
18165    ecore_init_block(sc, BLOCK_DMAE, init_phase);
18166    ecore_init_block(sc, BLOCK_NIG, init_phase);
18167    ecore_init_block(sc, BLOCK_SRC, init_phase);
18168    ecore_init_block(sc, BLOCK_MISC, init_phase);
18169    ecore_init_block(sc, BLOCK_TCM, init_phase);
18170    ecore_init_block(sc, BLOCK_UCM, init_phase);
18171    ecore_init_block(sc, BLOCK_CCM, init_phase);
18172    ecore_init_block(sc, BLOCK_XCM, init_phase);
18173    ecore_init_block(sc, BLOCK_TSEM, init_phase);
18174    ecore_init_block(sc, BLOCK_USEM, init_phase);
18175    ecore_init_block(sc, BLOCK_CSEM, init_phase);
18176    ecore_init_block(sc, BLOCK_XSEM, init_phase);
18177
18178    if (!CHIP_IS_E1x(sc))
18179        REG_WR(sc, QM_REG_PF_EN, 1);
18180
18181    if (!CHIP_IS_E1x(sc)) {
18182        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18183        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18184        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18185        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18186    }
18187    ecore_init_block(sc, BLOCK_QM, init_phase);
18188
18189    ecore_init_block(sc, BLOCK_TM, init_phase);
18190    ecore_init_block(sc, BLOCK_DORQ, init_phase);
18191
18192    bxe_iov_init_dq(sc);
18193
18194    ecore_init_block(sc, BLOCK_BRB1, init_phase);
18195    ecore_init_block(sc, BLOCK_PRS, init_phase);
18196    ecore_init_block(sc, BLOCK_TSDM, init_phase);
18197    ecore_init_block(sc, BLOCK_CSDM, init_phase);
18198    ecore_init_block(sc, BLOCK_USDM, init_phase);
18199    ecore_init_block(sc, BLOCK_XSDM, init_phase);
18200    ecore_init_block(sc, BLOCK_UPB, init_phase);
18201    ecore_init_block(sc, BLOCK_XPB, init_phase);
18202    ecore_init_block(sc, BLOCK_PBF, init_phase);
18203    if (!CHIP_IS_E1x(sc))
18204        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18205
18206    ecore_init_block(sc, BLOCK_CDU, init_phase);
18207
18208    ecore_init_block(sc, BLOCK_CFC, init_phase);
18209
18210    if (!CHIP_IS_E1x(sc))
18211        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18212
18213    if (IS_MF(sc)) {
18214        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18215        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18216    }
18217
18218    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18219
18220    /* HC init per function */
18221    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18222        if (CHIP_IS_E1H(sc)) {
18223            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18224
18225            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18226            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18227        }
18228        ecore_init_block(sc, BLOCK_HC, init_phase);
18229
18230    } else {
18231        int num_segs, sb_idx, prod_offset;
18232
18233        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18234
18235        if (!CHIP_IS_E1x(sc)) {
18236            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18237            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18238        }
18239
18240        ecore_init_block(sc, BLOCK_IGU, init_phase);
18241
18242        if (!CHIP_IS_E1x(sc)) {
18243            int dsb_idx = 0;
18244            /**
18245             * Producer memory:
18246             * E2 mode: address 0-135 match to the mapping memory;
18247             * 136 - PF0 default prod; 137 - PF1 default prod;
18248             * 138 - PF2 default prod; 139 - PF3 default prod;
18249             * 140 - PF0 attn prod;    141 - PF1 attn prod;
18250             * 142 - PF2 attn prod;    143 - PF3 attn prod;
18251             * 144-147 reserved.
18252             *
             * E1.5 mode (backward compatible mode):
             * for non-default SBs, each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSBs, one set per PF.
             * Each PF has five segments (the order inside each
             * segment is PF0, PF1, PF2, PF3): 128-131 U prods,
             * 132-135 C prods, 136-139 X prods, 140-143 T prods,
             * 144-147 attn prods.
18263             */
18264            /* non-default-status-blocks */
18265            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18266                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18267            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18268                prod_offset = (sc->igu_base_sb + sb_idx) *
18269                    num_segs;
18270
18271                for (i = 0; i < num_segs; i++) {
18272                    addr = IGU_REG_PROD_CONS_MEMORY +
18273                            (prod_offset + i) * 4;
18274                    REG_WR(sc, addr, 0);
18275                }
18276                /* send consumer update with value 0 */
18277                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18278                           USTORM_ID, 0, IGU_INT_NOP, 1);
18279                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18280            }
18281
18282            /* default-status-blocks */
18283            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18284                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18285
18286            if (CHIP_IS_MODE_4_PORT(sc))
18287                dsb_idx = SC_FUNC(sc);
18288            else
18289                dsb_idx = SC_VN(sc);
18290
18291            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18292                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18293                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18294
            /*
             * IGU producers come in chunks of E1HVN_MAX (4),
             * regardless of the current chip mode.
             */
18299            for (i = 0; i < (num_segs * E1HVN_MAX);
18300                 i += E1HVN_MAX) {
18301                addr = IGU_REG_PROD_CONS_MEMORY +
18302                            (prod_offset + i)*4;
18303                REG_WR(sc, addr, 0);
18304            }
18305            /* send consumer update with 0 */
18306            if (CHIP_INT_MODE_IS_BC(sc)) {
18307                bxe_ack_sb(sc, sc->igu_dsb_id,
18308                           USTORM_ID, 0, IGU_INT_NOP, 1);
18309                bxe_ack_sb(sc, sc->igu_dsb_id,
18310                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18311                bxe_ack_sb(sc, sc->igu_dsb_id,
18312                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18313                bxe_ack_sb(sc, sc->igu_dsb_id,
18314                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18315                bxe_ack_sb(sc, sc->igu_dsb_id,
18316                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18317            } else {
18318                bxe_ack_sb(sc, sc->igu_dsb_id,
18319                           USTORM_ID, 0, IGU_INT_NOP, 1);
18320                bxe_ack_sb(sc, sc->igu_dsb_id,
18321                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18322            }
18323            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18324
18325            /* !!! these should become driver const once
18326               rf-tool supports split-68 const */
18327            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18328            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18329            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18330            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18331            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18332            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18333        }
18334    }
18335
18336    /* Reset PCIE errors for debug */
18337    REG_WR(sc, 0x2114, 0xffffffff);
18338    REG_WR(sc, 0x2120, 0xffffffff);
18339
18340    if (CHIP_IS_E1x(sc)) {
18341        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18342        main_mem_base = HC_REG_MAIN_MEMORY +
18343                SC_PORT(sc) * (main_mem_size * 4);
18344        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18345        main_mem_width = 8;
18346
18347        val = REG_RD(sc, main_mem_prty_clr);
18348        if (val) {
18349            BLOGD(sc, DBG_LOAD,
18350                  "Parity errors in HC block during function init (0x%x)!\n",
18351                  val);
18352        }
18353
18354        /* Clear "false" parity errors in MSI-X table */
18355        for (i = main_mem_base;
18356             i < main_mem_base + main_mem_size * 4;
18357             i += main_mem_width) {
18358            bxe_read_dmae(sc, i, main_mem_width / 4);
18359            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18360                           i, main_mem_width / 4);
18361        }
18362        /* Clear HC parity attention */
18363        REG_RD(sc, main_mem_prty_clr);
18364    }
18365
18366#if 1
18367    /* Enable STORMs SP logging */
18368    REG_WR8(sc, BAR_USTRORM_INTMEM +
18369           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18370    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18371           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18372    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18373           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18374    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18375           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18376#endif
18377
18378    elink_phy_probe(&sc->link_params);
18379
18380    return (0);
18381}
18382
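/* Bring the link down via elink; requires the bootcode to be present. */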
18383static void
18384bxe_link_reset(struct bxe_softc *sc)
18385{
18386    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
18390    } else {
18391        if (!CHIP_REV_IS_SLOW(sc)) {
18392            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18393        }
18394    }
18395}
18396
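/*
 * Reset the port: bring the link down, mask the port's NIG interrupts,
 * stop packet reception into the BRB, clear the AEU attention mask and
 * check that the BRB has drained.
 */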
18397static void
18398bxe_reset_port(struct bxe_softc *sc)
18399{
18400    int port = SC_PORT(sc);
18401    uint32_t val;
18402
    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");

    /* reset the physical link */
18405    bxe_link_reset(sc);
18406
18407    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18408
    /* Do not receive packets into the BRB */
18410    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct received packets that are not for the MCP to the BRB */
18412    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18413               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18414
18415    /* Configure AEU */
18416    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18417
18418    DELAY(100000);
18419
18420    /* Check for BRB port occupancy */
18421    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18422    if (val) {
18423        BLOGD(sc, DBG_LOAD,
18424              "BRB1 is not empty, %d blocks are occupied\n", val);
18425    }
18426
18427    /* TODO: Close Doorbell port? */
18428}
18429
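/* Program a single ILT (on-chip address table) entry via DMAE. */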
18430static void
18431bxe_ilt_wr(struct bxe_softc *sc,
18432           uint32_t         index,
18433           bus_addr_t       addr)
18434{
18435    int reg;
18436    uint32_t wb_write[2];
18437
18438    if (CHIP_IS_E1(sc)) {
18439        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18440    } else {
18441        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18442    }
18443
18444    wb_write[0] = ONCHIP_ADDR1(addr);
18445    wb_write[1] = ONCHIP_ADDR2(addr);
18446    REG_WR_DMAE(sc, reg, wb_write, 2);
18447}
18448
18449static void
18450bxe_clear_func_ilt(struct bxe_softc *sc,
18451                   uint32_t         func)
18452{
18453    uint32_t i, base = FUNC_ILT_BASE(func);
18454    for (i = base; i < base + ILT_PER_FUNC; i++) {
18455        bxe_ilt_wr(sc, i, 0);
18456    }
18457}
18458
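/*
 * Reset the function: disable it in the FW, mark its status blocks
 * disabled, clear the HC/IGU edge latches, wait for a pending timer scan
 * (when CNIC is loaded), clear the function's ILT range and disable the
 * PF (E2 and newer).
 */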
18459static void
18460bxe_reset_func(struct bxe_softc *sc)
18461{
18462    struct bxe_fastpath *fp;
18463    int port = SC_PORT(sc);
18464    int func = SC_FUNC(sc);
18465    int i;
18466
18467    /* Disable the function in the FW */
18468    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18469    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18470    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18471    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18472
18473    /* FP SBs */
18474    FOR_EACH_ETH_QUEUE(sc, i) {
18475        fp = &sc->fp[i];
18476        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18477                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18478                SB_DISABLED);
18479    }
18480
18481    /* SP SB */
18482    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18483            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18484            SB_DISABLED);
18485
18486    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18487        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18488    }
18489
18490    /* Configure IGU */
18491    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18492        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18493        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18494    } else {
18495        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18496        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18497    }
18498
18499    if (CNIC_LOADED(sc)) {
18500        /* Disable Timer scan */
18501        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18502        /*
         * Wait for at least 10ms and up to 2 seconds for the timers
18504         * scan to complete
18505         */
18506        for (i = 0; i < 200; i++) {
18507            DELAY(10000);
18508            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18509                break;
18510        }
18511    }
18512
18513    /* Clear ILT */
18514    bxe_clear_func_ilt(sc, func);
18515
18516    /*
     * Timers bug workaround for E2: if this is vnic-3,
     * we need to set the entire ILT range for the timers.
18519     */
18520    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18521        struct ilt_client_info ilt_cli;
18522        /* use dummy TM client */
18523        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18524        ilt_cli.start = 0;
18525        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18526        ilt_cli.client_num = ILT_CLIENT_TM;
18527
18528        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18529    }
18530
    /* this assumes that reset_port() was called before reset_func() */
18532    if (!CHIP_IS_E1x(sc)) {
18533        bxe_pf_disable(sc);
18534    }
18535
18536    sc->dmae_ready = 0;
18537}
18538
18539static int
18540bxe_gunzip_init(struct bxe_softc *sc)
18541{
18542    return (0);
18543}
18544
18545static void
18546bxe_gunzip_end(struct bxe_softc *sc)
18547{
18548    return;
18549}
18550
18551static int
18552bxe_init_firmware(struct bxe_softc *sc)
18553{
18554    if (CHIP_IS_E1(sc)) {
18555        ecore_init_e1_firmware(sc);
18556        sc->iro_array = e1_iro_arr;
18557    } else if (CHIP_IS_E1H(sc)) {
18558        ecore_init_e1h_firmware(sc);
18559        sc->iro_array = e1h_iro_arr;
18560    } else if (!CHIP_IS_E1x(sc)) {
18561        ecore_init_e2_firmware(sc);
18562        sc->iro_array = e2_iro_arr;
18563    } else {
18564        BLOGE(sc, "Unsupported chip revision\n");
18565        return (-1);
18566    }
18567
18568    return (0);
18569}
18570
18571static void
18572bxe_release_firmware(struct bxe_softc *sc)
18573{
18574    /* Do nothing */
18575    return;
18576}
18577
18578static int
18579ecore_gunzip(struct bxe_softc *sc,
18580             const uint8_t    *zbuf,
18581             int              len)
18582{
18583    /* XXX : Implement... */
18584    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18585    return (FALSE);
18586}
18587
18588static void
18589ecore_reg_wr_ind(struct bxe_softc *sc,
18590                 uint32_t         addr,
18591                 uint32_t         val)
18592{
18593    bxe_reg_wr_ind(sc, addr, val);
18594}
18595
18596static void
18597ecore_write_dmae_phys_len(struct bxe_softc *sc,
18598                          bus_addr_t       phys_addr,
18599                          uint32_t         addr,
18600                          uint32_t         len)
18601{
18602    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18603}
18604
18605void
18606ecore_storm_memset_struct(struct bxe_softc *sc,
18607                          uint32_t         addr,
18608                          size_t           size,
18609                          uint32_t         *data)
18610{
18611    uint8_t i;
18612    for (i = 0; i < size/4; i++) {
18613        REG_WR(sc, addr + (i * 4), data[i]);
18614    }
18615}
18616
18617
18618/*
18619 * character device - ioctl interface definitions
18620 */
18621
18622
18623#include "bxe_dump.h"
18624#include "bxe_ioctl.h"
18625#include <sys/conf.h>
18626
18627static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18628                struct thread *td);
18629
18630static struct cdevsw bxe_cdevsw = {
18631    .d_version = D_VERSION,
18632    .d_ioctl = bxe_eioctl,
18633    .d_name = "bxecnic",
18634};
18635
18636#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18637
18638
18639#define DUMP_ALL_PRESETS        0x1FFF
18640#define DUMP_MAX_PRESETS        13
18641#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18642#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18643#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18644#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18645#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18646
18647#define IS_REG_IN_PRESET(presets, idx)  \
18648                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18649
18650
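/* Number of 32-bit registers contained in the given dump preset. */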
18651static int
18652bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18653{
18654    if (CHIP_IS_E1(sc))
18655        return dump_num_registers[0][preset-1];
18656    else if (CHIP_IS_E1H(sc))
18657        return dump_num_registers[1][preset-1];
18658    else if (CHIP_IS_E2(sc))
18659        return dump_num_registers[2][preset-1];
18660    else if (CHIP_IS_E3A0(sc))
18661        return dump_num_registers[3][preset-1];
18662    else if (CHIP_IS_E3B0(sc))
18663        return dump_num_registers[4][preset-1];
18664    else
18665        return 0;
18666}
18667
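/* Total GRC dump length, in 32-bit words, summed over all presets. */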
18668static int
18669bxe_get_total_regs_len32(struct bxe_softc *sc)
18670{
18671    uint32_t preset_idx;
18672    int regdump_len32 = 0;
18673
18674
18675    /* Calculate the total preset regs length */
18676    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18677        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18678    }
18679
18680    return regdump_len32;
18681}
18682
18683static const uint32_t *
18684__bxe_get_page_addr_ar(struct bxe_softc *sc)
18685{
18686    if (CHIP_IS_E2(sc))
18687        return page_vals_e2;
18688    else if (CHIP_IS_E3(sc))
18689        return page_vals_e3;
18690    else
18691        return NULL;
18692}
18693
18694static uint32_t
18695__bxe_get_page_reg_num(struct bxe_softc *sc)
18696{
18697    if (CHIP_IS_E2(sc))
18698        return PAGE_MODE_VALUES_E2;
18699    else if (CHIP_IS_E3(sc))
18700        return PAGE_MODE_VALUES_E3;
18701    else
18702        return 0;
18703}
18704
18705static const uint32_t *
18706__bxe_get_page_write_ar(struct bxe_softc *sc)
18707{
18708    if (CHIP_IS_E2(sc))
18709        return page_write_regs_e2;
18710    else if (CHIP_IS_E3(sc))
18711        return page_write_regs_e3;
18712    else
18713        return NULL;
18714}
18715
18716static uint32_t
18717__bxe_get_page_write_num(struct bxe_softc *sc)
18718{
18719    if (CHIP_IS_E2(sc))
18720        return PAGE_WRITE_REGS_E2;
18721    else if (CHIP_IS_E3(sc))
18722        return PAGE_WRITE_REGS_E3;
18723    else
18724        return 0;
18725}
18726
18727static const struct reg_addr *
18728__bxe_get_page_read_ar(struct bxe_softc *sc)
18729{
18730    if (CHIP_IS_E2(sc))
18731        return page_read_regs_e2;
18732    else if (CHIP_IS_E3(sc))
18733        return page_read_regs_e3;
18734    else
18735        return NULL;
18736}
18737
18738static uint32_t
18739__bxe_get_page_read_num(struct bxe_softc *sc)
18740{
18741    if (CHIP_IS_E2(sc))
18742        return PAGE_READ_REGS_E2;
18743    else if (CHIP_IS_E3(sc))
18744        return PAGE_READ_REGS_E3;
18745    else
18746        return 0;
18747}
18748
18749static bool
18750bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18751{
18752    if (CHIP_IS_E1(sc))
18753        return IS_E1_REG(reg_info->chips);
18754    else if (CHIP_IS_E1H(sc))
18755        return IS_E1H_REG(reg_info->chips);
18756    else if (CHIP_IS_E2(sc))
18757        return IS_E2_REG(reg_info->chips);
18758    else if (CHIP_IS_E3A0(sc))
18759        return IS_E3A0_REG(reg_info->chips);
18760    else if (CHIP_IS_E3B0(sc))
18761        return IS_E3B0_REG(reg_info->chips);
18762    else
18763        return 0;
18764}
18765
18766static bool
18767bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18768{
18769    if (CHIP_IS_E1(sc))
18770        return IS_E1_REG(wreg_info->chips);
18771    else if (CHIP_IS_E1H(sc))
18772        return IS_E1H_REG(wreg_info->chips);
18773    else if (CHIP_IS_E2(sc))
18774        return IS_E2_REG(wreg_info->chips);
18775    else if (CHIP_IS_E3A0(sc))
18776        return IS_E3A0_REG(wreg_info->chips);
18777    else if (CHIP_IS_E3B0(sc))
18778        return IS_E3B0_REG(wreg_info->chips);
18779    else
18780        return 0;
18781}
18782
18783/**
18784 * bxe_read_pages_regs - read "paged" registers
18785 *
 * @sc          driver handle
18787 * @p           output buffer
18788 *
18789 * Reads "paged" memories: memories that may only be read by first writing to a
18790 * specific address ("write address") and then reading from a specific address
18791 * ("read address"). There may be more than one write address per "page" and
18792 * more than one read address per write address.
18793 */
18794static void
18795bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18796{
18797    uint32_t i, j, k, n;
18798
18799    /* addresses of the paged registers */
18800    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18801    /* number of paged registers */
18802    int num_pages = __bxe_get_page_reg_num(sc);
18803    /* write addresses */
18804    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18805    /* number of write addresses */
18806    int write_num = __bxe_get_page_write_num(sc);
18807    /* read addresses info */
18808    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18809    /* number of read addresses */
18810    int read_num = __bxe_get_page_read_num(sc);
18811    uint32_t addr, size;
18812
18813    for (i = 0; i < num_pages; i++) {
18814        for (j = 0; j < write_num; j++) {
18815            REG_WR(sc, write_addr[j], page_addr[i]);
18816
18817            for (k = 0; k < read_num; k++) {
18818                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18819                    size = read_addr[k].size;
18820                    for (n = 0; n < size; n++) {
18821                        addr = read_addr[k].addr + n*4;
18822                        *p++ = REG_RD(sc, addr);
18823                    }
18824                }
18825            }
18826        }
18827    }
18828    return;
18829}
18830
18831
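/*
 * Read all registers belonging to the given preset (idle_chk registers,
 * regular registers, the CAM/wreg block and, on E2/E3, the paged
 * registers) into the buffer at 'p'.
 */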
18832static int
18833bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18834{
18835    uint32_t i, j, addr;
18836    const struct wreg_addr *wreg_addr_p = NULL;
18837
18838    if (CHIP_IS_E1(sc))
18839        wreg_addr_p = &wreg_addr_e1;
18840    else if (CHIP_IS_E1H(sc))
18841        wreg_addr_p = &wreg_addr_e1h;
18842    else if (CHIP_IS_E2(sc))
18843        wreg_addr_p = &wreg_addr_e2;
18844    else if (CHIP_IS_E3A0(sc))
18845        wreg_addr_p = &wreg_addr_e3;
18846    else if (CHIP_IS_E3B0(sc))
18847        wreg_addr_p = &wreg_addr_e3b0;
18848    else
18849        return (-1);
18850
18851    /* Read the idle_chk registers */
18852    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18853        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18854            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18855            for (j = 0; j < idle_reg_addrs[i].size; j++)
18856                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18857        }
18858    }
18859
18860    /* Read the regular registers */
18861    for (i = 0; i < REGS_COUNT; i++) {
18862        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18863            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18864            for (j = 0; j < reg_addrs[i].size; j++)
18865                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18866        }
18867    }
18868
18869    /* Read the CAM registers */
18870    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18871        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18872        for (i = 0; i < wreg_addr_p->size; i++) {
18873            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18874
            /*
             * In the case of a wreg_addr register, read the additional
             * registers from the read_regs array.
             */
18878            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18879                addr = *(wreg_addr_p->read_regs);
18880                *p++ = REG_RD(sc, addr + j*4);
18881            }
18882        }
18883    }
18884
18885    /* Paged registers are supported in E2 & E3 only */
18886    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18887        /* Read "paged" registers */
18888        bxe_read_pages_regs(sc, p, preset);
18889    }
18890
18891    return 0;
18892}
18893
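/*
 * Collect a GRC register dump into sc->grc_dump. Parity attentions are
 * disabled on both paths while the presets are read and re-enabled
 * afterwards; when the interface is up the DMA allocation layout is also
 * logged to assist debugging.
 */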
18894int
18895bxe_grc_dump(struct bxe_softc *sc)
18896{
18897    int rval = 0;
18898    uint32_t preset_idx;
18899    uint8_t *buf;
18900    uint32_t size;
18901    struct  dump_header *d_hdr;
18902    uint32_t i;
18903    uint32_t reg_val;
18904    uint32_t reg_addr;
18905    uint32_t cmd_offset;
18906    struct ecore_ilt *ilt = SC_ILT(sc);
18907    struct bxe_fastpath *fp;
18908    struct ilt_client_info *ilt_cli;
18909    int grc_dump_size;
18910
18911
18912    if (sc->grcdump_done || sc->grcdump_started)
        return (rval);
18914
18915    sc->grcdump_started = 1;
18916    BLOGI(sc, "Started collecting grcdump\n");
18917
18918    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18919                sizeof(struct  dump_header);
18920
18921    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18922
18923    if (sc->grc_dump == NULL) {
18924        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18925        return(ENOMEM);
18926    }
18927
    /*
     * Disable parity attentions for the duration of the dump, since
     * reading never-written registers may cause false alarms. Parity
     * attentions are re-enabled right after the dump.
     */
18934
18935    /* Disable parity on path 0 */
18936    bxe_pretend_func(sc, 0);
18937
18938    ecore_disable_blocks_parity(sc);
18939
18940    /* Disable parity on path 1 */
18941    bxe_pretend_func(sc, 1);
18942    ecore_disable_blocks_parity(sc);
18943
18944    /* Return to current function */
18945    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18946
18947    buf = sc->grc_dump;
18948    d_hdr = sc->grc_dump;
18949
18950    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18951    d_hdr->version = BNX2X_DUMP_VERSION;
18952    d_hdr->preset = DUMP_ALL_PRESETS;
18953
18954    if (CHIP_IS_E1(sc)) {
18955        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18956    } else if (CHIP_IS_E1H(sc)) {
18957        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18958    } else if (CHIP_IS_E2(sc)) {
18959        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18960                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18961    } else if (CHIP_IS_E3A0(sc)) {
18962        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18963                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18964    } else if (CHIP_IS_E3B0(sc)) {
18965        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18966                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18967    }
18968
18969    buf += sizeof(struct  dump_header);
18970
18971    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18972
18973        /* Skip presets with IOR */
18974        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18975            (preset_idx == 11))
18976            continue;
18977
18978        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18979
18980	if (rval)
18981            break;
18982
18983        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18984
18985        buf += size;
18986    }
18987
18988    bxe_pretend_func(sc, 0);
18989    ecore_clear_blocks_parity(sc);
18990    ecore_enable_blocks_parity(sc);
18991
18992    bxe_pretend_func(sc, 1);
18993    ecore_clear_blocks_parity(sc);
18994    ecore_enable_blocks_parity(sc);
18995
18996    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));

    if (sc->state == BXE_STATE_OPEN) {
        if (sc->fw_stats_req != NULL) {
            BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->fw_stats_req_mapping,
                  (uintmax_t)sc->fw_stats_data_mapping,
                  sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
        }
        if (sc->def_sb != NULL) {
            BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
                  (void *)sc->def_sb_dma.paddr, sc->def_sb,
                  sizeof(struct host_sp_status_block));
        }
        if (sc->eq_dma.vaddr != NULL) {
            BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->sp_dma.vaddr != NULL) {
            BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
                  (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
                  sizeof(struct bxe_slowpath));
        }
        if (sc->spq_dma.vaddr != NULL) {
            BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->gz_buf_dma.vaddr != NULL) {
            BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
                  FW_BUF_SIZE);
        }
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
                fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
                fp->rx_sge_dma.vaddr != NULL) {
                BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
                      (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
                      sizeof(union bxe_host_hc_status_block));
                BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
                      (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
                BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
                BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
                      (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
                      (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
                BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
            }
        }
        if (ilt != NULL) {
            ilt_cli = &ilt->clients[1];
            if (ilt->lines != NULL) {
                for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
                    BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
                          (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
                          ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
                }
            }
        }
19065
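        /* Log the raw contents of the DMAE command memory. */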
        cmd_offset = DMAE_REG_CMD_MEM;
        for (i = 0; i < 224; i++) {
            reg_addr = (cmd_offset + (i * 4));
            reg_val = REG_RD(sc, reg_addr);
            BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
                  reg_addr, reg_val);
        }
    }
19074
19075    BLOGI(sc, "Collection of grcdump done\n");
19076    sc->grcdump_done = 1;
    return (rval);
19078}
19079
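/*
 * Create the per-interface control device used by the bxe ioctl interface
 * and allocate the staging buffer for eeprom read/write requests.
 */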
19080static int
19081bxe_add_cdev(struct bxe_softc *sc)
19082{
19083    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19084
19085    if (sc->eeprom == NULL) {
        BLOGW(sc, "Unable to allocate memory for eeprom buffer\n");
19087        return (-1);
19088    }
19089
19090    sc->ioctl_dev = make_dev(&bxe_cdevsw,
19091                            if_getdunit(sc->ifp),
19092                            UID_ROOT,
19093                            GID_WHEEL,
19094                            0600,
19095                            "%s",
19096                            if_name(sc->ifp));
19097
19098    if (sc->ioctl_dev == NULL) {
19099        free(sc->eeprom, M_DEVBUF);
19100        sc->eeprom = NULL;
19101        return (-1);
19102    }
19103
19104    sc->ioctl_dev->si_drv1 = sc;
19105
19106    return (0);
19107}
19108
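/*
 * Tear down the control device and release the eeprom staging buffer.
 */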
19109static void
19110bxe_del_cdev(struct bxe_softc *sc)
19111{
19112    if (sc->ioctl_dev != NULL)
19113        destroy_dev(sc->ioctl_dev);
19114
19115    if (sc->eeprom != NULL) {
19116        free(sc->eeprom, M_DEVBUF);
19117        sc->eeprom = NULL;
19118    }
19119    sc->ioctl_dev = NULL;
19120
19121    return;
19122}
19123
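/*
 * NVRAM (eeprom) accesses are only allowed while the interface is running.
 */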
static bool
bxe_is_nvram_accessible(struct bxe_softc *sc)
{
19127    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19128        return FALSE;
19129
19130    return TRUE;
19131}
19132
19133
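/*
 * Write 'len' bytes from 'data' to the NVRAM at 'offset'. Fails with
 * -EAGAIN if the interface is down.
 */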
19134static int
19135bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19136{
19137    int rval = 0;
19138
19139    if(!bxe_is_nvram_accessible(sc)) {
19140        BLOGW(sc, "Cannot access eeprom when interface is down\n");
19141        return (-EAGAIN);
19142    }
    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);

    return (rval);
19147}
19148
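/*
 * Read 'len' bytes from the NVRAM at 'offset' into 'data'. Fails with
 * -EAGAIN if the interface is down.
 */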
19149static int
19150bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19151{
19152    int rval = 0;
19153
19154    if(!bxe_is_nvram_accessible(sc)) {
19155        BLOGW(sc, "Cannot access eeprom when interface is down\n");
19156        return (-EAGAIN);
19157    }
19158    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19159
    return (rval);
19161}
19162
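/*
 * Service a BXE_EEPROM ioctl request: stage the user buffer through
 * sc->eeprom and perform the requested NVRAM read or write.
 */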
19163static int
19164bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19165{
19166    int rval = 0;
19167
19168    switch (eeprom->eeprom_cmd) {
19169
19170    case BXE_EEPROM_CMD_SET_EEPROM:
19171
19172        rval = copyin(eeprom->eeprom_data, sc->eeprom,
19173                       eeprom->eeprom_data_len);
19174
19175        if (rval)
19176            break;
19177
19178        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19179                       eeprom->eeprom_data_len);
19180        break;
19181
19182    case BXE_EEPROM_CMD_GET_EEPROM:
19183
19184        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19185                       eeprom->eeprom_data_len);
19186
19187        if (rval) {
19188            break;
19189        }
19190
19191        rval = copyout(sc->eeprom, eeprom->eeprom_data,
19192                       eeprom->eeprom_data_len);
19193        break;
19194
    default:
        rval = EINVAL;
        break;
19198    }
19199
19200    if (rval) {
        BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
19202    }
19203
19204    return (rval);
19205}
19206
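/*
 * Fill in the current link settings (supported/advertised modes, speed,
 * duplex, media port, PHY address and autoneg state) for the
 * BXE_DEV_SETTING ioctl.
 */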
19207static int
19208bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19209{
19210    uint32_t ext_phy_config;
19211    int port = SC_PORT(sc);
19212    int cfg_idx = bxe_get_link_cfg_idx(sc);
19213
19214    dev_p->supported = sc->port.supported[cfg_idx] |
19215            (sc->port.supported[cfg_idx ^ 1] &
19216            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19217    dev_p->advertising = sc->port.advertising[cfg_idx];
    if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
        ELINK_ETH_PHY_SFP_1G_FIBER) {
        /* A 1G SFP module cannot do 10G; mask it from the reported modes. */
        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
    }
19223    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19224        !(sc->flags & BXE_MF_FUNC_DIS)) {
19225        dev_p->duplex = sc->link_vars.duplex;
19226        if (IS_MF(sc) && !BXE_NOMCP(sc))
19227            dev_p->speed = bxe_get_mf_speed(sc);
19228        else
19229            dev_p->speed = sc->link_vars.line_speed;
19230    } else {
19231        dev_p->duplex = DUPLEX_UNKNOWN;
19232        dev_p->speed = SPEED_UNKNOWN;
19233    }
19234
19235    dev_p->port = bxe_media_detect(sc);
19236
19237    ext_phy_config = SHMEM_RD(sc,
19238                         dev_info.port_hw_config[port].external_phy_config);
    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
             ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;
19249
    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return (0);
19257}
19258
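/*
 * ioctl handler for the bxe control device: GRC dump size query and
 * retrieval, driver/firmware version info, link settings, raw register
 * and PCI config space access, permanent MAC address and eeprom access.
 */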
19259static int
19260bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19261        struct thread *td)
19262{
19263    struct bxe_softc    *sc;
19264    int                 rval = 0;
19265    bxe_grcdump_t       *dump = NULL;
19266    int grc_dump_size;
19267    bxe_drvinfo_t   *drv_infop = NULL;
19268    bxe_dev_setting_t  *dev_p;
19269    bxe_dev_setting_t  dev_set;
19270    bxe_get_regs_t  *reg_p;
19271    bxe_reg_rdw_t *reg_rdw_p;
19272    bxe_pcicfg_rdw_t *cfg_rdw_p;
    bxe_perm_mac_addr_t *mac_addr_p;

19276    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19277        return ENXIO;
19278
19279    dump = (bxe_grcdump_t *)data;
19280
19281    switch(cmd) {
19282
19283        case BXE_GRC_DUMP_SIZE:
19284            dump->pci_func = sc->pcie_func;
19285            dump->grcdump_size =
19286                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19287                     sizeof(struct  dump_header);
19288            break;
19289
19290        case BXE_GRC_DUMP:
19291
19292            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19293                                sizeof(struct  dump_header);
19294            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19295                (dump->grcdump_size < grc_dump_size)) {
19296                rval = EINVAL;
19297                break;
19298            }
19299
            if ((sc->trigger_grcdump) && (!sc->grcdump_done) &&
                (!sc->grcdump_started)) {
                rval = bxe_grc_dump(sc);
            }

            if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
                (sc->grc_dump != NULL)) {
19307                dump->grcdump_dwords = grc_dump_size >> 2;
19308                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19309                free(sc->grc_dump, M_DEVBUF);
19310                sc->grc_dump = NULL;
19311                sc->grcdump_started = 0;
19312                sc->grcdump_done = 0;
19313            }
19314
19315            break;
19316
19317        case BXE_DRV_INFO:
19318            drv_infop = (bxe_drvinfo_t *)data;
19319            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19320            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19321                BXE_DRIVER_VERSION);
19322            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19323                sc->devinfo.bc_ver_str);
19324            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19325                "%s", sc->fw_ver_str);
19326            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19327            drv_infop->reg_dump_len =
19328                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19329                    + sizeof(struct  dump_header);
19330            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19331                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19332            break;
19333
19334        case BXE_DEV_SETTING:
19335            dev_p = (bxe_dev_setting_t *)data;
19336            bxe_get_settings(sc, &dev_set);
19337            dev_p->supported = dev_set.supported;
19338            dev_p->advertising = dev_set.advertising;
19339            dev_p->speed = dev_set.speed;
19340            dev_p->duplex = dev_set.duplex;
19341            dev_p->port = dev_set.port;
19342            dev_p->phy_address = dev_set.phy_address;
19343            dev_p->autoneg = dev_set.autoneg;
19344
19345            break;
19346
19347        case BXE_GET_REGS:
19348
19349            reg_p = (bxe_get_regs_t *)data;
19350            grc_dump_size = reg_p->reg_buf_len;
19351
            if ((!sc->grcdump_done) && (!sc->grcdump_started)) {
                bxe_grc_dump(sc);
            }
            if ((sc->grcdump_done) && (sc->grcdump_started) &&
                (sc->grc_dump != NULL)) {
19357                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19358                free(sc->grc_dump, M_DEVBUF);
19359                sc->grc_dump = NULL;
19360                sc->grcdump_started = 0;
19361                sc->grcdump_done = 0;
19362            }
19363
19364            break;
19365
19366        case BXE_RDW_REG:
19367            reg_rdw_p = (bxe_reg_rdw_t *)data;
19368            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19369                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19370                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19371
19372            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19373                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19374                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19375
19376            break;
19377
19378        case BXE_RDW_PCICFG:
19379            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19380            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19381
19382                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19383                                         cfg_rdw_p->cfg_width);
19384
19385            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19386                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19387                            cfg_rdw_p->cfg_width);
19388            } else {
19389                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19390            }
19391            break;
19392
19393        case BXE_MAC_ADDR:
19394            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19395            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19396                sc->mac_addr_str);
19397            break;
19398
19399        case BXE_EEPROM:
19400            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
            break;

19404        default:
19405            break;
19406    }
19407
19408    return (rval);
19409}
19410
19411#ifdef DEBUGNET
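/*
 * debugnet (netdump) support: these callbacks let the kernel transmit a
 * crash dump over this interface by reporting the receive ring and cluster
 * parameters, transmitting on queue 0, and polling the rings for
 * completions.
 */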
19412static void
19413bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
19414{
19415	struct bxe_softc *sc;
19416
19417	sc = if_getsoftc(ifp);
19418	BXE_CORE_LOCK(sc);
19419	*nrxr = sc->num_queues;
19420	*ncl = DEBUGNET_MAX_IN_FLIGHT;
19421	*clsize = sc->fp[0].mbuf_alloc_size;
19422	BXE_CORE_UNLOCK(sc);
19423}
19424
19425static void
19426bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
19427{
19428}
19429
19430static int
19431bxe_debugnet_transmit(if_t ifp, struct mbuf *m)
19432{
19433	struct bxe_softc *sc;
19434	int error;
19435
19436	sc = if_getsoftc(ifp);
19437	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19438	    IFF_DRV_RUNNING || !sc->link_vars.link_up)
19439		return (ENOENT);
19440
19441	error = bxe_tx_encap(&sc->fp[0], &m);
19442	if (error != 0 && m != NULL)
19443		m_freem(m);
19444	return (error);
19445}
19446
19447static int
19448bxe_debugnet_poll(if_t ifp, int count)
19449{
19450	struct bxe_softc *sc;
19451	int i;
19452
19453	sc = if_getsoftc(ifp);
19454	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19455	    !sc->link_vars.link_up)
19456		return (ENOENT);
19457
19458	for (i = 0; i < sc->num_queues; i++)
19459		(void)bxe_rxeof(sc, &sc->fp[i]);
19460	(void)bxe_txeof(sc, &sc->fp[0]);
19461	return (0);
19462}
19463#endif /* DEBUGNET */
19464