/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 314093 2017-02-22 08:49:52Z royger $");

#define BXE_DRIVER_VERSION "1.78.89"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};

/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */
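
/*
 * Most of the following are flagged as tunables (CTLFLAG_RDTUN/CTLFLAG_TUN),
 * so they can be set at boot time from loader.conf, e.g.
 * hw.bxe.queue_count="8"; the RDTUN ones are also readable at runtime via
 * sysctl under the hw.bxe tree.
 */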

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");

#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT  1
#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
                8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
                8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
                8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
                8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
                8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
                8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
                8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
                8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
                8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
                8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
                8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
                8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
                8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
                8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
                4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
                4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
                4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
    { STATS_OFFSET32(tx_queue_full_return),
                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
    { STATS_OFFSET32(tx_request_link_down_failures),
                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
    { STATS_OFFSET32(bd_avail_too_less_failures),
                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
    { STATS_OFFSET32(tx_mq_not_empty),
                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}

};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
                8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
                8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
                8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
                8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
                8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
                8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
                8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
                8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
                8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
                8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
                8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
                4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
                4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
                4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
                4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
                4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
                4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
                4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
                4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
                4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
                4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
                4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
                4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
                4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
                4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
                4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
                4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
                4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
                4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
                4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
                4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
                4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
                4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
                4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
                4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
                4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
                4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
                4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
                4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
                4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
                4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
                4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
                4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
                4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
                4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
                4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
                4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
                4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
                4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
                4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
                4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return),
                4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures),
                4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
                4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty),
                4, "tx_mq_not_empty"}

};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)

static void    bxe_cmng_fns_init(struct bxe_softc *sc,
                                 uint8_t          read_cfg,
                                 uint8_t          cmng_type);
static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void    storm_memset_cmng(struct bxe_softc *sc,
                                 struct cmng_init *cmng,
                                 uint8_t          port);
static void    bxe_set_reset_global(struct bxe_softc *sc);
static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int              engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t          *global,
                                   uint8_t          print);
static void    bxe_int_disable(struct bxe_softc *sc);
static int     bxe_release_leader_lock(struct bxe_softc *sc);
static void    bxe_pf_disable(struct bxe_softc *sc);
static void    bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t            rx_bd_prod,
                                      uint16_t            rx_cq_prod,
                                      uint16_t            rx_sge_prod);
static void    bxe_link_report_locked(struct bxe_softc *sc);
static void    bxe_link_report(struct bxe_softc *sc);
static void    bxe_link_status_update(struct bxe_softc *sc);
static void    bxe_periodic_callout_func(void *xsc);
static void    bxe_periodic_start(struct bxe_softc *sc);
static void    bxe_periodic_stop(struct bxe_softc *sc);
static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                    uint16_t prev_index,
                                    uint16_t index);
static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                     int                 queue);
static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                     uint16_t            index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void    bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf      *m,
                                     uint8_t          contents);
static int     bxe_alloc_mem(struct bxe_softc *sc);
static void    bxe_free_mem(struct bxe_softc *sc);
static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int     bxe_interrupt_attach(struct bxe_softc *sc);
static void    bxe_interrupt_detach(struct bxe_softc *sc);
static void    bxe_set_rx_mode(struct bxe_softc *sc);
static int     bxe_init_locked(struct bxe_softc *sc);
static int     bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int              load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t         unload_mode,
                                     uint8_t          keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);

static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
static int bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);

/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
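/* (CRC32_POLY below, 0x1edc6f41, is the CRC-32C/Castagnoli polynomial) */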
uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
   uint32_t byte         = 0;
   uint32_t bit          = 0;
   uint8_t  msb          = 0;
   uint32_t temp         = 0;
   uint32_t shft         = 0;
   uint8_t  current_byte = 0;
   uint32_t crc32_result = crc32_seed;
   const uint32_t CRC32_POLY = 0x1edc6f41;

   if ((crc32_packet == NULL) ||
       (crc32_length == 0) ||
       ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int                    nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int           nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int                    nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

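/*
 * Compare-and-swap helper: spins until the value 'old' at *addr has been
 * atomically replaced with 'new'; returns the last value read from *addr
 * (normally 'old').
 */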
int
bxe_cmpxchg(volatile int *addr,
            int          old,
            int          new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load), so the values saved in the bxe_dma structure (paddr
 * and nseg are zeroed on error) are used to pass any failures back to the
 * caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}

/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */
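/*
 * A GRC register is accessed indirectly by writing its address to
 * PCICFG_GRC_ADDRESS in PCI config space and then transferring the data
 * through PCICFG_GRC_DATA.
 */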

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
        resource, resource_bit);
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}

/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
            offset, cmd_flags, val);
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
            offset, cmd_flags, val);
    }

    return (rc);
}

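/* convert a byte offset within a dword to a bit shift (0, 8, 16 or 24) */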
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t         offset,
                uint8_t          *data_buf,
                int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc    *sc,
              struct dmae_cmd *dmae,
              int                 idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
                      DMAE_CMD_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_CMD_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
               (dst_type << DMAE_CMD_DST_SHIFT));

    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
                        struct dmae_cmd *dmae,
                        uint8_t             src_type,
                        uint8_t             dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_cmd));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
                         struct dmae_cmd *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1573
1574    BXE_DMAE_LOCK(sc);
1575
1576    /* reset completion */
1577    *wb_comp = 0;
1578
1579    /* post the command on the channel used for initializations */
1580    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1581
1582    /* wait for completion */
1583    DELAY(5);
1584
1585    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1586        if (!timeout ||
1587            (sc->recovery_state != BXE_RECOVERY_DONE &&
1588             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1589            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1590                *wb_comp, sc->recovery_state);
1591            BXE_DMAE_UNLOCK(sc);
1592            return (DMAE_TIMEOUT);
1593        }
1594
1595        timeout--;
1596        DELAY(50);
1597    }
1598
1599    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1600        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1601                *wb_comp, sc->recovery_state);
1602        BXE_DMAE_UNLOCK(sc);
1603        return (DMAE_PCI_ERROR);
1604    }
1605
1606    BXE_DMAE_UNLOCK(sc);
1607    return (0);
1608}
1609
1610void
1611bxe_read_dmae(struct bxe_softc *sc,
1612              uint32_t         src_addr,
1613              uint32_t         len32)
1614{
1615    struct dmae_cmd dmae;
1616    uint32_t *data;
1617    int i, rc;
1618
1619    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1620
1621    if (!sc->dmae_ready) {
1622        data = BXE_SP(sc, wb_data[0]);
1623
1624        for (i = 0; i < len32; i++) {
1625            data[i] = (CHIP_IS_E1(sc)) ?
1626                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1627                          REG_RD(sc, (src_addr + (i * 4)));
1628        }
1629
1630        return;
1631    }
1632
1633    /* set opcode and fixed command fields */
1634    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1635
1636    /* fill in addresses and len */
1637    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1638    dmae.src_addr_hi = 0;
1639    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1640    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1641    dmae.len         = len32;
1642
1643    /* issue the command and wait for completion */
1644    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1645        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1646    }
1647}
1648
1649void
1650bxe_write_dmae(struct bxe_softc *sc,
1651               bus_addr_t       dma_addr,
1652               uint32_t         dst_addr,
1653               uint32_t         len32)
1654{
1655    struct dmae_cmd dmae;
1656    int rc;
1657
1658    if (!sc->dmae_ready) {
1659        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1660
1661        if (CHIP_IS_E1(sc)) {
1662            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1663        } else {
1664            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1665        }
1666
1667        return;
1668    }
1669
1670    /* set opcode and fixed command fields */
1671    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1672
1673    /* fill in addresses and len */
1674    dmae.src_addr_lo = U64_LO(dma_addr);
1675    dmae.src_addr_hi = U64_HI(dma_addr);
1676    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1677    dmae.dst_addr_hi = 0;
1678    dmae.len         = len32;
1679
1680    /* issue the command and wait for completion */
1681    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1682        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1683    }
1684}
1685
1686void
1687bxe_write_dmae_phys_len(struct bxe_softc *sc,
1688                        bus_addr_t       phys_addr,
1689                        uint32_t         addr,
1690                        uint32_t         len)
1691{
1692    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1693    int offset = 0;
1694
1695    while (len > dmae_wr_max) {
1696        bxe_write_dmae(sc,
1697                       (phys_addr + offset), /* src DMA address */
1698                       (addr + offset),      /* dst GRC address */
1699                       dmae_wr_max);
1700        offset += (dmae_wr_max * 4);
1701        len -= dmae_wr_max;
1702    }
1703
1704    bxe_write_dmae(sc,
1705                   (phys_addr + offset), /* src DMA address */
1706                   (addr + offset),      /* dst GRC address */
1707                   len);
1708}
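/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * if DMAE_LEN32_WR_MAX(sc) were 0x2000 dwords and len were 0x5000 dwords, the
 * loop above would issue two full writes of 0x2000 dwords, advancing both the
 * host physical address and the GRC address by 0x8000 bytes each time, and
 * then finish with a final write of the remaining 0x1000 dwords.
 */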
1709
1710void
1711bxe_set_ctx_validation(struct bxe_softc   *sc,
1712                       struct eth_context *cxt,
1713                       uint32_t           cid)
1714{
1715    /* ustorm cxt validation */
1716    cxt->ustorm_ag_context.cdu_usage =
1717        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1718            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1719    /* xcontext validation */
1720    cxt->xstorm_ag_context.cdu_reserved =
1721        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1722            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1723}
1724
1725static void
1726bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1727                            uint8_t          port,
1728                            uint8_t          fw_sb_id,
1729                            uint8_t          sb_index,
1730                            uint8_t          ticks)
1731{
1732    uint32_t addr =
1733        (BAR_CSTRORM_INTMEM +
1734         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1735
1736    REG_WR8(sc, addr, ticks);
1737
1738    BLOGD(sc, DBG_LOAD,
1739          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1740          port, fw_sb_id, sb_index, ticks);
1741}
1742
1743static void
1744bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1745                            uint8_t          port,
1746                            uint16_t         fw_sb_id,
1747                            uint8_t          sb_index,
1748                            uint8_t          disable)
1749{
1750    uint32_t enable_flag =
1751        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1752    uint32_t addr =
1753        (BAR_CSTRORM_INTMEM +
1754         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1755    uint8_t flags;
1756
1757    /* clear and set */
1758    flags = REG_RD8(sc, addr);
1759    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1760    flags |= enable_flag;
1761    REG_WR8(sc, addr, flags);
1762
1763    BLOGD(sc, DBG_LOAD,
1764          "port %d fw_sb_id %d sb_index %d disable %d\n",
1765          port, fw_sb_id, sb_index, disable);
1766}
1767
1768void
1769bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1770                             uint8_t          fw_sb_id,
1771                             uint8_t          sb_index,
1772                             uint8_t          disable,
1773                             uint16_t         usec)
1774{
1775    int port = SC_PORT(sc);
1776    uint8_t ticks = (usec / 4); /* XXX ??? */
1777
1778    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1779
1780    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1781    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1782}
1783
1784void
1785elink_cb_udelay(struct bxe_softc *sc,
1786                uint32_t         usecs)
1787{
1788    DELAY(usecs);
1789}
1790
1791uint32_t
1792elink_cb_reg_read(struct bxe_softc *sc,
1793                  uint32_t         reg_addr)
1794{
1795    return (REG_RD(sc, reg_addr));
1796}
1797
1798void
1799elink_cb_reg_write(struct bxe_softc *sc,
1800                   uint32_t         reg_addr,
1801                   uint32_t         val)
1802{
1803    REG_WR(sc, reg_addr, val);
1804}
1805
1806void
1807elink_cb_reg_wb_write(struct bxe_softc *sc,
1808                      uint32_t         offset,
1809                      uint32_t         *wb_write,
1810                      uint16_t         len)
1811{
1812    REG_WR_DMAE(sc, offset, wb_write, len);
1813}
1814
1815void
1816elink_cb_reg_wb_read(struct bxe_softc *sc,
1817                     uint32_t         offset,
1818                     uint32_t         *wb_write,
1819                     uint16_t         len)
1820{
1821    REG_RD_DMAE(sc, offset, wb_write, len);
1822}
1823
1824uint8_t
1825elink_cb_path_id(struct bxe_softc *sc)
1826{
1827    return (SC_PATH(sc));
1828}
1829
1830void
1831elink_cb_event_log(struct bxe_softc     *sc,
1832                   const elink_log_id_t elink_log_id,
1833                   ...)
1834{
1835    /* XXX */
1836    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1837}
1838
1839static int
1840bxe_set_spio(struct bxe_softc *sc,
1841             int              spio,
1842             uint32_t         mode)
1843{
1844    uint32_t spio_reg;
1845
1846    /* Only 2 SPIOs are configurable */
1847    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1848        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1849        return (-1);
1850    }
1851
1852    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1853
1854    /* read SPIO and mask except the float bits */
1855    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1856
1857    switch (mode) {
1858    case MISC_SPIO_OUTPUT_LOW:
1859        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1860        /* clear FLOAT and set CLR */
1861        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1862        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1863        break;
1864
1865    case MISC_SPIO_OUTPUT_HIGH:
1866        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1867        /* clear FLOAT and set SET */
1868        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1869        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1870        break;
1871
1872    case MISC_SPIO_INPUT_HI_Z:
1873        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1874        /* set FLOAT */
1875        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1876        break;
1877
1878    default:
1879        break;
1880    }
1881
1882    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1883    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1884
1885    return (0);
1886}
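/*
 * Usage sketch (illustrative only): driving SPIO5 low clears its FLOAT bit
 * and sets its CLR bit in MISC_REG_SPIO, i.e. something like:
 *
 *     bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_OUTPUT_LOW);
 *
 * Returning the pin to tri-state would pass MISC_SPIO_INPUT_HI_Z instead,
 * which simply restores the pin's FLOAT bit.
 */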
1887
1888static int
1889bxe_gpio_read(struct bxe_softc *sc,
1890              int              gpio_num,
1891              uint8_t          port)
1892{
1893    /* The GPIO should be swapped if swap register is set and active */
1894    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1895                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1896    int gpio_shift = (gpio_num +
1897                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1898    uint32_t gpio_mask = (1 << gpio_shift);
1899    uint32_t gpio_reg;
1900
1901    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1902        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1903            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1904            gpio_mask);
1905        return (-1);
1906    }
1907
1908    /* read GPIO value */
1909    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1910
1911    /* get the requested pin value */
1912    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1913}
1914
1915static int
1916bxe_gpio_write(struct bxe_softc *sc,
1917               int              gpio_num,
1918               uint32_t         mode,
1919               uint8_t          port)
1920{
1921    /* The GPIO should be swapped if swap register is set and active */
1922    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1923                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1924    int gpio_shift = (gpio_num +
1925                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1926    uint32_t gpio_mask = (1 << gpio_shift);
1927    uint32_t gpio_reg;
1928
1929    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1930        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1931            " gpio_shift %d gpio_mask 0x%x\n",
1932            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1933        return (-1);
1934    }
1935
1936    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1937
1938    /* read GPIO and mask except the float bits */
1939    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1940
1941    switch (mode) {
1942    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1943        BLOGD(sc, DBG_PHY,
1944              "Set GPIO %d (shift %d) -> output low\n",
1945              gpio_num, gpio_shift);
1946        /* clear FLOAT and set CLR */
1947        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1949        break;
1950
1951    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1952        BLOGD(sc, DBG_PHY,
1953              "Set GPIO %d (shift %d) -> output high\n",
1954              gpio_num, gpio_shift);
1955        /* clear FLOAT and set SET */
1956        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1957        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1958        break;
1959
1960    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1961        BLOGD(sc, DBG_PHY,
1962              "Set GPIO %d (shift %d) -> input\n",
1963              gpio_num, gpio_shift);
1964        /* set FLOAT */
1965        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1966        break;
1967
1968    default:
1969        break;
1970    }
1971
1972    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1973    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1974
1975    return (0);
1976}
1977
1978static int
1979bxe_gpio_mult_write(struct bxe_softc *sc,
1980                    uint8_t          pins,
1981                    uint32_t         mode)
1982{
1983    uint32_t gpio_reg;
1984
1985    /* any port swapping should be handled by caller */
1986
1987    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1988
1989    /* read GPIO and mask except the float bits */
1990    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1991    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1992    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1993    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1994
1995    switch (mode) {
1996    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1997        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
1998        /* set CLR */
1999        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2000        break;
2001
2002    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2003        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2004        /* set SET */
2005        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2006        break;
2007
2008    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2009        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2010        /* set FLOAT */
2011        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2012        break;
2013
2014    default:
2015        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2016            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2017        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2018        return (-1);
2019    }
2020
2021    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2022    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2023
2024    return (0);
2025}
2026
2027static int
2028bxe_gpio_int_write(struct bxe_softc *sc,
2029                   int              gpio_num,
2030                   uint32_t         mode,
2031                   uint8_t          port)
2032{
2033    /* The GPIO should be swapped if swap register is set and active */
2034    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2035                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2036    int gpio_shift = (gpio_num +
2037                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2038    uint32_t gpio_mask = (1 << gpio_shift);
2039    uint32_t gpio_reg;
2040
2041    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2042        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2043            " gpio_shift %d gpio_mask 0x%x\n",
2044            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2045        return (-1);
2046    }
2047
2048    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2049
2050    /* read GPIO int */
2051    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2052
2053    switch (mode) {
2054    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2055        BLOGD(sc, DBG_PHY,
2056              "Clear GPIO INT %d (shift %d) -> output low\n",
2057              gpio_num, gpio_shift);
2058        /* clear SET and set CLR */
2059        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2060        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2061        break;
2062
2063    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2064        BLOGD(sc, DBG_PHY,
2065              "Set GPIO INT %d (shift %d) -> output high\n",
2066              gpio_num, gpio_shift);
2067        /* clear CLR and set SET */
2068        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2069        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2070        break;
2071
2072    default:
2073        break;
2074    }
2075
2076    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2077    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2078
2079    return (0);
2080}
2081
2082uint32_t
2083elink_cb_gpio_read(struct bxe_softc *sc,
2084                   uint16_t         gpio_num,
2085                   uint8_t          port)
2086{
2087    return (bxe_gpio_read(sc, gpio_num, port));
2088}
2089
2090uint8_t
2091elink_cb_gpio_write(struct bxe_softc *sc,
2092                    uint16_t         gpio_num,
2093                    uint8_t          mode, /* 0=low 1=high */
2094                    uint8_t          port)
2095{
2096    return (bxe_gpio_write(sc, gpio_num, mode, port));
2097}
2098
2099uint8_t
2100elink_cb_gpio_mult_write(struct bxe_softc *sc,
2101                         uint8_t          pins,
2102                         uint8_t          mode) /* 0=low 1=high */
2103{
2104    return (bxe_gpio_mult_write(sc, pins, mode));
2105}
2106
2107uint8_t
2108elink_cb_gpio_int_write(struct bxe_softc *sc,
2109                        uint16_t         gpio_num,
2110                        uint8_t          mode, /* 0=low 1=high */
2111                        uint8_t          port)
2112{
2113    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2114}
2115
2116void
2117elink_cb_notify_link_changed(struct bxe_softc *sc)
2118{
2119    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2120                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2121}
2122
2123/* send the MCP a request, block until there is a reply */
2124uint32_t
2125elink_cb_fw_command(struct bxe_softc *sc,
2126                    uint32_t         command,
2127                    uint32_t         param)
2128{
2129    int mb_idx = SC_FW_MB_IDX(sc);
2130    uint32_t seq;
2131    uint32_t rc = 0;
2132    uint32_t cnt = 1;
2133    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2134
2135    BXE_FWMB_LOCK(sc);
2136
2137    seq = ++sc->fw_seq;
2138    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2139    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2140
2141    BLOGD(sc, DBG_PHY,
2142          "wrote command 0x%08x to FW MB param 0x%08x\n",
2143          (command | seq), param);
2144
2145    /* Let the FW do its magic. Give it up to 5 seconds... */
2146    do {
2147        DELAY(delay * 1000);
2148        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2149    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2150
2151    BLOGD(sc, DBG_PHY,
2152          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2153          cnt*delay, rc, seq);
2154
2155    /* is this a reply to our command? */
2156    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2157        rc &= FW_MSG_CODE_MASK;
2158    } else {
2159        /* Ruh-roh! */
2160        BLOGE(sc, "FW failed to respond!\n");
2161        // XXX bxe_fw_dump(sc);
2162        rc = 0;
2163    }
2164
2165    BXE_FWMB_UNLOCK(sc);
2166    return (rc);
2167}
2168
2169static uint32_t
2170bxe_fw_command(struct bxe_softc *sc,
2171               uint32_t         command,
2172               uint32_t         param)
2173{
2174    return (elink_cb_fw_command(sc, command, param));
2175}
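/*
 * Illustrative sketch (not from the original source): callers pass one of the
 * DRV_MSG_CODE_* commands along with an optional parameter and get back the
 * masked FW_MSG_CODE_* reply, or 0 if the MCP never answered. A load request,
 * for example, might look roughly like:
 *
 *     uint32_t fw_resp = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 0);
 *     if (fw_resp == 0) {
 *         (handle the firmware timeout)
 *     }
 *
 * The exact command and parameter encodings are defined by the shared memory
 * (SHMEM) interface headers.
 */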
2176
2177static void
2178__storm_memset_dma_mapping(struct bxe_softc *sc,
2179                           uint32_t         addr,
2180                           bus_addr_t       mapping)
2181{
2182    REG_WR(sc, addr, U64_LO(mapping));
2183    REG_WR(sc, (addr + 4), U64_HI(mapping));
2184}
2185
2186static void
2187storm_memset_spq_addr(struct bxe_softc *sc,
2188                      bus_addr_t       mapping,
2189                      uint16_t         abs_fid)
2190{
2191    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2192                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2193    __storm_memset_dma_mapping(sc, addr, mapping);
2194}
2195
2196static void
2197storm_memset_vf_to_pf(struct bxe_softc *sc,
2198                      uint16_t         abs_fid,
2199                      uint16_t         pf_id)
2200{
2201    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2202    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2203    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2204    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2205}
2206
2207static void
2208storm_memset_func_en(struct bxe_softc *sc,
2209                     uint16_t         abs_fid,
2210                     uint8_t          enable)
2211{
2212    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2213    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2214    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2215    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2216}
2217
2218static void
2219storm_memset_eq_data(struct bxe_softc       *sc,
2220                     struct event_ring_data *eq_data,
2221                     uint16_t               pfid)
2222{
2223    uint32_t addr;
2224    size_t size;
2225
2226    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2227    size = sizeof(struct event_ring_data);
2228    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2229}
2230
2231static void
2232storm_memset_eq_prod(struct bxe_softc *sc,
2233                     uint16_t         eq_prod,
2234                     uint16_t         pfid)
2235{
2236    uint32_t addr = (BAR_CSTRORM_INTMEM +
2237                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2238    REG_WR16(sc, addr, eq_prod);
2239}
2240
2241/*
2242 * Post a slowpath command.
2243 *
2244 * A slowpath command is used to propagate a configuration change through
2245 * the controller in a controlled manner, allowing each STORM processor and
2246 * other H/W blocks to phase in the change.  The commands sent on the
2247 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2248 * completion of the ramrod will occur in different ways.  Here's a
2249 * breakdown of ramrods and how they complete:
2250 *
2251 * RAMROD_CMD_ID_ETH_PORT_SETUP
2252 *   Used to setup the leading connection on a port.  Completes on the
2253 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2254 *
2255 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2256 *   Used to setup an additional connection on a port.  Completes on the
2257 *   RCQ of the multi-queue/RSS connection being initialized.
2258 *
2259 * RAMROD_CMD_ID_ETH_STAT_QUERY
2260 *   Used to force the storm processors to update the statistics database
2261 *   in host memory.  This ramrod is sent on the leading connection CID and
2262 *   completes as an index increment of the CSTORM on the default status
2263 *   block.
2264 *
2265 * RAMROD_CMD_ID_ETH_UPDATE
2266 *   Used to update the state of the leading connection, usually to update
2267 *   the RSS indirection table.  Completes on the RCQ of the leading
2268 *   connection. (Not currently used under FreeBSD until OS support becomes
2269 *   available.)
2270 *
2271 * RAMROD_CMD_ID_ETH_HALT
2272 *   Used when tearing down a connection prior to driver unload.  Completes
2273 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2274 *   use this on the leading connection.
2275 *
2276 * RAMROD_CMD_ID_ETH_SET_MAC
2277 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2278 *   the RCQ of the leading connection.
2279 *
2280 * RAMROD_CMD_ID_ETH_CFC_DEL
2281 *   Used when tearing down a connection prior to driver unload.  Completes
2282 *   on the RCQ of the leading connection (since the current connection
2283 *   has been completely removed from controller memory).
2284 *
2285 * RAMROD_CMD_ID_ETH_PORT_DEL
2286 *   Used to tear down the leading connection prior to driver unload,
2287 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2288 *   default status block.
2289 *
2290 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2291 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2292 *   RSS connection that is being offloaded.  (Not currently used under
2293 *   FreeBSD.)
2294 *
2295 * There can only be one command pending per function.
2296 *
2297 * Returns:
2298 *   0 = Success, !0 = Failure.
2299 */
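/*
 * Illustrative sketch (hypothetical names, not from the original source): a
 * ramrod is posted through bxe_sp_post() below. A client setup, for example,
 * could be issued roughly as:
 *
 *     rc = bxe_sp_post(sc,
 *                      RAMROD_CMD_ID_ETH_CLIENT_SETUP,
 *                      cid,                    (SW CID of the queue)
 *                      U64_HI(data_mapping),   (ramrod data bus address)
 *                      U64_LO(data_mapping),
 *                      ETH_CONNECTION_TYPE);
 *
 * where "cid" and "data_mapping" are placeholders for the queue's software
 * CID and the DMA address of the prepared ramrod data. The completion is then
 * reaped on the RCQ for connection ramrods or on the EQ for contextless
 * ramrods, as described above.
 */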
2300
2301/* must be called under the spq lock */
2302static inline
2303struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2304{
2305    struct eth_spe *next_spe = sc->spq_prod_bd;
2306
2307    if (sc->spq_prod_bd == sc->spq_last_bd) {
2308        /* wrap back to the first eth_spq */
2309        sc->spq_prod_bd = sc->spq;
2310        sc->spq_prod_idx = 0;
2311    } else {
2312        sc->spq_prod_bd++;
2313        sc->spq_prod_idx++;
2314    }
2315
2316    return (next_spe);
2317}
2318
2319/* must be called under the spq lock */
2320static inline
2321void bxe_sp_prod_update(struct bxe_softc *sc)
2322{
2323    int func = SC_FUNC(sc);
2324
2325    /*
2326     * Make sure that BD data is updated before writing the producer.
2327     * BD data is written to the memory, the producer is read from the
2328     * memory, thus we need a full memory barrier to ensure the ordering.
2329     */
2330    mb();
2331
2332    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2333             sc->spq_prod_idx);
2334
2335    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2336                      BUS_SPACE_BARRIER_WRITE);
2337}
2338
2339/**
2340 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2341 *
2342 * @cmd:      command to check
2343 * @cmd_type: command type
2344 */
2345static inline
2346int bxe_is_contextless_ramrod(int cmd,
2347                              int cmd_type)
2348{
2349    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2350        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2351        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2352        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2353        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2354        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2355        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2356        return (TRUE);
2357    } else {
2358        return (FALSE);
2359    }
2360}
2361
2362/**
2363 * bxe_sp_post - place a single command on an SP ring
2364 *
2365 * @sc:         driver handle
2366 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2367 * @cid:        SW CID the command is related to
2368 * @data_hi:    command private data address (high 32 bits)
2369 * @data_lo:    command private data address (low 32 bits)
2370 * @cmd_type:   command type (e.g. NONE, ETH)
2371 *
2372 * SP data is handled as if it's always an address pair, thus data fields are
2373 * not swapped to little endian in upper functions. Instead this function swaps
2374 * data as if it's two uint32 fields.
2375 */
2376int
2377bxe_sp_post(struct bxe_softc *sc,
2378            int              command,
2379            int              cid,
2380            uint32_t         data_hi,
2381            uint32_t         data_lo,
2382            int              cmd_type)
2383{
2384    struct eth_spe *spe;
2385    uint16_t type;
2386    int common;
2387
2388    common = bxe_is_contextless_ramrod(command, cmd_type);
2389
2390    BXE_SP_LOCK(sc);
2391
2392    if (common) {
2393        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2394            BLOGE(sc, "EQ ring is full!\n");
2395            BXE_SP_UNLOCK(sc);
2396            return (-1);
2397        }
2398    } else {
2399        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2400            BLOGE(sc, "SPQ ring is full!\n");
2401            BXE_SP_UNLOCK(sc);
2402            return (-1);
2403        }
2404    }
2405
2406    spe = bxe_sp_get_next(sc);
2407
2408    /* CID needs port number to be encoded in it */
2409    spe->hdr.conn_and_cmd_data =
2410        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2411
2412    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2413
2414    /* TBD: Check if it works for VFs */
2415    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2416             SPE_HDR_T_FUNCTION_ID);
2417
2418    spe->hdr.type = htole16(type);
2419
2420    spe->data.update_data_addr.hi = htole32(data_hi);
2421    spe->data.update_data_addr.lo = htole32(data_lo);
2422
2423    /*
2424     * It's ok if the actual decrement is issued towards the memory
2425     * somewhere between the lock and unlock. Thus no additional explicit
2426     * memory barrier is needed.
2427     */
2428    if (common) {
2429        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2430    } else {
2431        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2432    }
2433
2434    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2435    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2436          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2437    BLOGD(sc, DBG_SP,
2438          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2439          sc->spq_prod_idx,
2440          (uint32_t)U64_HI(sc->spq_dma.paddr),
2441          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2442          command,
2443          common,
2444          HW_CID(sc, cid),
2445          data_hi,
2446          data_lo,
2447          type,
2448          atomic_load_acq_long(&sc->cq_spq_left),
2449          atomic_load_acq_long(&sc->eq_spq_left));
2450
2451    bxe_sp_prod_update(sc);
2452
2453    BXE_SP_UNLOCK(sc);
2454    return (0);
2455}
2456
2457/**
2458 * bxe_debug_print_ind_table - prints the indirection table configuration.
2459 *
2460 * @sc: driver handle
2461 * @p:  pointer to rss configuration
2462 */
2463
2464/*
2465 * FreeBSD Device probe function.
2466 *
2467 * Compares the device found to the driver's list of supported devices and
2468 * reports back to the bsd loader whether this is the right driver for the device.
2469 * reports back to the BSD loader whether this is the right driver for the device.
2470 *
2471 * Returns:
2472 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2473 */
2474static int
2475bxe_probe(device_t dev)
2476{
2477    struct bxe_device_type *t;
2478    char *descbuf;
2479    uint16_t did, sdid, svid, vid;
2480
2481    /* Find our device structure */
2482    t = bxe_devs;
2483
2484    /* Get the data for the device to be probed. */
2485    vid  = pci_get_vendor(dev);
2486    did  = pci_get_device(dev);
2487    svid = pci_get_subvendor(dev);
2488    sdid = pci_get_subdevice(dev);
2489
2490    /* Look through the list of known devices for a match. */
2491    while (t->bxe_name != NULL) {
2492        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2493            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2494            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2495            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2496            if (descbuf == NULL)
2497                return (ENOMEM);
2498
2499            /* Print out the device identity. */
2500            snprintf(descbuf, BXE_DEVDESC_MAX,
2501                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2502                     (((pci_read_config(dev, PCIR_REVID, 4) &
2503                        0xf0) >> 4) + 'A'),
2504                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2505                     BXE_DRIVER_VERSION);
2506
2507            device_set_desc_copy(dev, descbuf);
2508            free(descbuf, M_TEMP);
2509            return (BUS_PROBE_DEFAULT);
2510        }
2511        t++;
2512    }
2513
2514    return (ENXIO);
2515}
2516
2517static void
2518bxe_init_mutexes(struct bxe_softc *sc)
2519{
2520#ifdef BXE_CORE_LOCK_SX
2521    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2522             "bxe%d_core_lock", sc->unit);
2523    sx_init(&sc->core_sx, sc->core_sx_name);
2524#else
2525    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2526             "bxe%d_core_lock", sc->unit);
2527    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2528#endif
2529
2530    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2531             "bxe%d_sp_lock", sc->unit);
2532    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2533
2534    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2535             "bxe%d_dmae_lock", sc->unit);
2536    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2537
2538    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2539             "bxe%d_phy_lock", sc->unit);
2540    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2541
2542    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2543             "bxe%d_fwmb_lock", sc->unit);
2544    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2545
2546    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2547             "bxe%d_print_lock", sc->unit);
2548    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2549
2550    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2551             "bxe%d_stats_lock", sc->unit);
2552    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2553
2554    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2555             "bxe%d_mcast_lock", sc->unit);
2556    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2557}
2558
2559static void
2560bxe_release_mutexes(struct bxe_softc *sc)
2561{
2562#ifdef BXE_CORE_LOCK_SX
2563    sx_destroy(&sc->core_sx);
2564#else
2565    if (mtx_initialized(&sc->core_mtx)) {
2566        mtx_destroy(&sc->core_mtx);
2567    }
2568#endif
2569
2570    if (mtx_initialized(&sc->sp_mtx)) {
2571        mtx_destroy(&sc->sp_mtx);
2572    }
2573
2574    if (mtx_initialized(&sc->dmae_mtx)) {
2575        mtx_destroy(&sc->dmae_mtx);
2576    }
2577
2578    if (mtx_initialized(&sc->port.phy_mtx)) {
2579        mtx_destroy(&sc->port.phy_mtx);
2580    }
2581
2582    if (mtx_initialized(&sc->fwmb_mtx)) {
2583        mtx_destroy(&sc->fwmb_mtx);
2584    }
2585
2586    if (mtx_initialized(&sc->print_mtx)) {
2587        mtx_destroy(&sc->print_mtx);
2588    }
2589
2590    if (mtx_initialized(&sc->stats_mtx)) {
2591        mtx_destroy(&sc->stats_mtx);
2592    }
2593
2594    if (mtx_initialized(&sc->mcast_mtx)) {
2595        mtx_destroy(&sc->mcast_mtx);
2596    }
2597}
2598
2599static void
2600bxe_tx_disable(struct bxe_softc* sc)
2601{
2602    if_t ifp = sc->ifp;
2603
2604    /* tell the stack the driver is stopped and TX queue is full */
2605    if (ifp != NULL) {
2606        if_setdrvflags(ifp, 0);
2607    }
2608}
2609
2610static void
2611bxe_drv_pulse(struct bxe_softc *sc)
2612{
2613    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2614             sc->fw_drv_pulse_wr_seq);
2615}
2616
2617static inline uint16_t
2618bxe_tx_avail(struct bxe_softc *sc,
2619             struct bxe_fastpath *fp)
2620{
2621    int16_t  used;
2622    uint16_t prod;
2623    uint16_t cons;
2624
2625    prod = fp->tx_bd_prod;
2626    cons = fp->tx_bd_cons;
2627
2628    used = SUB_S16(prod, cons);
2629
2630    return (int16_t)(sc->tx_ring_size) - used;
2631}
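/*
 * Illustrative note (not from the original source): SUB_S16() performs the
 * producer/consumer subtraction in signed 16-bit space so the in-flight BD
 * count stays correct when the free-running indices wrap. For example, with
 * a hypothetical prod = 0x000a and cons = 0xfff0 the raw difference is
 * negative, but the 16-bit arithmetic yields 26 BDs in use, which is the
 * intended distance around the ring.
 */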
2632
2633static inline int
2634bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2635{
2636    uint16_t hw_cons;
2637
2638    mb(); /* status block fields can change */
2639    hw_cons = le16toh(*fp->tx_cons_sb);
2640    return (hw_cons != fp->tx_pkt_cons);
2641}
2642
2643static inline uint8_t
2644bxe_has_tx_work(struct bxe_fastpath *fp)
2645{
2646    /* expand this for multi-cos if ever supported */
2647    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2648}
2649
2650static inline int
2651bxe_has_rx_work(struct bxe_fastpath *fp)
2652{
2653    uint16_t rx_cq_cons_sb;
2654
2655    mb(); /* status block fields can change */
2656    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2657    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2658        rx_cq_cons_sb++;
2659    return (fp->rx_cq_cons != rx_cq_cons_sb);
2660}
2661
2662static void
2663bxe_sp_event(struct bxe_softc    *sc,
2664             struct bxe_fastpath *fp,
2665             union eth_rx_cqe    *rr_cqe)
2666{
2667    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2668    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2669    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2670    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2671
2672    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2673          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2674
2675    switch (command) {
2676    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2677        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2678        drv_cmd = ECORE_Q_CMD_UPDATE;
2679        break;
2680
2681    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2682        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2683        drv_cmd = ECORE_Q_CMD_SETUP;
2684        break;
2685
2686    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2687        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2688        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2689        break;
2690
2691    case (RAMROD_CMD_ID_ETH_HALT):
2692        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2693        drv_cmd = ECORE_Q_CMD_HALT;
2694        break;
2695
2696    case (RAMROD_CMD_ID_ETH_TERMINATE):
2697        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2698        drv_cmd = ECORE_Q_CMD_TERMINATE;
2699        break;
2700
2701    case (RAMROD_CMD_ID_ETH_EMPTY):
2702        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2703        drv_cmd = ECORE_Q_CMD_EMPTY;
2704        break;
2705
2706    default:
2707        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2708              command, fp->index);
2709        return;
2710    }
2711
2712    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2713        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2714        /*
2715         * q_obj->complete_cmd() failure means that this was
2716         * an unexpected completion.
2717         *
2718         * In this case we don't want to increase the sc->spq_left
2719         * because apparently we haven't sent this command the first
2720         * because apparently we haven't sent this command in the first
2721         */
2722        // bxe_panic(sc, ("Unexpected SP completion\n"));
2723        return;
2724    }
2725
2726    atomic_add_acq_long(&sc->cq_spq_left, 1);
2727
2728    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2729          atomic_load_acq_long(&sc->cq_spq_left));
2730}
2731
2732/*
2733 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2734 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2735 * the current aggregation queue as in-progress.
2736 */
2737static void
2738bxe_tpa_start(struct bxe_softc            *sc,
2739              struct bxe_fastpath         *fp,
2740              uint16_t                    queue,
2741              uint16_t                    cons,
2742              uint16_t                    prod,
2743              struct eth_fast_path_rx_cqe *cqe)
2744{
2745    struct bxe_sw_rx_bd tmp_bd;
2746    struct bxe_sw_rx_bd *rx_buf;
2747    struct eth_rx_bd *rx_bd;
2748    int max_agg_queues;
2749    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2750    uint16_t index;
2751
2752    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2753                       "cons=%d prod=%d\n",
2754          fp->index, queue, cons, prod);
2755
2756    max_agg_queues = MAX_AGG_QS(sc);
2757
2758    KASSERT((queue < max_agg_queues),
2759            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2760             fp->index, queue, max_agg_queues));
2761
2762    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2763            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2764             fp->index, queue));
2765
2766    /* copy the existing mbuf and mapping from the TPA pool */
2767    tmp_bd = tpa_info->bd;
2768
2769    if (tmp_bd.m == NULL) {
2770        uint32_t *tmp;
2771
2772        tmp = (uint32_t *)cqe;
2773
2774        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2775              fp->index, queue, cons, prod);
2776        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2777            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2778
2779        /* XXX Error handling? */
2780        return;
2781    }
2782
2783    /* change the TPA queue to the start state */
2784    tpa_info->state            = BXE_TPA_STATE_START;
2785    tpa_info->placement_offset = cqe->placement_offset;
2786    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2787    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2788    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2789
2790    fp->rx_tpa_queue_used |= (1 << queue);
2791
2792    /*
2793     * If all the buffer descriptors are filled with mbufs then fill in
2794     * the current consumer index with a new BD. Else if a maximum Rx
2795     * buffer limit is imposed then fill in the next producer index.
2796     */
2797    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2798                prod : cons;
2799
2800    /* move the received mbuf and mapping to TPA pool */
2801    tpa_info->bd = fp->rx_mbuf_chain[cons];
2802
2803    /* release any existing RX BD mbuf mappings */
2804    if (cons != index) {
2805        rx_buf = &fp->rx_mbuf_chain[cons];
2806
2807        if (rx_buf->m_map != NULL) {
2808            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2809                            BUS_DMASYNC_POSTREAD);
2810            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2811        }
2812
2813        /*
2814         * We get here when the maximum number of rx buffers is less than
2815         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2816         * it out here without concern of a memory leak.
2817         */
2818        fp->rx_mbuf_chain[cons].m = NULL;
2819    }
2820
2821    /* update the Rx SW BD with the mbuf info from the TPA pool */
2822    fp->rx_mbuf_chain[index] = tmp_bd;
2823
2824    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2825    rx_bd = &fp->rx_chain[index];
2826    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2827    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2828}
2829
2830/*
2831 * When a TPA aggregation is completed, loop through the individual mbufs
2832 * of the aggregation, combining them into a single mbuf which will be sent
2833 * up the stack. Refill all freed SGEs with mbufs as we go along.
2834 */
2835static int
2836bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2837                   struct bxe_fastpath       *fp,
2838                   struct bxe_sw_tpa_info    *tpa_info,
2839                   uint16_t                  queue,
2840                   uint16_t                  pages,
2841                   struct mbuf               *m,
2842                   struct eth_end_agg_rx_cqe *cqe,
2843                   uint16_t                  cqe_idx)
2844{
2845    struct mbuf *m_frag;
2846    uint32_t frag_len, frag_size, i;
2847    uint16_t sge_idx;
2848    int rc = 0;
2849    int j;
2850
2851    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2852
2853    BLOGD(sc, DBG_LRO,
2854          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2855          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2856
2857    /* make sure the aggregated frame is not too big to handle */
2858    if (pages > 8 * PAGES_PER_SGE) {
2859
2860        uint32_t *tmp = (uint32_t *)cqe;
2861
2862        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2863                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2864              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2865              tpa_info->len_on_bd, frag_size);
2866
2867        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2868            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2869
2870        bxe_panic(sc, ("sge page count error\n"));
2871        return (EINVAL);
2872    }
2873
2874    /*
2875     * Scan through the scatter gather list pulling individual mbufs into a
2876     * single mbuf for the host stack.
2877     */
2878    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2879        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2880
2881        /*
2882         * Firmware gives the indices of the SGE as if the ring is an array
2883         * (meaning that the "next" element will consume 2 indices).
2884         */
2885        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2886
2887        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2888                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2889              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2890
2891        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2892
2893        /* allocate a new mbuf for the SGE */
2894        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2895        if (rc) {
2896            /* Leave all remaining SGEs in the ring! */
2897            return (rc);
2898        }
2899
2900        /* update the fragment length */
2901        m_frag->m_len = frag_len;
2902
2903        /* concatenate the fragment to the head mbuf */
2904        m_cat(m, m_frag);
2905        fp->eth_q_stats.mbuf_alloc_sge--;
2906
2907        /* update the TPA mbuf size and remaining fragment size */
2908        m->m_pkthdr.len += frag_len;
2909        frag_size -= frag_len;
2910    }
2911
2912    BLOGD(sc, DBG_LRO,
2913          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2914          fp->index, queue, frag_size);
2915
2916    return (rc);
2917}
2918
2919static inline void
2920bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2921{
2922    int i, j;
2923
2924    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2925        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2926
2927        for (j = 0; j < 2; j++) {
2928            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2929            idx--;
2930        }
2931    }
2932}
2933
2934static inline void
2935bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2936{
2937    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2938    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2939
2940    /*
2941     * Clear the last two indices in each page. These are the indices that
2942     * correspond to the "next" element, hence they will never be indicated
2943     * and should be removed from the calculations.
2944     */
2945    bxe_clear_sge_mask_next_elems(fp);
2946}
2947
2948static inline void
2949bxe_update_last_max_sge(struct bxe_fastpath *fp,
2950                        uint16_t            idx)
2951{
2952    uint16_t last_max = fp->last_max_sge;
2953
2954    if (SUB_S16(idx, last_max) > 0) {
2955        fp->last_max_sge = idx;
2956    }
2957}
2958
2959static inline void
2960bxe_update_sge_prod(struct bxe_softc          *sc,
2961                    struct bxe_fastpath       *fp,
2962                    uint16_t                  sge_len,
2963                    union eth_sgl_or_raw_data *cqe)
2964{
2965    uint16_t last_max, last_elem, first_elem;
2966    uint16_t delta = 0;
2967    uint16_t i;
2968
2969    if (!sge_len) {
2970        return;
2971    }
2972
2973    /* first mark all used pages */
2974    for (i = 0; i < sge_len; i++) {
2975        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2976                            RX_SGE(le16toh(cqe->sgl[i])));
2977    }
2978
2979    BLOGD(sc, DBG_LRO,
2980          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2981          fp->index, sge_len - 1,
2982          le16toh(cqe->sgl[sge_len - 1]));
2983
2984    /* assume that the last SGE index is the biggest */
2985    bxe_update_last_max_sge(fp,
2986                            le16toh(cqe->sgl[sge_len - 1]));
2987
2988    last_max = RX_SGE(fp->last_max_sge);
2989    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2990    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2991
2992    /* if ring is not full */
2993    if (last_elem + 1 != first_elem) {
2994        last_elem++;
2995    }
2996
2997    /* now update the prod */
2998    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2999        if (__predict_true(fp->sge_mask[i])) {
3000            break;
3001        }
3002
3003        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3004        delta += BIT_VEC64_ELEM_SZ;
3005    }
3006
3007    if (delta > 0) {
3008        fp->rx_sge_prod += delta;
3009        /* clear page-end entries */
3010        bxe_clear_sge_mask_next_elems(fp);
3011    }
3012
3013    BLOGD(sc, DBG_LRO,
3014          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3015          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3016}
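/*
 * Illustrative note (not from the original source): fp->sge_mask is a bit
 * vector with one bit per SGE, grouped into 64-bit elements. A set bit means
 * the SGE still holds a buffer; bits are cleared above as the firmware
 * reports pages consumed. The producer only advances in whole element-sized
 * steps: each fully cleared element is re-armed to all ones and adds
 * BIT_VEC64_ELEM_SZ to "delta". If, say, the first two elements scanned were
 * fully consumed while the third still had bits set, rx_sge_prod would move
 * forward by exactly two elements' worth of entries.
 */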
3017
3018/*
3019 * The aggregation on the current TPA queue has completed. Pull the individual
3020 * mbuf fragments together into a single mbuf, perform all necessary checksum
3021 * calculations, and send the resulting mbuf to the stack.
3022 */
3023static void
3024bxe_tpa_stop(struct bxe_softc          *sc,
3025             struct bxe_fastpath       *fp,
3026             struct bxe_sw_tpa_info    *tpa_info,
3027             uint16_t                  queue,
3028             uint16_t                  pages,
3029             struct eth_end_agg_rx_cqe *cqe,
3030             uint16_t                  cqe_idx)
3031{
3032    if_t ifp = sc->ifp;
3033    struct mbuf *m;
3034    int rc = 0;
3035
3036    BLOGD(sc, DBG_LRO,
3037          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3038          fp->index, queue, tpa_info->placement_offset,
3039          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3040
3041    m = tpa_info->bd.m;
3042
3043    /* allocate a replacement before modifying existing mbuf */
3044    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3045    if (rc) {
3046        /* drop the frame and log an error */
3047        fp->eth_q_stats.rx_soft_errors++;
3048        goto bxe_tpa_stop_exit;
3049    }
3050
3051    /* we have a replacement, fixup the current mbuf */
3052    m_adj(m, tpa_info->placement_offset);
3053    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3054
3055    /* mark the checksums valid (taken care of by the firmware) */
3056    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3057    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3058    m->m_pkthdr.csum_data = 0xffff;
3059    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3060                               CSUM_IP_VALID   |
3061                               CSUM_DATA_VALID |
3062                               CSUM_PSEUDO_HDR);
3063
3064    /* aggregate all of the SGEs into a single mbuf */
3065    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3066    if (rc) {
3067        /* drop the packet and log an error */
3068        fp->eth_q_stats.rx_soft_errors++;
3069        m_freem(m);
3070    } else {
3071        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3072            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3073            m->m_flags |= M_VLANTAG;
3074        }
3075
3076        /* assign the packet to this interface */
3077        if_setrcvif(m, ifp);
3078
3079#if __FreeBSD_version >= 800000
3080        /* specify what RSS queue was used for this flow */
3081        m->m_pkthdr.flowid = fp->index;
3082        BXE_SET_FLOWID(m);
3083#endif
3084
3085        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3086        fp->eth_q_stats.rx_tpa_pkts++;
3087
3088        /* pass the frame to the stack */
3089        if_input(ifp, m);
3090    }
3091
3092    /* we passed an mbuf up the stack or dropped the frame */
3093    fp->eth_q_stats.mbuf_alloc_tpa--;
3094
3095bxe_tpa_stop_exit:
3096
3097    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3098    fp->rx_tpa_queue_used &= ~(1 << queue);
3099}
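/*
 * Illustrative summary (not from the original source): a TPA/LRO aggregation
 * on a queue moves BXE_TPA_STATE_STOP -> START -> STOP, driven from
 * bxe_rxeof() below roughly as:
 *
 *     START CQE -> bxe_tpa_start()  (park the head mbuf in rx_tpa_info)
 *     ...          firmware fills SGE pages with the remaining segments
 *     STOP CQE  -> bxe_tpa_stop()   (splice the SGEs via bxe_fill_frag_mbuf()
 *                                    and hand the single mbuf to if_input())
 */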
3100
3101static uint8_t
3102bxe_service_rxsgl(
3103                 struct bxe_fastpath *fp,
3104                 uint16_t len,
3105                 uint16_t lenonbd,
3106                 struct mbuf *m,
3107                 struct eth_fast_path_rx_cqe *cqe_fp)
3108{
3109    struct mbuf *m_frag;
3110    uint16_t frags, frag_len;
3111    uint16_t sge_idx = 0;
3112    uint16_t j;
3113    uint8_t i, rc = 0;
3114    uint32_t frag_size;
3115
3116    /* adjust the mbuf */
3117    m->m_len = lenonbd;
3118
3119    frag_size = len - lenonbd;
3120    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3121
3122    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3123        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3124
3125        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3126        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3127        m_frag->m_len = frag_len;
3128
3129        /* allocate a new mbuf for the SGE */
3130        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3131        if (rc) {
3132            /* Leave all remaining SGEs in the ring! */
3133            return (rc);
3134        }
3135        fp->eth_q_stats.mbuf_alloc_sge--;
3136
3137        /* concatenate the fragment to the head mbuf */
3138        m_cat(m, m_frag);
3139
3140        frag_size -= frag_len;
3141    }
3142
3143    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3144
3145    return rc;
3146}
3147
3148static uint8_t
3149bxe_rxeof(struct bxe_softc    *sc,
3150          struct bxe_fastpath *fp)
3151{
3152    if_t ifp = sc->ifp;
3153    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3154    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3155    int rx_pkts = 0;
3156    int rc = 0;
3157
3158    BXE_FP_RX_LOCK(fp);
3159
3160    /* CQ "next element" is of the size of the regular element */
3161    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3162    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3163        hw_cq_cons++;
3164    }
3165
3166    bd_cons = fp->rx_bd_cons;
3167    bd_prod = fp->rx_bd_prod;
3168    bd_prod_fw = bd_prod;
3169    sw_cq_cons = fp->rx_cq_cons;
3170    sw_cq_prod = fp->rx_cq_prod;
3171
3172    /*
3173     * Memory barrier necessary as speculative reads of the rx
3174     * buffer can be ahead of the index in the status block
3175     */
3176    rmb();
3177
3178    BLOGD(sc, DBG_RX,
3179          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3180          fp->index, hw_cq_cons, sw_cq_cons);
3181
3182    while (sw_cq_cons != hw_cq_cons) {
3183        struct bxe_sw_rx_bd *rx_buf = NULL;
3184        union eth_rx_cqe *cqe;
3185        struct eth_fast_path_rx_cqe *cqe_fp;
3186        uint8_t cqe_fp_flags;
3187        enum eth_rx_cqe_type cqe_fp_type;
3188        uint16_t len, lenonbd,  pad;
3189        struct mbuf *m = NULL;
3190
3191        comp_ring_cons = RCQ(sw_cq_cons);
3192        bd_prod = RX_BD(bd_prod);
3193        bd_cons = RX_BD(bd_cons);
3194
3195        cqe          = &fp->rcq_chain[comp_ring_cons];
3196        cqe_fp       = &cqe->fast_path_cqe;
3197        cqe_fp_flags = cqe_fp->type_error_flags;
3198        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3199
3200        BLOGD(sc, DBG_RX,
3201              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3202              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3203              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3204              fp->index,
3205              hw_cq_cons,
3206              sw_cq_cons,
3207              bd_prod,
3208              bd_cons,
3209              CQE_TYPE(cqe_fp_flags),
3210              cqe_fp_flags,
3211              cqe_fp->status_flags,
3212              le32toh(cqe_fp->rss_hash_result),
3213              le16toh(cqe_fp->vlan_tag),
3214              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3215              le16toh(cqe_fp->len_on_bd));
3216
3217        /* is this a slowpath msg? */
3218        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3219            bxe_sp_event(sc, fp, cqe);
3220            goto next_cqe;
3221        }
3222
3223        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3224
3225        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3226            struct bxe_sw_tpa_info *tpa_info;
3227            uint16_t frag_size, pages;
3228            uint8_t queue;
3229
3230            if (CQE_TYPE_START(cqe_fp_type)) {
3231                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3232                              bd_cons, bd_prod, cqe_fp);
3233                m = NULL; /* packet not ready yet */
3234                goto next_rx;
3235            }
3236
3237            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3238                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3239
3240            queue = cqe->end_agg_cqe.queue_index;
3241            tpa_info = &fp->rx_tpa_info[queue];
3242
3243            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3244                  fp->index, queue);
3245
3246            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3247                         tpa_info->len_on_bd);
3248            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3249
3250            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3251                         &cqe->end_agg_cqe, comp_ring_cons);
3252
3253            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3254
3255            goto next_cqe;
3256        }
3257
3258        /* non TPA */
3259
3260        /* is this an error packet? */
3261        if (__predict_false(cqe_fp_flags &
3262                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3263            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3264            fp->eth_q_stats.rx_soft_errors++;
3265            goto next_rx;
3266        }
3267
3268        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3269        lenonbd = le16toh(cqe_fp->len_on_bd);
3270        pad = cqe_fp->placement_offset;
3271
3272        m = rx_buf->m;
3273
3274        if (__predict_false(m == NULL)) {
3275            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3276                  bd_cons, fp->index);
3277            goto next_rx;
3278        }
3279
3280        /* XXX double copy if packet length under a threshold */
3281
3282        /*
3283         * If all the buffer descriptors are filled with mbufs then fill in
3284         * the current consumer index with a new BD. Else if a maximum Rx
3285         * buffer limit is imposed then fill in the next producer index.
3286         */
3287        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3288                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3289                                      bd_prod : bd_cons);
3290        if (rc != 0) {
3291
3292            /* we simply reuse the received mbuf and don't post it to the stack */
3293            m = NULL;
3294
3295            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3296                  fp->index, rc);
3297            fp->eth_q_stats.rx_soft_errors++;
3298
3299            if (sc->max_rx_bufs != RX_BD_USABLE) {
3300                /* copy this consumer index to the producer index */
3301                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3302                       sizeof(struct bxe_sw_rx_bd));
3303                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3304            }
3305
3306            goto next_rx;
3307        }
3308
3309        /* current mbuf was detached from the bd */
3310        fp->eth_q_stats.mbuf_alloc_rx--;
3311
3312        /* we allocated a replacement mbuf, fixup the current one */
3313        m_adj(m, pad);
3314        m->m_pkthdr.len = m->m_len = len;
3315
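        /*
         * If the frame is longer than what fit on the first BD, the
         * remainder was placed in SGE pages; gather those fragments onto
         * this mbuf chain.
         */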
3316        if ((len > 60) && (len > lenonbd)) {
3317            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3318            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3319            if (rc)
3320                break;
3321            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3322        } else if (lenonbd < len) {
3323            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3324        }
3325
3326        /* assign the packet to this interface */
3327        if_setrcvif(m, ifp);
3328
3329        /* assume no hardware checksum offload has completed */
3330        m->m_pkthdr.csum_flags = 0;
3331
3332        /* validate checksum if offload enabled */
3333        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3334            /* check for a valid IP frame */
3335            if (!(cqe->fast_path_cqe.status_flags &
3336                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3337                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3338                if (__predict_false(cqe_fp_flags &
3339                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3340                    fp->eth_q_stats.rx_hw_csum_errors++;
3341                } else {
3342                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3343                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3344                }
3345            }
3346
3347            /* check for a valid TCP/UDP frame */
3348            if (!(cqe->fast_path_cqe.status_flags &
3349                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3350                if (__predict_false(cqe_fp_flags &
3351                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3352                    fp->eth_q_stats.rx_hw_csum_errors++;
3353                } else {
3354                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3355                    m->m_pkthdr.csum_data = 0xFFFF;
3356                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3357                                               CSUM_PSEUDO_HDR);
3358                }
3359            }
3360        }
3361
3362        /* if there is a VLAN tag then flag that info */
3363        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3364            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3365            m->m_flags |= M_VLANTAG;
3366        }
3367
3368#if __FreeBSD_version >= 800000
3369        /* specify what RSS queue was used for this flow */
3370        m->m_pkthdr.flowid = fp->index;
3371        BXE_SET_FLOWID(m);
3372#endif
3373
3374next_rx:
3375
3376        bd_cons    = RX_BD_NEXT(bd_cons);
3377        bd_prod    = RX_BD_NEXT(bd_prod);
3378        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3379
3380        /* pass the frame to the stack */
3381        if (__predict_true(m != NULL)) {
3382            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3383            rx_pkts++;
3384            if_input(ifp, m);
3385        }
3386
3387next_cqe:
3388
3389        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3390        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3391
3392        /* limit spinning on the queue */
3393        if (rc != 0)
3394            break;
3395
3396        if (rx_pkts == sc->rx_budget) {
3397            fp->eth_q_stats.rx_budget_reached++;
3398            break;
3399        }
3400    } /* while work to do */
3401
3402    fp->rx_bd_cons = bd_cons;
3403    fp->rx_bd_prod = bd_prod_fw;
3404    fp->rx_cq_cons = sw_cq_cons;
3405    fp->rx_cq_prod = sw_cq_prod;
3406
3407    /* Update producers */
3408    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3409
3410    fp->eth_q_stats.rx_pkts += rx_pkts;
3411    fp->eth_q_stats.rx_calls++;
3412
3413    BXE_FP_RX_UNLOCK(fp);
3414
3415    return (sw_cq_cons != hw_cq_cons);
3416}
3417
3418static uint16_t
3419bxe_free_tx_pkt(struct bxe_softc    *sc,
3420                struct bxe_fastpath *fp,
3421                uint16_t            idx)
3422{
3423    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3424    struct eth_tx_start_bd *tx_start_bd;
3425    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3426    uint16_t new_cons;
3427    int nbd;
3428
3429    /* unmap the mbuf from non-paged memory */
3430    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3431
3432    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3433    nbd = le16toh(tx_start_bd->nbd) - 1;
3434
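    /* use the BD count (nbd) recorded in the start BD to advance the consumer index over this packet */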
3435    new_cons = (tx_buf->first_bd + nbd);
3436
3437    /* free the mbuf */
3438    if (__predict_true(tx_buf->m != NULL)) {
3439        m_freem(tx_buf->m);
3440        fp->eth_q_stats.mbuf_alloc_tx--;
3441    } else {
3442        fp->eth_q_stats.tx_chain_lost_mbuf++;
3443    }
3444
3445    tx_buf->m = NULL;
3446    tx_buf->first_bd = 0;
3447
3448    return (new_cons);
3449}
3450
3451/* transmit timeout watchdog */
3452static int
3453bxe_watchdog(struct bxe_softc    *sc,
3454             struct bxe_fastpath *fp)
3455{
3456    BXE_FP_TX_LOCK(fp);
3457
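    /*
     * A watchdog_timer of zero means the watchdog is disarmed; otherwise
     * the timeout only fires once the per-call decrement reaches zero.
     */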
3458    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3459        BXE_FP_TX_UNLOCK(fp);
3460        return (0);
3461    }
3462
3463    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3464    if(sc->trigger_grcdump) {
3465         /* taking grcdump */
3466         bxe_grc_dump(sc);
3467    }
3468
3469    BXE_FP_TX_UNLOCK(fp);
3470
3471    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3472    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3473
3474    return (-1);
3475}
3476
3477/* processes transmit completions */
3478static uint8_t
3479bxe_txeof(struct bxe_softc    *sc,
3480          struct bxe_fastpath *fp)
3481{
3482    if_t ifp = sc->ifp;
3483    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3484    uint16_t tx_bd_avail;
3485
3486    BXE_FP_TX_LOCK_ASSERT(fp);
3487
3488    bd_cons = fp->tx_bd_cons;
3489    hw_cons = le16toh(*fp->tx_cons_sb);
3490    sw_cons = fp->tx_pkt_cons;
3491
3492    while (sw_cons != hw_cons) {
3493        pkt_cons = TX_BD(sw_cons);
3494
3495        BLOGD(sc, DBG_TX,
3496              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3497              fp->index, hw_cons, sw_cons, pkt_cons);
3498
3499        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3500
3501        sw_cons++;
3502    }
3503
3504    fp->tx_pkt_cons = sw_cons;
3505    fp->tx_bd_cons  = bd_cons;
3506
3507    BLOGD(sc, DBG_TX,
3508          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3509          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3510
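    /*
     * Make the updated consumer indices visible to the transmit path
     * before re-evaluating ring availability below.
     */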
3511    mb();
3512
3513    tx_bd_avail = bxe_tx_avail(sc, fp);
3514
3515    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3516        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3517    } else {
3518        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3519    }
3520
3521    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3522        /* reset the watchdog timer if there are pending transmits */
3523        fp->watchdog_timer = BXE_TX_TIMEOUT;
3524        return (TRUE);
3525    } else {
3526        /* clear watchdog when there are no pending transmits */
3527        fp->watchdog_timer = 0;
3528        return (FALSE);
3529    }
3530}
3531
3532static void
3533bxe_drain_tx_queues(struct bxe_softc *sc)
3534{
3535    struct bxe_fastpath *fp;
3536    int i, count;
3537
3538    /* wait until all TX fastpath tasks have completed */
3539    for (i = 0; i < sc->num_queues; i++) {
3540        fp = &sc->fp[i];
3541
3542        count = 1000;
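        /* with the 1ms DELAY below, this bounds the drain at roughly one second per queue */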
3543
3544        while (bxe_has_tx_work(fp)) {
3545
3546            BXE_FP_TX_LOCK(fp);
3547            bxe_txeof(sc, fp);
3548            BXE_FP_TX_UNLOCK(fp);
3549
3550            if (count == 0) {
3551                BLOGE(sc, "Timeout waiting for fp[%d] "
3552                          "transmits to complete!\n", i);
3553                bxe_panic(sc, ("tx drain failure\n"));
3554                return;
3555            }
3556
3557            count--;
3558            DELAY(1000);
3559            rmb();
3560        }
3561    }
3562
3563    return;
3564}
3565
3566static int
3567bxe_del_all_macs(struct bxe_softc          *sc,
3568                 struct ecore_vlan_mac_obj *mac_obj,
3569                 int                       mac_type,
3570                 uint8_t                   wait_for_comp)
3571{
3572    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3573    int rc;
3574
3575    /* wait for completion of the requested command */
3576    if (wait_for_comp) {
3577        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3578    }
3579
3580    /* Set the mac type of addresses we want to clear */
3581    bxe_set_bit(mac_type, &vlan_mac_flags);
3582
3583    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3584    if (rc < 0) {
3585        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3586            rc, mac_type, wait_for_comp);
3587    }
3588
3589    return (rc);
3590}
3591
3592static int
3593bxe_fill_accept_flags(struct bxe_softc *sc,
3594                      uint32_t         rx_mode,
3595                      unsigned long    *rx_accept_flags,
3596                      unsigned long    *tx_accept_flags)
3597{
3598    /* Clear the flags first */
3599    *rx_accept_flags = 0;
3600    *tx_accept_flags = 0;
3601
3602    switch (rx_mode) {
3603    case BXE_RX_MODE_NONE:
3604        /*
3605         * 'drop all' supersedes any accept flags that may have been
3606         * passed to the function.
3607         */
3608        break;
3609
3610    case BXE_RX_MODE_NORMAL:
3611        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3612        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3613        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3614
3615        /* internal switching mode */
3616        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3617        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3618        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3619
3620        break;
3621
3622    case BXE_RX_MODE_ALLMULTI:
3623        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3624        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3625        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3626
3627        /* internal switching mode */
3628        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3629        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3630        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3631
3632        break;
3633
3634    case BXE_RX_MODE_PROMISC:
3635        /*
3636         * According to the definition of SI mode, an interface in promisc
3637         * mode should receive matched and unmatched (in resolution of port)
3638         * unicast packets.
3639         */
3640        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3641        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3642        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3643        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3644
3645        /* internal switching mode */
3646        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3647        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3648
3649        if (IS_MF_SI(sc)) {
3650            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3651        } else {
3652            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3653        }
3654
3655        break;
3656
3657    default:
3658        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3659        return (-1);
3660    }
3661
3662    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3663    if (rx_mode != BXE_RX_MODE_NONE) {
3664        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3665        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3666    }
3667
3668    return (0);
3669}
3670
3671static int
3672bxe_set_q_rx_mode(struct bxe_softc *sc,
3673                  uint8_t          cl_id,
3674                  unsigned long    rx_mode_flags,
3675                  unsigned long    rx_accept_flags,
3676                  unsigned long    tx_accept_flags,
3677                  unsigned long    ramrod_flags)
3678{
3679    struct ecore_rx_mode_ramrod_params ramrod_param;
3680    int rc;
3681
3682    memset(&ramrod_param, 0, sizeof(ramrod_param));
3683
3684    /* Prepare ramrod parameters */
3685    ramrod_param.cid = 0;
3686    ramrod_param.cl_id = cl_id;
3687    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3688    ramrod_param.func_id = SC_FUNC(sc);
3689
3690    ramrod_param.pstate = &sc->sp_state;
3691    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3692
3693    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3694    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3695
3696    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3697
3698    ramrod_param.ramrod_flags = ramrod_flags;
3699    ramrod_param.rx_mode_flags = rx_mode_flags;
3700
3701    ramrod_param.rx_accept_flags = rx_accept_flags;
3702    ramrod_param.tx_accept_flags = tx_accept_flags;
3703
3704    rc = ecore_config_rx_mode(sc, &ramrod_param);
3705    if (rc < 0) {
3706        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3707            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3708            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3709            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3710            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3711        return (rc);
3712    }
3713
3714    return (0);
3715}
3716
3717static int
3718bxe_set_storm_rx_mode(struct bxe_softc *sc)
3719{
3720    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3721    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3722    int rc;
3723
3724    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3725                               &tx_accept_flags);
3726    if (rc) {
3727        return (rc);
3728    }
3729
3730    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3731    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3732
3733    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3734    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3735                              rx_accept_flags, tx_accept_flags,
3736                              ramrod_flags));
3737}
3738
3739/* returns the "mcp load_code" according to global load_count array */
3740static int
3741bxe_nic_load_no_mcp(struct bxe_softc *sc)
3742{
3743    int path = SC_PATH(sc);
3744    int port = SC_PORT(sc);
3745
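    /*
     * load_count[path][0] tracks the total number of functions loaded on
     * this path; [1] and [2] are the per-port counts.
     */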
3746    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3747          path, load_count[path][0], load_count[path][1],
3748          load_count[path][2]);
3749    load_count[path][0]++;
3750    load_count[path][1 + port]++;
3751    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3752          path, load_count[path][0], load_count[path][1],
3753          load_count[path][2]);
3754    if (load_count[path][0] == 1) {
3755        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3756    } else if (load_count[path][1 + port] == 1) {
3757        return (FW_MSG_CODE_DRV_LOAD_PORT);
3758    } else {
3759        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3760    }
3761}
3762
3763/* returns the "mcp unload_code" according to the global load_count array */
3764static int
3765bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3766{
3767    int port = SC_PORT(sc);
3768    int path = SC_PATH(sc);
3769
3770    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3771          path, load_count[path][0], load_count[path][1],
3772          load_count[path][2]);
3773    load_count[path][0]--;
3774    load_count[path][1 + port]--;
3775    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3776          path, load_count[path][0], load_count[path][1],
3777          load_count[path][2]);
3778    if (load_count[path][0] == 0) {
3779        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3780    } else if (load_count[path][1 + port] == 0) {
3781        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3782    } else {
3783        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3784    }
3785}
3786
3787/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3788static uint32_t
3789bxe_send_unload_req(struct bxe_softc *sc,
3790                    int              unload_mode)
3791{
3792    uint32_t reset_code = 0;
3793
3794    /* Select the UNLOAD request mode */
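    /*
     * WOL is not supported by this driver (see the IFCAP_WOL_MAGIC XXX in
     * bxe_ioctl()), which is presumably why both branches below request
     * WOL disabled.
     */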
3795    if (unload_mode == UNLOAD_NORMAL) {
3796        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3797    } else {
3798        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3799    }
3800
3801    /* Send the request to the MCP */
3802    if (!BXE_NOMCP(sc)) {
3803        reset_code = bxe_fw_command(sc, reset_code, 0);
3804    } else {
3805        reset_code = bxe_nic_unload_no_mcp(sc);
3806    }
3807
3808    return (reset_code);
3809}
3810
3811/* send UNLOAD_DONE command to the MCP */
3812static void
3813bxe_send_unload_done(struct bxe_softc *sc,
3814                     uint8_t          keep_link)
3815{
3816    uint32_t reset_param =
3817        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3818
3819    /* Report UNLOAD_DONE to MCP */
3820    if (!BXE_NOMCP(sc)) {
3821        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3822    }
3823}
3824
3825static int
3826bxe_func_wait_started(struct bxe_softc *sc)
3827{
3828    int tout = 50;
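    /* the polling loop below delays 20ms per iteration, so 50 gives roughly a 1 second wait */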
3829
3830    if (!sc->port.pmf) {
3831        return (0);
3832    }
3833
3834    /*
3835     * (assumption: No Attention from MCP at this stage)
3836     * The PMF is probably in the middle of a TX disable/enable transaction:
3837     * 1. Sync the ISR for the default SB
3838     * 2. Sync the SP queue - this guarantees that attention handling has started
3839     * 3. Wait until the TX disable/enable transaction completes
3840     *
3841     * 1+2 guarantee that if a DCBX attention was scheduled, it has already
3842     * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
3843     * if we already received a completion for the transaction, the state is
3844     * TX_STOPPED. The state will return to STARTED after the
3845     * TX_STOPPED-->STARTED transaction completes.
3846     */
3847
3848    /* XXX make sure default SB ISR is done */
3849    /* need a way to synchronize an irq (intr_mtx?) */
3850
3851    /* XXX flush any work queues */
3852
3853    while (ecore_func_get_state(sc, &sc->func_obj) !=
3854           ECORE_F_STATE_STARTED && tout--) {
3855        DELAY(20000);
3856    }
3857
3858    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3859        /*
3860         * Failed to complete the transaction in a "good way"
3861         * Force both transactions with CLR bit.
3862         */
3863        struct ecore_func_state_params func_params = { NULL };
3864
3865        BLOGE(sc, "Unexpected function state! "
3866                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3867
3868        func_params.f_obj = &sc->func_obj;
3869        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3870
3871        /* STARTED-->TX_STOPPED */
3872        func_params.cmd = ECORE_F_CMD_TX_STOP;
3873        ecore_func_state_change(sc, &func_params);
3874
3875        /* TX_STOPPED-->STARTED */
3876        func_params.cmd = ECORE_F_CMD_TX_START;
3877        return (ecore_func_state_change(sc, &func_params));
3878    }
3879
3880    return (0);
3881}
3882
3883static int
3884bxe_stop_queue(struct bxe_softc *sc,
3885               int              index)
3886{
3887    struct bxe_fastpath *fp = &sc->fp[index];
3888    struct ecore_queue_state_params q_params = { NULL };
3889    int rc;
3890
3891    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3892
3893    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3894    /* We want to wait for completion in this context */
3895    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3896
3897    /* Stop the primary connection: */
3898
3899    /* ...halt the connection */
3900    q_params.cmd = ECORE_Q_CMD_HALT;
3901    rc = ecore_queue_state_change(sc, &q_params);
3902    if (rc) {
3903        return (rc);
3904    }
3905
3906    /* ...terminate the connection */
3907    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3908    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3909    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3910    rc = ecore_queue_state_change(sc, &q_params);
3911    if (rc) {
3912        return (rc);
3913    }
3914
3915    /* ...delete cfc entry */
3916    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3917    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3918    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3919    return (ecore_queue_state_change(sc, &q_params));
3920}
3921
3922/* wait for the outstanding SP commands */
3923static inline uint8_t
3924bxe_wait_sp_comp(struct bxe_softc *sc,
3925                 unsigned long    mask)
3926{
3927    unsigned long tmp;
3928    int tout = 5000; /* wait for 5 secs tops */
3929
3930    while (tout--) {
3931        mb();
3932        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3933            return (TRUE);
3934        }
3935
3936        DELAY(1000);
3937    }
3938
3939    mb();
3940
3941    tmp = atomic_load_acq_long(&sc->sp_state);
3942    if (tmp & mask) {
3943        BLOGE(sc, "Filtering completion timed out: "
3944                  "sp_state 0x%lx, mask 0x%lx\n",
3945              tmp, mask);
3946        return (FALSE);
3947    }
3948
3949    return (FALSE);
3950}
3951
3952static int
3953bxe_func_stop(struct bxe_softc *sc)
3954{
3955    struct ecore_func_state_params func_params = { NULL };
3956    int rc;
3957
3958    /* prepare parameters for function state transitions */
3959    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3960    func_params.f_obj = &sc->func_obj;
3961    func_params.cmd = ECORE_F_CMD_STOP;
3962
3963    /*
3964     * Try to stop the function the 'good way'. If it fails (in case
3965     * of a parity error during bxe_chip_cleanup()) and we are
3966     * not in a debug mode, perform a state transaction in order to
3967     * enable further HW_RESET transaction.
3968     */
3969    rc = ecore_func_state_change(sc, &func_params);
3970    if (rc) {
3971        BLOGE(sc, "FUNC_STOP ramrod failed. "
3972                  "Running a dry transaction (%d)\n", rc);
3973        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3974        return (ecore_func_state_change(sc, &func_params));
3975    }
3976
3977    return (0);
3978}
3979
3980static int
3981bxe_reset_hw(struct bxe_softc *sc,
3982             uint32_t         load_code)
3983{
3984    struct ecore_func_state_params func_params = { NULL };
3985
3986    /* Prepare parameters for function state transitions */
3987    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3988
3989    func_params.f_obj = &sc->func_obj;
3990    func_params.cmd = ECORE_F_CMD_HW_RESET;
3991
3992    func_params.params.hw_init.load_phase = load_code;
3993
3994    return (ecore_func_state_change(sc, &func_params));
3995}
3996
3997static void
3998bxe_int_disable_sync(struct bxe_softc *sc,
3999                     int              disable_hw)
4000{
4001    if (disable_hw) {
4002        /* prevent the HW from sending interrupts */
4003        bxe_int_disable(sc);
4004    }
4005
4006    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4007    /* make sure all ISRs are done */
4008
4009    /* XXX make sure sp_task is not running */
4010    /* cancel and flush work queues */
4011}
4012
4013static void
4014bxe_chip_cleanup(struct bxe_softc *sc,
4015                 uint32_t         unload_mode,
4016                 uint8_t          keep_link)
4017{
4018    int port = SC_PORT(sc);
4019    struct ecore_mcast_ramrod_params rparam = { NULL };
4020    uint32_t reset_code;
4021    int i, rc = 0;
4022
4023    bxe_drain_tx_queues(sc);
4024
4025    /* give HW time to discard old tx messages */
4026    DELAY(1000);
4027
4028    /* Clean all ETH MACs */
4029    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4030    if (rc < 0) {
4031        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4032    }
4033
4034    /* Clean up UC list  */
4035    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4036    if (rc < 0) {
4037        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4038    }
4039
4040    /* Disable LLH */
4041    if (!CHIP_IS_E1(sc)) {
4042        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4043    }
4044
4045    /* Set "drop all" to stop Rx */
4046
4047    /*
4048     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4049     * a race between the completion code and this code.
4050     */
4051    BXE_MCAST_LOCK(sc);
4052
4053    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4054        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4055    } else {
4056        bxe_set_storm_rx_mode(sc);
4057    }
4058
4059    /* Clean up multicast configuration */
4060    rparam.mcast_obj = &sc->mcast_obj;
4061    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4062    if (rc < 0) {
4063        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4064    }
4065
4066    BXE_MCAST_UNLOCK(sc);
4067
4068    // XXX bxe_iov_chip_cleanup(sc);
4069
4070    /*
4071     * Send the UNLOAD_REQUEST to the MCP. This will return whether
4072     * this function should perform FUNCTION, PORT, or COMMON HW
4073     * reset.
4074     */
4075    reset_code = bxe_send_unload_req(sc, unload_mode);
4076
4077    /*
4078     * (assumption: No Attention from MCP at this stage)
4079     * PMF probably in the middle of TX disable/enable transaction
4080     */
4081    rc = bxe_func_wait_started(sc);
4082    if (rc) {
4083        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4084    }
4085
4086    /*
4087     * Close multi and leading connections
4088     * Completions for ramrods are collected in a synchronous way
4089     */
4090    for (i = 0; i < sc->num_queues; i++) {
4091        if (bxe_stop_queue(sc, i)) {
4092            goto unload_error;
4093        }
4094    }
4095
4096    /*
4097     * If the SP settings have not completed by now then something has
4098     * gone very wrong.
4099     */
4100    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4101        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4102    }
4103
4104unload_error:
4105
4106    rc = bxe_func_stop(sc);
4107    if (rc) {
4108        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4109    }
4110
4111    /* disable HW interrupts */
4112    bxe_int_disable_sync(sc, TRUE);
4113
4114    /* detach interrupts */
4115    bxe_interrupt_detach(sc);
4116
4117    /* Reset the chip */
4118    rc = bxe_reset_hw(sc, reset_code);
4119    if (rc) {
4120        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4121    }
4122
4123    /* Report UNLOAD_DONE to MCP */
4124    bxe_send_unload_done(sc, keep_link);
4125}
4126
4127static void
4128bxe_disable_close_the_gate(struct bxe_softc *sc)
4129{
4130    uint32_t val;
4131    int port = SC_PORT(sc);
4132
4133    BLOGD(sc, DBG_LOAD,
4134          "Disabling 'close the gates'\n");
4135
4136    if (CHIP_IS_E1(sc)) {
4137        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4138                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4139        val = REG_RD(sc, addr);
4140        val &= ~(0x300);
4141        REG_WR(sc, addr, val);
4142    } else {
4143        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4144        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4145                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4146        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4147    }
4148}
4149
4150/*
4151 * Cleans the objects that have internal lists without sending
4152 * ramrods. Should be run when interrupts are disabled.
4153 */
4154static void
4155bxe_squeeze_objects(struct bxe_softc *sc)
4156{
4157    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4158    struct ecore_mcast_ramrod_params rparam = { NULL };
4159    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4160    int rc;
4161
4162    /* Cleanup MACs' object first... */
4163
4164    /* Wait for completion of the requested command */
4165    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4166    /* Perform a dry cleanup */
4167    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4168
4169    /* Clean ETH primary MAC */
4170    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4171    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4172                             &ramrod_flags);
4173    if (rc != 0) {
4174        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4175    }
4176
4177    /* Cleanup UC list */
4178    vlan_mac_flags = 0;
4179    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4180    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4181                             &ramrod_flags);
4182    if (rc != 0) {
4183        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4184    }
4185
4186    /* Now clean mcast object... */
4187
4188    rparam.mcast_obj = &sc->mcast_obj;
4189    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4190
4191    /* Add a DEL command... */
4192    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4193    if (rc < 0) {
4194        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4195    }
4196
4197    /* now wait until all pending commands are cleared */
4198
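    /*
     * ECORE_MCAST_CMD_CONT appears to return a positive value while
     * commands remain pending, zero once the list is empty, and a
     * negative value on error.
     */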
4199    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4200    while (rc != 0) {
4201        if (rc < 0) {
4202            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4203            return;
4204        }
4205
4206        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4207    }
4208}
4209
4210/* stop the controller */
4211static __noinline int
4212bxe_nic_unload(struct bxe_softc *sc,
4213               uint32_t         unload_mode,
4214               uint8_t          keep_link)
4215{
4216    uint8_t global = FALSE;
4217    uint32_t val;
4218    int i;
4219
4220    BXE_CORE_LOCK_ASSERT(sc);
4221
4222    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4223
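    /*
     * Briefly acquire and release each fastpath TX lock; this acts as a
     * barrier so that any transmit path already holding the lock finishes
     * and later ones see that IFF_DRV_RUNNING has been cleared.
     */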
4224    for (i = 0; i < sc->num_queues; i++) {
4225        struct bxe_fastpath *fp;
4226
4227        fp = &sc->fp[i];
4228        BXE_FP_TX_LOCK(fp);
4229        BXE_FP_TX_UNLOCK(fp);
4230    }
4231
4232    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4233
4234    /* mark driver as unloaded in shmem2 */
4235    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4236        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4237        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4238                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4239    }
4240
4241    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4242        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4243        /*
4244         * We can get here if the driver has been unloaded
4245         * during parity error recovery and is either waiting for a
4246         * leader to complete or for other functions to unload and
4247         * then ifconfig down has been issued. In this case we want to
4248         * unload and let other functions to complete a recovery
4249         * process.
4250         */
4251        sc->recovery_state = BXE_RECOVERY_DONE;
4252        sc->is_leader = 0;
4253        bxe_release_leader_lock(sc);
4254        mb();
4255
4256        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4257        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4258            " state = 0x%x\n", sc->recovery_state, sc->state);
4259        return (-1);
4260    }
4261
4262    /*
4263     * Nothing to do during unload if previous bxe_nic_load()
4264     * did not complete successfully - all resources are released.
4265     */
4266    if ((sc->state == BXE_STATE_CLOSED) ||
4267        (sc->state == BXE_STATE_ERROR)) {
4268        return (0);
4269    }
4270
4271    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4272    mb();
4273
4274    /* stop tx */
4275    bxe_tx_disable(sc);
4276
4277    sc->rx_mode = BXE_RX_MODE_NONE;
4278    /* XXX set rx mode ??? */
4279
4280    if (IS_PF(sc) && !sc->grcdump_done) {
4281        /* set ALWAYS_ALIVE bit in shmem */
4282        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4283
4284        bxe_drv_pulse(sc);
4285
4286        bxe_stats_handle(sc, STATS_EVENT_STOP);
4287        bxe_save_statistics(sc);
4288    }
4289
4290    /* wait till consumers catch up with producers in all queues */
4291    bxe_drain_tx_queues(sc);
4292
4293    /* if VF, indicate to the PF that this function is going down (the PF will
4294     * delete the sp elements and clear the initializations)
4295     */
4296    if (IS_VF(sc)) {
4297        ; /* bxe_vfpf_close_vf(sc); */
4298    } else if (unload_mode != UNLOAD_RECOVERY) {
4299        /* if this is a normal/close unload need to clean up chip */
4300        if (!sc->grcdump_done)
4301            bxe_chip_cleanup(sc, unload_mode, keep_link);
4302    } else {
4303        /* Send the UNLOAD_REQUEST to the MCP */
4304        bxe_send_unload_req(sc, unload_mode);
4305
4306        /*
4307         * Prevent transactions to host from the functions on the
4308         * engine that doesn't reset global blocks in case of global
4309         * attention once global blocks are reset and gates are opened
4310         * (the engine whose leader will perform the recovery
4311         * last).
4312         */
4313        if (!CHIP_IS_E1x(sc)) {
4314            bxe_pf_disable(sc);
4315        }
4316
4317        /* disable HW interrupts */
4318        bxe_int_disable_sync(sc, TRUE);
4319
4320        /* detach interrupts */
4321        bxe_interrupt_detach(sc);
4322
4323        /* Report UNLOAD_DONE to MCP */
4324        bxe_send_unload_done(sc, FALSE);
4325    }
4326
4327    /*
4328     * At this stage no more interrupts will arrive so we may safely clean
4329     * the queue'able objects here in case they failed to get cleaned so far.
4330     */
4331    if (IS_PF(sc)) {
4332        bxe_squeeze_objects(sc);
4333    }
4334
4335    /* There should be no more pending SP commands at this stage */
4336    sc->sp_state = 0;
4337
4338    sc->port.pmf = 0;
4339
4340    bxe_free_fp_buffers(sc);
4341
4342    if (IS_PF(sc)) {
4343        bxe_free_mem(sc);
4344    }
4345
4346    bxe_free_fw_stats_mem(sc);
4347
4348    sc->state = BXE_STATE_CLOSED;
4349
4350    /*
4351     * Check if there are pending parity attentions. If there are - set
4352     * RECOVERY_IN_PROGRESS.
4353     */
4354    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4355        bxe_set_reset_in_progress(sc);
4356
4357        /* Set RESET_IS_GLOBAL if needed */
4358        if (global) {
4359            bxe_set_reset_global(sc);
4360        }
4361    }
4362
4363    /*
4364     * The last driver must disable a "close the gate" if there is no
4365     * parity attention or "process kill" pending.
4366     */
4367    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4368        bxe_reset_is_done(sc, SC_PATH(sc))) {
4369        bxe_disable_close_the_gate(sc);
4370    }
4371
4372    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4373
4374    return (0);
4375}
4376
4377/*
4378 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4379 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4380 */
4381static int
4382bxe_ifmedia_update(struct ifnet  *ifp)
4383{
4384    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4385    struct ifmedia *ifm;
4386
4387    ifm = &sc->ifmedia;
4388
4389    /* We only support Ethernet media type. */
4390    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4391        return (EINVAL);
4392    }
4393
4394    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4395    case IFM_AUTO:
4396         break;
4397    case IFM_10G_CX4:
4398    case IFM_10G_SR:
4399    case IFM_10G_T:
4400    case IFM_10G_TWINAX:
4401    default:
4402        /* We don't support changing the media type. */
4403        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4404              IFM_SUBTYPE(ifm->ifm_media));
4405        return (EINVAL);
4406    }
4407
4408    return (0);
4409}
4410
4411/*
4412 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4413 */
4414static void
4415bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4416{
4417    struct bxe_softc *sc = if_getsoftc(ifp);
4418
4419    /* Report link down if the driver isn't running. */
4420    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4421        ifmr->ifm_active |= IFM_NONE;
4422        return;
4423    }
4424
4425    /* Setup the default interface info. */
4426    ifmr->ifm_status = IFM_AVALID;
4427    ifmr->ifm_active = IFM_ETHER;
4428
4429    if (sc->link_vars.link_up) {
4430        ifmr->ifm_status |= IFM_ACTIVE;
4431    } else {
4432        ifmr->ifm_active |= IFM_NONE;
4433        return;
4434    }
4435
4436    ifmr->ifm_active |= sc->media;
4437
4438    if (sc->link_vars.duplex == DUPLEX_FULL) {
4439        ifmr->ifm_active |= IFM_FDX;
4440    } else {
4441        ifmr->ifm_active |= IFM_HDX;
4442    }
4443}
4444
4445static void
4446bxe_handle_chip_tq(void *context,
4447                   int  pending)
4448{
4449    struct bxe_softc *sc = (struct bxe_softc *)context;
4450    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4451
4452    switch (work)
4453    {
4454
4455    case CHIP_TQ_REINIT:
4456        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4457            /* restart the interface */
4458            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4459            bxe_periodic_stop(sc);
4460            BXE_CORE_LOCK(sc);
4461            bxe_stop_locked(sc);
4462            bxe_init_locked(sc);
4463            BXE_CORE_UNLOCK(sc);
4464        }
4465        break;
4466
4467    default:
4468        break;
4469    }
4470}
4471
4472/*
4473 * Handles any IOCTL calls from the operating system.
4474 *
4475 * Returns:
4476 *   0 = Success, >0 Failure
4477 */
4478static int
4479bxe_ioctl(if_t ifp,
4480          u_long       command,
4481          caddr_t      data)
4482{
4483    struct bxe_softc *sc = if_getsoftc(ifp);
4484    struct ifreq *ifr = (struct ifreq *)data;
4485    int mask = 0;
4486    int reinit = 0;
4487    int error = 0;
4488
4489    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4490    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4491
4492    switch (command)
4493    {
4494    case SIOCSIFMTU:
4495        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4496              ifr->ifr_mtu);
4497
4498        if (sc->mtu == ifr->ifr_mtu) {
4499            /* nothing to change */
4500            break;
4501        }
4502
4503        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4504            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4505                  ifr->ifr_mtu, mtu_min, mtu_max);
4506            error = EINVAL;
4507            break;
4508        }
4509
4510        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4511                             (unsigned long)ifr->ifr_mtu);
4512	/*
4513        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4514                              (unsigned long)ifr->ifr_mtu);
4515	XXX - Not sure why it needs to be atomic
4516	*/
4517	if_setmtu(ifp, ifr->ifr_mtu);
4518        reinit = 1;
4519        break;
4520
4521    case SIOCSIFFLAGS:
4522        /* toggle the interface state up or down */
4523        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4524
4525	BXE_CORE_LOCK(sc);
4526        /* check if the interface is up */
4527        if (if_getflags(ifp) & IFF_UP) {
4528            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4529                /* set the receive mode flags */
4530                bxe_set_rx_mode(sc);
4531            } else if(sc->state != BXE_STATE_DISABLED) {
4532		bxe_init_locked(sc);
4533            }
4534        } else {
4535            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4536		bxe_periodic_stop(sc);
4537		bxe_stop_locked(sc);
4538            }
4539        }
4540	BXE_CORE_UNLOCK(sc);
4541
4542        break;
4543
4544    case SIOCADDMULTI:
4545    case SIOCDELMULTI:
4546        /* add/delete multicast addresses */
4547        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4548
4549        /* check if the interface is up */
4550        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551            /* set the receive mode flags */
4552	    BXE_CORE_LOCK(sc);
4553            bxe_set_rx_mode(sc);
4554	    BXE_CORE_UNLOCK(sc);
4555        }
4556
4557        break;
4558
4559    case SIOCSIFCAP:
4560        /* find out which capabilities have changed */
4561        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4562
4563        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4564              mask);
4565
4566        /* toggle the LRO capabilities enable flag */
4567        if (mask & IFCAP_LRO) {
4568	    if_togglecapenable(ifp, IFCAP_LRO);
4569            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4570                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4571            reinit = 1;
4572        }
4573
4574        /* toggle the TXCSUM checksum capabilities enable flag */
4575        if (mask & IFCAP_TXCSUM) {
4576	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4577            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4578                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4579            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4580                if_sethwassistbits(ifp, (CSUM_IP      |
4581                                    CSUM_TCP      |
4582                                    CSUM_UDP      |
4583                                    CSUM_TSO      |
4584                                    CSUM_TCP_IPV6 |
4585                                    CSUM_UDP_IPV6), 0);
4586            } else {
4587		if_clearhwassist(ifp); /* XXX */
4588            }
4589        }
4590
4591        /* toggle the RXCSUM checksum capabilities enable flag */
4592        if (mask & IFCAP_RXCSUM) {
4593	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4594            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4595                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4596            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4597                if_sethwassistbits(ifp, (CSUM_IP      |
4598                                    CSUM_TCP      |
4599                                    CSUM_UDP      |
4600                                    CSUM_TSO      |
4601                                    CSUM_TCP_IPV6 |
4602                                    CSUM_UDP_IPV6), 0);
4603            } else {
4604		if_clearhwassist(ifp); /* XXX */
4605            }
4606        }
4607
4608        /* toggle TSO4 capabilities enabled flag */
4609        if (mask & IFCAP_TSO4) {
4610            if_togglecapenable(ifp, IFCAP_TSO4);
4611            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4612                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4613        }
4614
4615        /* toggle TSO6 capabilities enabled flag */
4616        if (mask & IFCAP_TSO6) {
4617	    if_togglecapenable(ifp, IFCAP_TSO6);
4618            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4619                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4620        }
4621
4622        /* toggle VLAN_HWTSO capabilities enabled flag */
4623        if (mask & IFCAP_VLAN_HWTSO) {
4624
4625	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4626            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4627                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4628        }
4629
4630        /* toggle VLAN_HWCSUM capabilities enabled flag */
4631        if (mask & IFCAP_VLAN_HWCSUM) {
4632            /* XXX investigate this... */
4633            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4634            error = EINVAL;
4635        }
4636
4637        /* toggle VLAN_MTU capabilities enable flag */
4638        if (mask & IFCAP_VLAN_MTU) {
4639            /* XXX investigate this... */
4640            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4641            error = EINVAL;
4642        }
4643
4644        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4645        if (mask & IFCAP_VLAN_HWTAGGING) {
4646            /* XXX investigate this... */
4647            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4648            error = EINVAL;
4649        }
4650
4651        /* toggle VLAN_HWFILTER capabilities enabled flag */
4652        if (mask & IFCAP_VLAN_HWFILTER) {
4653            /* XXX investigate this... */
4654            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4655            error = EINVAL;
4656        }
4657
4658        /* XXX not yet...
4659         * IFCAP_WOL_MAGIC
4660         */
4661
4662        break;
4663
4664    case SIOCSIFMEDIA:
4665    case SIOCGIFMEDIA:
4666        /* set/get interface media */
4667        BLOGD(sc, DBG_IOCTL,
4668              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4669              (command & 0xff));
4670        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4671        break;
4672
4673    default:
4674        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4675              (command & 0xff));
4676        error = ether_ioctl(ifp, command, data);
4677        break;
4678    }
4679
4680    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4681        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4682              "Re-initializing hardware from IOCTL change\n");
4683	bxe_periodic_stop(sc);
4684	BXE_CORE_LOCK(sc);
4685	bxe_stop_locked(sc);
4686	bxe_init_locked(sc);
4687	BXE_CORE_UNLOCK(sc);
4688    }
4689
4690    return (error);
4691}
4692
4693static __noinline void
4694bxe_dump_mbuf(struct bxe_softc *sc,
4695              struct mbuf      *m,
4696              uint8_t          contents)
4697{
4698    char * type;
4699    int i = 0;
4700
4701    if (!(sc->debug & DBG_MBUF)) {
4702        return;
4703    }
4704
4705    if (m == NULL) {
4706        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4707        return;
4708    }
4709
4710    while (m) {
4711
4712#if __FreeBSD_version >= 1000000
4713        BLOGD(sc, DBG_MBUF,
4714              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4715              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4716
4717        if (m->m_flags & M_PKTHDR) {
4718             BLOGD(sc, DBG_MBUF,
4719                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4720                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4721                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4722        }
4723#else
4724        BLOGD(sc, DBG_MBUF,
4725              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4726              i, m, m->m_len, m->m_flags,
4727              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4728
4729        if (m->m_flags & M_PKTHDR) {
4730             BLOGD(sc, DBG_MBUF,
4731                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4732                   i, m->m_pkthdr.len, m->m_flags,
4733                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4734                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4735                   "\22M_PROMISC\23M_NOFREE",
4736                   (int)m->m_pkthdr.csum_flags,
4737                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4738                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4739                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4740                   "\14CSUM_PSEUDO_HDR");
4741        }
4742#endif /* #if __FreeBSD_version >= 1000000 */
4743
4744        if (m->m_flags & M_EXT) {
4745            switch (m->m_ext.ext_type) {
4746            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4747            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4748            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4749            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4750            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4751            case EXT_PACKET:     type = "EXT_PACKET";     break;
4752            case EXT_MBUF:       type = "EXT_MBUF";       break;
4753            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4754            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4755            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4756            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4757            default:             type = "UNKNOWN";        break;
4758            }
4759
4760            BLOGD(sc, DBG_MBUF,
4761                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4762                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4763        }
4764
4765        if (contents) {
4766            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4767        }
4768
4769        m = m->m_next;
4770        i++;
4771    }
4772}
4773
4774/*
4775 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4776 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4777 * The window: 3 bds are reserved, 1 for the headers BD plus 2 for the parse BD and the last BD.
4778 * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10.
4779 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4780 */
4781static int
4782bxe_chktso_window(struct bxe_softc  *sc,
4783                  int               nsegs,
4784                  bus_dma_segment_t *segs,
4785                  struct mbuf       *m)
4786{
4787    uint32_t num_wnds, wnd_size, wnd_sum;
4788    int32_t frag_idx, wnd_idx;
4789    unsigned short lso_mss;
4790    int defrag;
4791
4792    defrag = 0;
4793    wnd_sum = 0;
4794    wnd_size = 10;
4795    num_wnds = nsegs - wnd_size;
4796    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4797
4798    /*
4799     * The total header lengths (Eth+IP+TCP) are in the first FreeBSD mbuf, so
4800     * calculate the first window's sum of data while skipping the first
4801     * segment, assuming it holds the headers.
4802     */
4803    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4804        wnd_sum += htole16(segs[frag_idx].ds_len);
4805    }
4806
4807    /* check the first 10 bd window size */
4808    if (wnd_sum < lso_mss) {
4809        return (1);
4810    }
4811
4812    /* run through the windows */
4813    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4814        /* subtract the first segment length of the previous window (excluding the header) */
4815        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4816        /* add the next mbuf len to the len of our new window */
4817        wnd_sum += htole16(segs[frag_idx].ds_len);
4818        if (wnd_sum < lso_mss) {
4819            return (1);
4820        }
4821    }
4822
4823    return (0);
4824}
4825
4826static uint8_t
4827bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4828                    struct mbuf         *m,
4829                    uint32_t            *parsing_data)
4830{
4831    struct ether_vlan_header *eh = NULL;
4832    struct ip *ip4 = NULL;
4833    struct ip6_hdr *ip6 = NULL;
4834    caddr_t ip = NULL;
4835    struct tcphdr *th = NULL;
4836    int e_hlen, ip_hlen, l4_off;
4837    uint16_t proto;
4838
4839    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4840        /* no L4 checksum offload needed */
4841        return (0);
4842    }
4843
4844    /* get the Ethernet header */
4845    eh = mtod(m, struct ether_vlan_header *);
4846
4847    /* handle VLAN encapsulation if present */
4848    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4849        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4850        proto  = ntohs(eh->evl_proto);
4851    } else {
4852        e_hlen = ETHER_HDR_LEN;
4853        proto  = ntohs(eh->evl_encap_proto);
4854    }
4855
4856    switch (proto) {
4857    case ETHERTYPE_IP:
4858        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4859        ip4 = (m->m_len < sizeof(struct ip)) ?
4860                  (struct ip *)m->m_next->m_data :
4861                  (struct ip *)(m->m_data + e_hlen);
4862        /* ip_hl is number of 32-bit words */
4863        ip_hlen = (ip4->ip_hl << 2);
4864        ip = (caddr_t)ip4;
4865        break;
4866    case ETHERTYPE_IPV6:
4867        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4868        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4869                  (struct ip6_hdr *)m->m_next->m_data :
4870                  (struct ip6_hdr *)(m->m_data + e_hlen);
4871        /* XXX cannot support offload with IPv6 extensions */
4872        ip_hlen = sizeof(struct ip6_hdr);
4873        ip = (caddr_t)ip6;
4874        break;
4875    default:
4876        /* We can't offload in this case... */
4877        /* XXX error stat ??? */
4878        return (0);
4879    }
4880
4881    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4882    l4_off = (e_hlen + ip_hlen);
4883
4884    *parsing_data |=
4885        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4886         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4887
4888    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4889                                  CSUM_TSO |
4890                                  CSUM_TCP_IPV6)) {
4891        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4892        th = (struct tcphdr *)(ip + ip_hlen);
4893        /* th_off is number of 32-bit words */
4894        *parsing_data |= ((th->th_off <<
4895                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4896                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
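        /*
         * For example (illustrative only): an untagged IPv4 TCP frame with
         * no IP or TCP options returns 14 + 20 + 20 = 54 bytes of headers.
         */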
4897        return (l4_off + (th->th_off << 2)); /* entire header length */
4898    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4899                                         CSUM_UDP_IPV6)) {
4900        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4901        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4902    } else {
4903        /* XXX error stat ??? */
4904        return (0);
4905    }
4906}
4907
4908static uint8_t
4909bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4910                 struct mbuf                *m,
4911                 struct eth_tx_parse_bd_e1x *pbd)
4912{
4913    struct ether_vlan_header *eh = NULL;
4914    struct ip *ip4 = NULL;
4915    struct ip6_hdr *ip6 = NULL;
4916    caddr_t ip = NULL;
4917    struct tcphdr *th = NULL;
4918    struct udphdr *uh = NULL;
4919    int e_hlen, ip_hlen;
4920    uint16_t proto;
4921    uint8_t hlen;
4922    uint16_t tmp_csum;
4923    uint32_t *tmp_uh;
4924
4925    /* get the Ethernet header */
4926    eh = mtod(m, struct ether_vlan_header *);
4927
4928    /* handle VLAN encapsulation if present */
4929    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4930        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4931        proto  = ntohs(eh->evl_proto);
4932    } else {
4933        e_hlen = ETHER_HDR_LEN;
4934        proto  = ntohs(eh->evl_encap_proto);
4935    }
4936
4937    switch (proto) {
4938    case ETHERTYPE_IP:
4939        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4940        ip4 = (m->m_len < sizeof(struct ip)) ?
4941                  (struct ip *)m->m_next->m_data :
4942                  (struct ip *)(m->m_data + e_hlen);
4943        /* ip_hl is in 32-bit words; << 1 converts it to 16-bit words for the e1x PBD */
4944        ip_hlen = (ip4->ip_hl << 1);
4945        ip = (caddr_t)ip4;
4946        break;
4947    case ETHERTYPE_IPV6:
4948        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4949        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4950                  (struct ip6_hdr *)m->m_next->m_data :
4951                  (struct ip6_hdr *)(m->m_data + e_hlen);
4952        /* XXX cannot support offload with IPv6 extensions */
4953        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4954        ip = (caddr_t)ip6;
4955        break;
4956    default:
4957        /* We can't offload in this case... */
4958        /* XXX error stat ??? */
4959        return (0);
4960    }
4961
4962    hlen = (e_hlen >> 1);
4963
4964    /* note that rest of global_data is indirectly zeroed here */
4965    if (m->m_flags & M_VLANTAG) {
4966        pbd->global_data =
4967            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4968    } else {
4969        pbd->global_data = htole16(hlen);
4970    }
4971
4972    pbd->ip_hlen_w = ip_hlen;
4973
4974    hlen += pbd->ip_hlen_w;
4975
4976    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4977
4978    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4979                                  CSUM_TSO |
4980                                  CSUM_TCP_IPV6)) {
4981        th = (struct tcphdr *)(ip + (ip_hlen << 1));
4982        /* th_off is in 32-bit words; << 1 converts it to 16-bit words */
4983        hlen += (uint16_t)(th->th_off << 1);
4984    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4985                                         CSUM_UDP_IPV6)) {
4986        uh = (struct udphdr *)(ip + (ip_hlen << 1));
4987        hlen += (sizeof(struct udphdr) / 2);
4988    } else {
4989        /* valid case as only CSUM_IP was set */
4990        return (0);
4991    }
4992
4993    pbd->total_hlen_w = htole16(hlen);
4994
4995    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4996                                  CSUM_TSO |
4997                                  CSUM_TCP_IPV6)) {
4998        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4999        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5000    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5001                                         CSUM_UDP_IPV6)) {
5002        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5003
5004        /*
5005         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5006         * checksums and does not know anything about the UDP header and where
5007         * the checksum field is located. It only knows about TCP. Therefore
5008         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5009         * offload. Since the checksum field offset for TCP is 16 bytes and
5010         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5011         * bytes less than the start of the UDP header. This allows the
5012         * hardware to write the checksum in the correct spot. But the
5013         * hardware will compute a checksum which includes the last 10 bytes
5014         * of the IP header. To correct this we tweak the stack computed
5015         * pseudo checksum by folding in the calculation of the inverse
5016         * checksum for those final 10 bytes of the IP header. This allows
5017         * the correct checksum to be computed by the hardware.
5018         */
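        /*
         * Worked illustration (assumed packet layout, for clarity only):
         * with a 20-byte IPv4 header, the 10 bytes preceding the UDP header
         * are the IP checksum, source address, and destination address
         * fields. Handing the hardware a pointer 10 bytes before the UDP
         * header makes the TCP checksum offset of 16 land exactly on the UDP
         * checksum field (offset 6 within the UDP header), and folding
         * ~tmp_csum (the checksum of those 10 extra bytes) into the stack's
         * pseudo checksum cancels their contribution from the hardware's
         * calculation.
         */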
5019
5020        /* set pointer 10 bytes before UDP header */
5021        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5022
5023        /* calculate a pseudo header checksum over the first 10 bytes */
5024        tmp_csum = in_pseudo(*tmp_uh,
5025                             *(tmp_uh + 1),
5026                             *(uint16_t *)(tmp_uh + 2));
5027
5028        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5029    }
5030
5031    return (hlen * 2); /* entire header length, number of bytes */
5032}
5033
5034static void
5035bxe_set_pbd_lso_e2(struct mbuf *m,
5036                   uint32_t    *parsing_data)
5037{
5038    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5039                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5040                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5041
5042    /* XXX test for IPv6 with extension header... */
5043}
5044
5045static void
5046bxe_set_pbd_lso(struct mbuf                *m,
5047                struct eth_tx_parse_bd_e1x *pbd)
5048{
5049    struct ether_vlan_header *eh = NULL;
5050    struct ip *ip = NULL;
5051    struct tcphdr *th = NULL;
5052    int e_hlen;
5053
5054    /* get the Ethernet header */
5055    eh = mtod(m, struct ether_vlan_header *);
5056
5057    /* handle VLAN encapsulation if present */
5058    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5059                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5060
5061    /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */
5062    /* XXX assuming IPv4 */
5063    ip = (struct ip *)(m->m_data + e_hlen);
5064    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5065
5066    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5067    pbd->tcp_send_seq = ntohl(th->th_seq);
5068    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5069
5070#if 1
5071        /* XXX IPv4 */
5072        pbd->ip_id = ntohs(ip->ip_id);
5073        pbd->tcp_pseudo_csum =
5074            ntohs(in_pseudo(ip->ip_src.s_addr,
5075                            ip->ip_dst.s_addr,
5076                            htons(IPPROTO_TCP)));
5077#else
5078        /* XXX IPv6 */
5079        pbd->tcp_pseudo_csum =
5080            ntohs(in_pseudo(&ip6->ip6_src,
5081                            &ip6->ip6_dst,
5082                            htons(IPPROTO_TCP)));
5083#endif
5084
5085    pbd->global_data |=
5086        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5087}
5088
5089/*
5090 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5091 * visible to the controller.
5092 *
5093 * If an mbuf is submitted to this routine and cannot be given to the
5094 * controller (e.g. it has too many fragments) then the function may free
5095 * the mbuf and return to the caller.
5096 *
5097 * Returns:
5098 *   0 = Success, !0 = Failure
5099 *   Note the side effect that an mbuf may be freed if it causes a problem.
5100 */
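/*
 * Callers (bxe_tx_start_locked() and bxe_tx_mq_start_locked() below) check
 * the mbuf pointer again after a failure: if it is still non-NULL the frame
 * is put back on its queue and retried later; if it is NULL the frame was
 * freed here (and the mbuf_alloc_tx counter already adjusted), so the caller
 * simply moves on.
 */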
5101static int
5102bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5103{
5104    bus_dma_segment_t segs[32];
5105    struct mbuf *m0;
5106    struct bxe_sw_tx_bd *tx_buf;
5107    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5108    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5109    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5110    struct eth_tx_bd *tx_data_bd;
5111    struct eth_tx_bd *tx_total_pkt_size_bd;
5112    struct eth_tx_start_bd *tx_start_bd;
5113    uint16_t bd_prod, pkt_prod, total_pkt_size;
5114    uint8_t mac_type;
5115    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5116    struct bxe_softc *sc;
5117    uint16_t tx_bd_avail;
5118    struct ether_vlan_header *eh;
5119    uint32_t pbd_e2_parsing_data = 0;
5120    uint8_t hlen = 0;
5121    int tmp_bd;
5122    int i;
5123
5124    sc = fp->sc;
5125
5126#if __FreeBSD_version >= 800000
5127    M_ASSERTPKTHDR(*m_head);
5128#endif /* #if __FreeBSD_version >= 800000 */
5129
5130    m0 = *m_head;
5131    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5132    tx_start_bd = NULL;
5133    tx_data_bd = NULL;
5134    tx_total_pkt_size_bd = NULL;
5135
5136    /* get the H/W pointer for packets and BDs */
5137    pkt_prod = fp->tx_pkt_prod;
5138    bd_prod = fp->tx_bd_prod;
5139
5140    mac_type = UNICAST_ADDRESS;
5141
5142    /* map the mbuf into the next open DMAable memory */
5143    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5144    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5145                                    tx_buf->m_map, m0,
5146                                    segs, &nsegs, BUS_DMA_NOWAIT);
5147
5148    /* mapping errors */
5149    if(__predict_false(error != 0)) {
5150        fp->eth_q_stats.tx_dma_mapping_failure++;
5151        if (error == ENOMEM) {
5152            /* resource issue, try again later */
5153            rc = ENOMEM;
5154        } else if (error == EFBIG) {
5155            /* possibly recoverable with defragmentation */
5156            fp->eth_q_stats.mbuf_defrag_attempts++;
5157            m0 = m_defrag(*m_head, M_NOWAIT);
5158            if (m0 == NULL) {
5159                fp->eth_q_stats.mbuf_defrag_failures++;
5160                rc = ENOBUFS;
5161            } else {
5162                /* defrag successful, try mapping again */
5163                *m_head = m0;
5164                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5165                                                tx_buf->m_map, m0,
5166                                                segs, &nsegs, BUS_DMA_NOWAIT);
5167                if (error) {
5168                    fp->eth_q_stats.tx_dma_mapping_failure++;
5169                    rc = error;
5170                }
5171            }
5172        } else {
5173            /* unknown, unrecoverable mapping error */
5174            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5175            bxe_dump_mbuf(sc, m0, FALSE);
5176            rc = error;
5177        }
5178
5179        goto bxe_tx_encap_continue;
5180    }
5181
5182    tx_bd_avail = bxe_tx_avail(sc, fp);
5183
5184    /* make sure there is enough room in the send queue */
5185    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5186        /* Recoverable, try again later. */
5187        fp->eth_q_stats.tx_hw_queue_full++;
5188        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5189        rc = ENOMEM;
5190        goto bxe_tx_encap_continue;
5191    }
5192
5193    /* capture the current H/W TX chain high watermark */
5194    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5195                        (TX_BD_USABLE - tx_bd_avail))) {
5196        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5197    }
5198
5199    /* make sure it fits in the packet window */
5200    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5201        /*
5202         * The mbuf may be too big for the controller to handle. If the frame
5203         * is a TSO frame we'll need to do an additional check.
5204         */
5205        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5206            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5207                goto bxe_tx_encap_continue; /* OK to send */
5208            } else {
5209                fp->eth_q_stats.tx_window_violation_tso++;
5210            }
5211        } else {
5212            fp->eth_q_stats.tx_window_violation_std++;
5213        }
5214
5215        /* let's try to defragment this mbuf and remap it */
5216        fp->eth_q_stats.mbuf_defrag_attempts++;
5217        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5218
5219        m0 = m_defrag(*m_head, M_NOWAIT);
5220        if (m0 == NULL) {
5221            fp->eth_q_stats.mbuf_defrag_failures++;
5222            /* Ugh, just drop the frame... :( */
5223            rc = ENOBUFS;
5224        } else {
5225            /* defrag successful, try mapping again */
5226            *m_head = m0;
5227            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5228                                            tx_buf->m_map, m0,
5229                                            segs, &nsegs, BUS_DMA_NOWAIT);
5230            if (error) {
5231                fp->eth_q_stats.tx_dma_mapping_failure++;
5232                /* No sense in trying to defrag/copy chain, drop it. :( */
5233                rc = error;
5234            }
5235            else {
5236                /* if the chain is still too long then drop it */
5237                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5238                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5239                    rc = ENODEV;
5240                }
5241            }
5242        }
5243    }
5244
5245bxe_tx_encap_continue:
5246
5247    /* Check for errors */
5248    if (rc) {
5249        if (rc == ENOMEM) {
5250            /* recoverable, try again later */
5251        } else {
5252            fp->eth_q_stats.tx_soft_errors++;
5253            fp->eth_q_stats.mbuf_alloc_tx--;
5254            m_freem(*m_head);
5255            *m_head = NULL;
5256        }
5257
5258        return (rc);
5259    }
5260
5261    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5262    if (m0->m_flags & M_BCAST) {
5263        mac_type = BROADCAST_ADDRESS;
5264    } else if (m0->m_flags & M_MCAST) {
5265        mac_type = MULTICAST_ADDRESS;
5266    }
5267
5268    /* store the mbuf into the mbuf ring */
5269    tx_buf->m        = m0;
5270    tx_buf->first_bd = fp->tx_bd_prod;
5271    tx_buf->flags    = 0;
5272
5273    /* prepare the first transmit (start) BD for the mbuf */
5274    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5275
5276    BLOGD(sc, DBG_TX,
5277          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5278          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5279
5280    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5281    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5282    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5283    total_pkt_size += tx_start_bd->nbytes;
5284    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5285
5286    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5287
5288    /* all frames have at least Start BD + Parsing BD */
5289    nbds = nsegs + 1;
5290    tx_start_bd->nbd = htole16(nbds);
5291
5292    if (m0->m_flags & M_VLANTAG) {
5293        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5294        tx_start_bd->bd_flags.as_bitfield |=
5295            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5296    } else {
5297        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5298        if (IS_VF(sc)) {
5299            /* map ethernet header to find type and header length */
5300            eh = mtod(m0, struct ether_vlan_header *);
5301            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5302        } else {
5303            /* used by FW for packet accounting */
5304            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5305        }
5306    }
5307
5308    /*
5309     * add a parsing BD to the chain. The parsing BD is always added even
5310     * though it is only used for TSO and checksum offload
5311     */
5312    bd_prod = TX_BD_NEXT(bd_prod);
5313
5314    if (m0->m_pkthdr.csum_flags) {
5315        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5316            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5317            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5318        }
5319
5320        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5321            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5322                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5323        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5324            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5325                                                  ETH_TX_BD_FLAGS_IS_UDP |
5326                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5327        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5328                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5329            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5330        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5331            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5332                                                  ETH_TX_BD_FLAGS_IS_UDP);
5333        }
5334    }
5335
5336    if (!CHIP_IS_E1x(sc)) {
5337        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5338        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5339
5340        if (m0->m_pkthdr.csum_flags) {
5341            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5342        }
5343
5344        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5345                 mac_type);
5346    } else {
5347        uint16_t global_data = 0;
5348
5349        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5350        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5351
5352        if (m0->m_pkthdr.csum_flags) {
5353            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5354        }
5355
5356        SET_FLAG(global_data,
5357                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5358        pbd_e1x->global_data |= htole16(global_data);
5359    }
5360
5361    /* setup the parsing BD with TSO specific info */
5362    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5363        fp->eth_q_stats.tx_ofld_frames_lso++;
5364        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5365
5366        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5367            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5368
5369            /* split the first BD into header/data making the fw job easy */
5370            nbds++;
5371            tx_start_bd->nbd = htole16(nbds);
5372            tx_start_bd->nbytes = htole16(hlen);
5373
5374            bd_prod = TX_BD_NEXT(bd_prod);
5375
5376            /* new transmit BD after the tx_parse_bd */
5377            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5378            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5379            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5380            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5381            if (tx_total_pkt_size_bd == NULL) {
5382                tx_total_pkt_size_bd = tx_data_bd;
5383            }
5384
5385            BLOGD(sc, DBG_TX,
5386                  "TSO split header size is %d (%x:%x) nbds %d\n",
5387                  le16toh(tx_start_bd->nbytes),
5388                  le32toh(tx_start_bd->addr_hi),
5389                  le32toh(tx_start_bd->addr_lo),
5390                  nbds);
5391        }
5392
5393        if (!CHIP_IS_E1x(sc)) {
5394            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5395        } else {
5396            bxe_set_pbd_lso(m0, pbd_e1x);
5397        }
5398    }
5399
5400    if (pbd_e2_parsing_data) {
5401        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5402    }
5403
5404    /* prepare the remaining BDs; the start TX BD holds the first seg/frag */
5405    for (i = 1; i < nsegs ; i++) {
5406        bd_prod = TX_BD_NEXT(bd_prod);
5407        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5408        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5409        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5410        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5411        if (tx_total_pkt_size_bd == NULL) {
5412            tx_total_pkt_size_bd = tx_data_bd;
5413        }
5414        total_pkt_size += tx_data_bd->nbytes;
5415    }
5416
5417    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5418
5419    if (tx_total_pkt_size_bd != NULL) {
5420        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5421    }
5422
5423    if (__predict_false(sc->debug & DBG_TX)) {
5424        tmp_bd = tx_buf->first_bd;
5425        for (i = 0; i < nbds; i++)
5426        {
5427            if (i == 0) {
5428                BLOGD(sc, DBG_TX,
5429                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5430                      "bd_flags=0x%x hdr_nbds=%d\n",
5431                      tx_start_bd,
5432                      tmp_bd,
5433                      le16toh(tx_start_bd->nbd),
5434                      le16toh(tx_start_bd->vlan_or_ethertype),
5435                      tx_start_bd->bd_flags.as_bitfield,
5436                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5437            } else if (i == 1) {
5438                if (pbd_e1x) {
5439                    BLOGD(sc, DBG_TX,
5440                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5441                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5442                          "tcp_seq=%u total_hlen_w=%u\n",
5443                          pbd_e1x,
5444                          tmp_bd,
5445                          pbd_e1x->global_data,
5446                          pbd_e1x->ip_hlen_w,
5447                          pbd_e1x->ip_id,
5448                          pbd_e1x->lso_mss,
5449                          pbd_e1x->tcp_flags,
5450                          pbd_e1x->tcp_pseudo_csum,
5451                          pbd_e1x->tcp_send_seq,
5452                          le16toh(pbd_e1x->total_hlen_w));
5453                } else { /* if (pbd_e2) */
5454                    BLOGD(sc, DBG_TX,
5455                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5456                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5457                          pbd_e2,
5458                          tmp_bd,
5459                          pbd_e2->data.mac_addr.dst_hi,
5460                          pbd_e2->data.mac_addr.dst_mid,
5461                          pbd_e2->data.mac_addr.dst_lo,
5462                          pbd_e2->data.mac_addr.src_hi,
5463                          pbd_e2->data.mac_addr.src_mid,
5464                          pbd_e2->data.mac_addr.src_lo,
5465                          pbd_e2->parsing_data);
5466                }
5467            }
5468
5469            if (i != 1) { /* skip the parse bd as it doesn't hold data */
5470                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5471                BLOGD(sc, DBG_TX,
5472                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5473                      tx_data_bd,
5474                      tmp_bd,
5475                      le16toh(tx_data_bd->nbytes),
5476                      le32toh(tx_data_bd->addr_hi),
5477                      le32toh(tx_data_bd->addr_lo));
5478            }
5479
5480            tmp_bd = TX_BD_NEXT(tmp_bd);
5481        }
5482    }
5483
5484    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5485
5486    /* update TX BD producer index value for next TX */
5487    bd_prod = TX_BD_NEXT(bd_prod);
5488
5489    /*
5490     * If the chain of tx_bd's describing this frame is adjacent to or spans
5491     * an eth_tx_next_bd element then we need to increment the nbds value.
5492     */
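    /*
     * Illustration with hypothetical values: each BD page ends in an
     * eth_tx_next_bd link element that TX_BD_NEXT() steps over. If this frame
     * used nbds = 4 BDs and the new producer's index within its page is only
     * 2, the frame's BDs wrapped over a page boundary and consumed one such
     * link element, so nbds is bumped by one to account for it.
     */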
5493    if (TX_BD_IDX(bd_prod) < nbds) {
5494        nbds++;
5495    }
5496
5497    /* don't allow reordering of writes for nbd and packets */
5498    mb();
5499
5500    fp->tx_db.data.prod += nbds;
5501
5502    /* producer points to the next free tx_bd at this point */
5503    fp->tx_pkt_prod++;
5504    fp->tx_bd_prod = bd_prod;
5505
5506    DOORBELL(sc, fp->index, fp->tx_db.raw);
5507
5508    fp->eth_q_stats.tx_pkts++;
5509
5510    /* Prevent speculative reads from getting ahead of the status block. */
5511    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5512                      0, 0, BUS_SPACE_BARRIER_READ);
5513
5514    /* Prevent speculative reads from getting ahead of the doorbell. */
5515    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5516                      0, 0, BUS_SPACE_BARRIER_READ);
5517
5518    return (0);
5519}
5520
5521static void
5522bxe_tx_start_locked(struct bxe_softc    *sc,
5523                    if_t                ifp,
5524                    struct bxe_fastpath *fp)
5525{
5526    struct mbuf *m = NULL;
5527    int tx_count = 0;
5528    uint16_t tx_bd_avail;
5529
5530    BXE_FP_TX_LOCK_ASSERT(fp);
5531
5532    /* keep adding entries while there are frames to send */
5533    while (!if_sendq_empty(ifp)) {
5534
5535        /*
5536         * check for any frames to send
5537         * dequeue can still be NULL even if queue is not empty
5538         */
5539        m = if_dequeue(ifp);
5540        if (__predict_false(m == NULL)) {
5541            break;
5542        }
5543
5544        /* the mbuf now belongs to us */
5545        fp->eth_q_stats.mbuf_alloc_tx++;
5546
5547        /*
5548         * Put the frame into the transmit ring. If we don't have room,
5549         * place the mbuf back at the head of the TX queue, set the
5550         * OACTIVE flag, and wait for the NIC to drain the chain.
5551         */
5552        if (__predict_false(bxe_tx_encap(fp, &m))) {
5553            fp->eth_q_stats.tx_encap_failures++;
5554            if (m != NULL) {
5555                /* mark the TX queue as full and return the frame */
5556                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5557                if_sendq_prepend(ifp, m);
5558                fp->eth_q_stats.mbuf_alloc_tx--;
5559                fp->eth_q_stats.tx_queue_xoff++;
5560            }
5561
5562            /* stop looking for more work */
5563            break;
5564        }
5565
5566        /* the frame was enqueued successfully */
5567        tx_count++;
5568
5569        /* send a copy of the frame to any BPF listeners. */
5570        if_etherbpfmtap(ifp, m);
5571
5572        tx_bd_avail = bxe_tx_avail(sc, fp);
5573
5574        /* handle any completions if we're running low */
5575        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5576            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5577            bxe_txeof(sc, fp);
5578            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5579                break;
5580            }
5581        }
5582    }
5583
5584    /* all TX packets were dequeued and/or the tx ring is full */
5585    if (tx_count > 0) {
5586        /* reset the TX watchdog timeout timer */
5587        fp->watchdog_timer = BXE_TX_TIMEOUT;
5588    }
5589}
5590
5591/* Legacy (non-RSS) dispatch routine */
5592static void
5593bxe_tx_start(if_t ifp)
5594{
5595    struct bxe_softc *sc;
5596    struct bxe_fastpath *fp;
5597
5598    sc = if_getsoftc(ifp);
5599
5600    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5601        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5602        return;
5603    }
5604
5605    if (!sc->link_vars.link_up) {
5606        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5607        return;
5608    }
5609
5610    fp = &sc->fp[0];
5611
5612    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5613        fp->eth_q_stats.tx_queue_full_return++;
5614        return;
5615    }
5616
5617    BXE_FP_TX_LOCK(fp);
5618    bxe_tx_start_locked(sc, ifp, fp);
5619    BXE_FP_TX_UNLOCK(fp);
5620}
5621
5622#if __FreeBSD_version >= 901504
5623
5624static int
5625bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5626                       if_t                ifp,
5627                       struct bxe_fastpath *fp,
5628                       struct mbuf         *m)
5629{
5630    struct buf_ring *tx_br = fp->tx_br;
5631    struct mbuf *next;
5632    int depth, rc, tx_count;
5633    uint16_t tx_bd_avail;
5634
5635    rc = tx_count = 0;
5636
5637    BXE_FP_TX_LOCK_ASSERT(fp);
5638
5639    if (!tx_br) {
5640        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5641        return (EINVAL);
5642    }
5643
5644    if (m != NULL) {
5645        rc = drbr_enqueue(ifp, tx_br, m);
5646        if (rc != 0) {
5647            fp->eth_q_stats.tx_soft_errors++;
5648            goto bxe_tx_mq_start_locked_exit;
5649        }
5650    }
5651
5652    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5653        fp->eth_q_stats.tx_request_link_down_failures++;
5654        goto bxe_tx_mq_start_locked_exit;
5655    }
5656
5657    /* fetch the depth of the driver queue */
5658    depth = drbr_inuse_drv(ifp, tx_br);
5659    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5660        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5661    }
5662
5663    /* keep adding entries while there are frames to send */
5664    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5665        /* handle any completions if we're running low */
5666        tx_bd_avail = bxe_tx_avail(sc, fp);
5667        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5668            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5669            bxe_txeof(sc, fp);
5670            tx_bd_avail = bxe_tx_avail(sc, fp);
5671            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5672                fp->eth_q_stats.bd_avail_too_less_failures++;
5673                m_freem(next);
5674                drbr_advance(ifp, tx_br);
5675                rc = ENOBUFS;
5676                break;
5677            }
5678        }
5679
5680        /* the mbuf now belongs to us */
5681        fp->eth_q_stats.mbuf_alloc_tx++;
5682
5683        /*
5684         * Put the frame into the transmit ring. If we don't have room,
5685         * place the mbuf back at the head of the TX queue, set the
5686         * OACTIVE flag, and wait for the NIC to drain the chain.
5687         */
5688        rc = bxe_tx_encap(fp, &next);
5689        if (__predict_false(rc != 0)) {
5690            fp->eth_q_stats.tx_encap_failures++;
5691            if (next != NULL) {
5692                /* mark the TX queue as full and save the frame */
5693                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5694                drbr_putback(ifp, tx_br, next);
5695                fp->eth_q_stats.mbuf_alloc_tx--;
5696                fp->eth_q_stats.tx_frames_deferred++;
5697            } else
5698                drbr_advance(ifp, tx_br);
5699
5700            /* stop looking for more work */
5701            break;
5702        }
5703
5704        /* the transmit frame was enqueued successfully */
5705        tx_count++;
5706
5707        /* send a copy of the frame to any BPF listeners */
5708        if_etherbpfmtap(ifp, next);
5709
5710        drbr_advance(ifp, tx_br);
5711    }
5712
5713    /* all TX packets were dequeued and/or the tx ring is full */
5714    if (tx_count > 0) {
5715        /* reset the TX watchdog timeout timer */
5716        fp->watchdog_timer = BXE_TX_TIMEOUT;
5717    }
5718
5719bxe_tx_mq_start_locked_exit:
5720    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5721    if (!drbr_empty(ifp, tx_br)) {
5722        fp->eth_q_stats.tx_mq_not_empty++;
5723        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5724    }
5725
5726    return (rc);
5727}
5728
5729static void
5730bxe_tx_mq_start_deferred(void *arg,
5731                         int pending)
5732{
5733    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5734    struct bxe_softc *sc = fp->sc;
5735    if_t ifp = sc->ifp;
5736
5737    BXE_FP_TX_LOCK(fp);
5738    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5739    BXE_FP_TX_UNLOCK(fp);
5740}
5741
5742/* Multiqueue (TSS) dispatch routine. */
5743static int
5744bxe_tx_mq_start(struct ifnet *ifp,
5745                struct mbuf  *m)
5746{
5747    struct bxe_softc *sc = if_getsoftc(ifp);
5748    struct bxe_fastpath *fp;
5749    int fp_index, rc;
5750
5751    fp_index = 0; /* default is the first queue */
5752
5753    /* check if flowid is set */
5754
5755    if (BXE_VALID_FLOWID(m))
5756        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5757
5758    fp = &sc->fp[fp_index];
5759
5760    if (BXE_FP_TX_TRYLOCK(fp)) {
5761        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5762        BXE_FP_TX_UNLOCK(fp);
5763    } else {
5764        rc = drbr_enqueue(ifp, fp->tx_br, m);
5765        taskqueue_enqueue(fp->tq, &fp->tx_task);
5766    }
5767
5768    return (rc);
5769}
5770
5771static void
5772bxe_mq_flush(struct ifnet *ifp)
5773{
5774    struct bxe_softc *sc = if_getsoftc(ifp);
5775    struct bxe_fastpath *fp;
5776    struct mbuf *m;
5777    int i;
5778
5779    for (i = 0; i < sc->num_queues; i++) {
5780        fp = &sc->fp[i];
5781
5782        if (fp->state != BXE_FP_STATE_OPEN) {
5783            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5784                  fp->index, fp->state);
5785            continue;
5786        }
5787
5788        if (fp->tx_br != NULL) {
5789            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5790            BXE_FP_TX_LOCK(fp);
5791            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5792                m_freem(m);
5793            }
5794            BXE_FP_TX_UNLOCK(fp);
5795        }
5796    }
5797
5798    if_qflush(ifp);
5799}
5800
5801#endif /* FreeBSD_version >= 901504 */
5802
5803static uint16_t
5804bxe_cid_ilt_lines(struct bxe_softc *sc)
5805{
5806    if (IS_SRIOV(sc)) {
5807        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5808    }
5809    return (L2_ILT_LINES(sc));
5810}
5811
5812static void
5813bxe_ilt_set_info(struct bxe_softc *sc)
5814{
5815    struct ilt_client_info *ilt_client;
5816    struct ecore_ilt *ilt = sc->ilt;
5817    uint16_t line = 0;
5818
5819    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5820    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5821
5822    /* CDU */
5823    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5824    ilt_client->client_num = ILT_CLIENT_CDU;
5825    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5826    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5827    ilt_client->start = line;
5828    line += bxe_cid_ilt_lines(sc);
5829
5830    if (CNIC_SUPPORT(sc)) {
5831        line += CNIC_ILT_LINES;
5832    }
5833
5834    ilt_client->end = (line - 1);
5835
5836    BLOGD(sc, DBG_LOAD,
5837          "ilt client[CDU]: start %d, end %d, "
5838          "psz 0x%x, flags 0x%x, hw psz %d\n",
5839          ilt_client->start, ilt_client->end,
5840          ilt_client->page_size,
5841          ilt_client->flags,
5842          ilog2(ilt_client->page_size >> 12));
5843
5844    /* QM */
5845    if (QM_INIT(sc->qm_cid_count)) {
5846        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5847        ilt_client->client_num = ILT_CLIENT_QM;
5848        ilt_client->page_size = QM_ILT_PAGE_SZ;
5849        ilt_client->flags = 0;
5850        ilt_client->start = line;
5851
5852        /* 4 bytes for each cid */
5853        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5854                             QM_ILT_PAGE_SZ);
5855
5856        ilt_client->end = (line - 1);
5857
5858        BLOGD(sc, DBG_LOAD,
5859              "ilt client[QM]: start %d, end %d, "
5860              "psz 0x%x, flags 0x%x, hw psz %d\n",
5861              ilt_client->start, ilt_client->end,
5862              ilt_client->page_size, ilt_client->flags,
5863              ilog2(ilt_client->page_size >> 12));
5864    }
5865
5866    if (CNIC_SUPPORT(sc)) {
5867        /* SRC */
5868        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5869        ilt_client->client_num = ILT_CLIENT_SRC;
5870        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5871        ilt_client->flags = 0;
5872        ilt_client->start = line;
5873        line += SRC_ILT_LINES;
5874        ilt_client->end = (line - 1);
5875
5876        BLOGD(sc, DBG_LOAD,
5877              "ilt client[SRC]: start %d, end %d, "
5878              "psz 0x%x, flags 0x%x, hw psz %d\n",
5879              ilt_client->start, ilt_client->end,
5880              ilt_client->page_size, ilt_client->flags,
5881              ilog2(ilt_client->page_size >> 12));
5882
5883        /* TM */
5884        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5885        ilt_client->client_num = ILT_CLIENT_TM;
5886        ilt_client->page_size = TM_ILT_PAGE_SZ;
5887        ilt_client->flags = 0;
5888        ilt_client->start = line;
5889        line += TM_ILT_LINES;
5890        ilt_client->end = (line - 1);
5891
5892        BLOGD(sc, DBG_LOAD,
5893              "ilt client[TM]: start %d, end %d, "
5894              "psz 0x%x, flags 0x%x, hw psz %d\n",
5895              ilt_client->start, ilt_client->end,
5896              ilt_client->page_size, ilt_client->flags,
5897              ilog2(ilt_client->page_size >> 12));
5898    }
5899
5900    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5901}
5902
5903static void
5904bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5905{
5906    int i;
5907    uint32_t rx_buf_size;
5908
5909    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5910
5911    for (i = 0; i < sc->num_queues; i++) {
5912        if(rx_buf_size <= MCLBYTES){
5913            sc->fp[i].rx_buf_size = rx_buf_size;
5914            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5915        }else if (rx_buf_size <= MJUMPAGESIZE){
5916            sc->fp[i].rx_buf_size = rx_buf_size;
5917            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5918        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5919            sc->fp[i].rx_buf_size = MCLBYTES;
5920            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5921        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5922            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5923            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5924        }else {
5925            sc->fp[i].rx_buf_size = MCLBYTES;
5926            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5927        }
5928    }
5929}
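
/*
 * Rough sizing example (assumed cluster sizes: MCLBYTES = 2KB, MJUMPAGESIZE =
 * one page): with a standard 1500-byte MTU the required rx_buf_size is a bit
 * over 1.5KB, so each RX BD gets a single 2KB cluster. Larger jumbo MTUs move
 * to page-sized clusters, and anything beyond that is clamped back to a
 * smaller per-BD buffer size (MCLBYTES or MJUMPAGESIZE) as coded above.
 */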
5930
5931static int
5932bxe_alloc_ilt_mem(struct bxe_softc *sc)
5933{
5934    int rc = 0;
5935
5936    if ((sc->ilt =
5937         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5938                                    M_BXE_ILT,
5939                                    (M_NOWAIT | M_ZERO))) == NULL) {
5940        rc = 1;
5941    }
5942
5943    return (rc);
5944}
5945
5946static int
5947bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5948{
5949    int rc = 0;
5950
5951    if ((sc->ilt->lines =
5952         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5953                                    M_BXE_ILT,
5954                                    (M_NOWAIT | M_ZERO))) == NULL) {
5955        rc = 1;
5956    }
5957
5958    return (rc);
5959}
5960
5961static void
5962bxe_free_ilt_mem(struct bxe_softc *sc)
5963{
5964    if (sc->ilt != NULL) {
5965        free(sc->ilt, M_BXE_ILT);
5966        sc->ilt = NULL;
5967    }
5968}
5969
5970static void
5971bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5972{
5973    if (sc->ilt->lines != NULL) {
5974        free(sc->ilt->lines, M_BXE_ILT);
5975        sc->ilt->lines = NULL;
5976    }
5977}
5978
5979static void
5980bxe_free_mem(struct bxe_softc *sc)
5981{
5982    int i;
5983
5984    for (i = 0; i < L2_ILT_LINES(sc); i++) {
5985        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5986        sc->context[i].vcxt = NULL;
5987        sc->context[i].size = 0;
5988    }
5989
5990    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
5991
5992    bxe_free_ilt_lines_mem(sc);
5993
5994}
5995
5996static int
5997bxe_alloc_mem(struct bxe_softc *sc)
5998{
5999    int context_size;
6000    int allocated;
6001    int i;
6002
6003    /*
6004     * Allocate memory for CDU context:
6005     * This memory is allocated separately and not in the generic ILT
6006     * functions because CDU differs in a few aspects:
6007     * 1. There can be multiple entities allocating memory for context -
6008     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6009     * its own ILT lines.
6010     * 2. Since CDU page-size is not a single 4KB page (which is the case
6011     * for the other ILT clients), to be efficient we want to support
6012     * allocation of sub-page-size in the last entry.
6013     * 3. Context pointers are used by the driver to pass to FW / update
6014     * the context (for the other ILT clients the pointers are used just to
6015     * free the memory during unload).
6016     */
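    /*
     * Example of the allocation loop below (hypothetical sizes only): if
     * context_size works out to 2.5 * CDU_ILT_PAGE_SZ, three DMA buffers are
     * allocated - two full CDU_ILT_PAGE_SZ chunks and a final half-page
     * chunk - with "allocated" accumulating until it reaches context_size.
     */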
6017    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6018    for (i = 0, allocated = 0; allocated < context_size; i++) {
6019        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6020                                  (context_size - allocated));
6021
6022        if (bxe_dma_alloc(sc, sc->context[i].size,
6023                          &sc->context[i].vcxt_dma,
6024                          "cdu context") != 0) {
6025            bxe_free_mem(sc);
6026            return (-1);
6027        }
6028
6029        sc->context[i].vcxt =
6030            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6031
6032        allocated += sc->context[i].size;
6033    }
6034
6035    bxe_alloc_ilt_lines_mem(sc);
6036
6037    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6038          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6039    {
6040        for (i = 0; i < 4; i++) {
6041            BLOGD(sc, DBG_LOAD,
6042                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6043                  i,
6044                  sc->ilt->clients[i].page_size,
6045                  sc->ilt->clients[i].start,
6046                  sc->ilt->clients[i].end,
6047                  sc->ilt->clients[i].client_num,
6048                  sc->ilt->clients[i].flags);
6049        }
6050    }
6051    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6052        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6053        bxe_free_mem(sc);
6054        return (-1);
6055    }
6056
6057    return (0);
6058}
6059
6060static void
6061bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6062{
6063    struct bxe_softc *sc;
6064    int i;
6065
6066    sc = fp->sc;
6067
6068    if (fp->rx_mbuf_tag == NULL) {
6069        return;
6070    }
6071
6072    /* free all mbufs and unload all maps */
6073    for (i = 0; i < RX_BD_TOTAL; i++) {
6074        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6075            bus_dmamap_sync(fp->rx_mbuf_tag,
6076                            fp->rx_mbuf_chain[i].m_map,
6077                            BUS_DMASYNC_POSTREAD);
6078            bus_dmamap_unload(fp->rx_mbuf_tag,
6079                              fp->rx_mbuf_chain[i].m_map);
6080        }
6081
6082        if (fp->rx_mbuf_chain[i].m != NULL) {
6083            m_freem(fp->rx_mbuf_chain[i].m);
6084            fp->rx_mbuf_chain[i].m = NULL;
6085            fp->eth_q_stats.mbuf_alloc_rx--;
6086        }
6087    }
6088}
6089
6090static void
6091bxe_free_tpa_pool(struct bxe_fastpath *fp)
6092{
6093    struct bxe_softc *sc;
6094    int i, max_agg_queues;
6095
6096    sc = fp->sc;
6097
6098    if (fp->rx_mbuf_tag == NULL) {
6099        return;
6100    }
6101
6102    max_agg_queues = MAX_AGG_QS(sc);
6103
6104    /* release all mbufs and unload all DMA maps in the TPA pool */
6105    for (i = 0; i < max_agg_queues; i++) {
6106        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6107            bus_dmamap_sync(fp->rx_mbuf_tag,
6108                            fp->rx_tpa_info[i].bd.m_map,
6109                            BUS_DMASYNC_POSTREAD);
6110            bus_dmamap_unload(fp->rx_mbuf_tag,
6111                              fp->rx_tpa_info[i].bd.m_map);
6112        }
6113
6114        if (fp->rx_tpa_info[i].bd.m != NULL) {
6115            m_freem(fp->rx_tpa_info[i].bd.m);
6116            fp->rx_tpa_info[i].bd.m = NULL;
6117            fp->eth_q_stats.mbuf_alloc_tpa--;
6118        }
6119    }
6120}
6121
6122static void
6123bxe_free_sge_chain(struct bxe_fastpath *fp)
6124{
6125    struct bxe_softc *sc;
6126    int i;
6127
6128    sc = fp->sc;
6129
6130    if (fp->rx_sge_mbuf_tag == NULL) {
6131        return;
6132    }
6133
6134    /* free all mbufs and unload all maps */
6135    for (i = 0; i < RX_SGE_TOTAL; i++) {
6136        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6137            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6138                            fp->rx_sge_mbuf_chain[i].m_map,
6139                            BUS_DMASYNC_POSTREAD);
6140            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6141                              fp->rx_sge_mbuf_chain[i].m_map);
6142        }
6143
6144        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6145            m_freem(fp->rx_sge_mbuf_chain[i].m);
6146            fp->rx_sge_mbuf_chain[i].m = NULL;
6147            fp->eth_q_stats.mbuf_alloc_sge--;
6148        }
6149    }
6150}
6151
6152static void
6153bxe_free_fp_buffers(struct bxe_softc *sc)
6154{
6155    struct bxe_fastpath *fp;
6156    int i;
6157
6158    for (i = 0; i < sc->num_queues; i++) {
6159        fp = &sc->fp[i];
6160
6161#if __FreeBSD_version >= 901504
6162        if (fp->tx_br != NULL) {
6163            /* just in case bxe_mq_flush() wasn't called */
6164            if (mtx_initialized(&fp->tx_mtx)) {
6165                struct mbuf *m;
6166
6167                BXE_FP_TX_LOCK(fp);
6168                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6169                    m_freem(m);
6170                BXE_FP_TX_UNLOCK(fp);
6171            }
6172        }
6173#endif
6174
6175        /* free all RX buffers */
6176        bxe_free_rx_bd_chain(fp);
6177        bxe_free_tpa_pool(fp);
6178        bxe_free_sge_chain(fp);
6179
6180        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6181            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6182                  fp->eth_q_stats.mbuf_alloc_rx);
6183        }
6184
6185        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6186            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6187                  fp->eth_q_stats.mbuf_alloc_sge);
6188        }
6189
6190        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6191            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6192                  fp->eth_q_stats.mbuf_alloc_tpa);
6193        }
6194
6195        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6196            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6197                  fp->eth_q_stats.mbuf_alloc_tx);
6198        }
6199
6200        /* XXX verify all mbufs were reclaimed */
6201    }
6202}
6203
6204static int
6205bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6206                     uint16_t            prev_index,
6207                     uint16_t            index)
6208{
6209    struct bxe_sw_rx_bd *rx_buf;
6210    struct eth_rx_bd *rx_bd;
6211    bus_dma_segment_t segs[1];
6212    bus_dmamap_t map;
6213    struct mbuf *m;
6214    int nsegs, rc;
6215
6216    rc = 0;
6217
6218    /* allocate the new RX BD mbuf */
6219    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6220    if (__predict_false(m == NULL)) {
6221        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6222        return (ENOBUFS);
6223    }
6224
6225    fp->eth_q_stats.mbuf_alloc_rx++;
6226
6227    /* initialize the mbuf buffer length */
6228    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6229
6230    /* map the mbuf into non-paged pool */
6231    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6232                                 fp->rx_mbuf_spare_map,
6233                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6234    if (__predict_false(rc != 0)) {
6235        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6236        m_freem(m);
6237        fp->eth_q_stats.mbuf_alloc_rx--;
6238        return (rc);
6239    }
6240
6241    /* all mbufs must map to a single segment */
6242    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6243
6244    /* release any existing RX BD mbuf mappings */
6245
6246    if (prev_index != index) {
6247        rx_buf = &fp->rx_mbuf_chain[prev_index];
6248
6249        if (rx_buf->m_map != NULL) {
6250            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6251                            BUS_DMASYNC_POSTREAD);
6252            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6253        }
6254
6255        /*
6256         * We only get here from bxe_rxeof() when the maximum number
6257         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6258         * holds the mbuf in the prev_index so it's OK to NULL it out
6259         * here without concern of a memory leak.
6260         */
6261        fp->rx_mbuf_chain[prev_index].m = NULL;
6262    }
6263
6264    rx_buf = &fp->rx_mbuf_chain[index];
6265
6266    if (rx_buf->m_map != NULL) {
6267        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6268                        BUS_DMASYNC_POSTREAD);
6269        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6270    }
6271
6272    /* save the mbuf and mapping info for a future packet */
6273    map = (prev_index != index) ?
6274              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6275    rx_buf->m_map = fp->rx_mbuf_spare_map;
6276    fp->rx_mbuf_spare_map = map;
6277    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6278                    BUS_DMASYNC_PREREAD);
6279    rx_buf->m = m;
6280
6281    rx_bd = &fp->rx_chain[index];
6282    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6283    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6284
6285    return (rc);
6286}
6287
6288static int
6289bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6290                      int                 queue)
6291{
6292    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6293    bus_dma_segment_t segs[1];
6294    bus_dmamap_t map;
6295    struct mbuf *m;
6296    int nsegs;
6297    int rc = 0;
6298
6299    /* allocate the new TPA mbuf */
6300    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6301    if (__predict_false(m == NULL)) {
6302        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6303        return (ENOBUFS);
6304    }
6305
6306    fp->eth_q_stats.mbuf_alloc_tpa++;
6307
6308    /* initialize the mbuf buffer length */
6309    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6310
6311    /* map the mbuf into non-paged pool */
6312    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6313                                 fp->rx_tpa_info_mbuf_spare_map,
6314                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6315    if (__predict_false(rc != 0)) {
6316        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6317        m_freem(m);
6318        fp->eth_q_stats.mbuf_alloc_tpa--;
6319        return (rc);
6320    }
6321
6322    /* all mbufs must map to a single segment */
6323    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6324
6325    /* release any existing TPA mbuf mapping */
6326    if (tpa_info->bd.m_map != NULL) {
6327        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6328                        BUS_DMASYNC_POSTREAD);
6329        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6330    }
6331
6332    /* save the mbuf and mapping info for the TPA mbuf */
6333    map = tpa_info->bd.m_map;
6334    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6335    fp->rx_tpa_info_mbuf_spare_map = map;
6336    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6337                    BUS_DMASYNC_PREREAD);
6338    tpa_info->bd.m = m;
6339    tpa_info->seg = segs[0];
6340
6341    return (rc);
6342}
6343
6344/*
6345 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6346 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6347 * chain.
6348 */
6349static int
6350bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6351                      uint16_t            index)
6352{
6353    struct bxe_sw_rx_bd *sge_buf;
6354    struct eth_rx_sge *sge;
6355    bus_dma_segment_t segs[1];
6356    bus_dmamap_t map;
6357    struct mbuf *m;
6358    int nsegs;
6359    int rc = 0;
6360
6361    /* allocate a new SGE mbuf */
6362    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6363    if (__predict_false(m == NULL)) {
6364        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6365        return (ENOMEM);
6366    }
6367
6368    fp->eth_q_stats.mbuf_alloc_sge++;
6369
6370    /* initialize the mbuf buffer length */
6371    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6372
6373    /* map the SGE mbuf into non-paged pool */
6374    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6375                                 fp->rx_sge_mbuf_spare_map,
6376                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6377    if (__predict_false(rc != 0)) {
6378        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6379        m_freem(m);
6380        fp->eth_q_stats.mbuf_alloc_sge--;
6381        return (rc);
6382    }
6383
6384    /* all mbufs must map to a single segment */
6385    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6386
6387    sge_buf = &fp->rx_sge_mbuf_chain[index];
6388
6389    /* release any existing SGE mbuf mapping */
6390    if (sge_buf->m_map != NULL) {
6391        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6392                        BUS_DMASYNC_POSTREAD);
6393        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6394    }
6395
6396    /* save the mbuf and mapping info for a future packet */
6397    map = sge_buf->m_map;
6398    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6399    fp->rx_sge_mbuf_spare_map = map;
6400    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6401                    BUS_DMASYNC_PREREAD);
6402    sge_buf->m = m;
6403
6404    sge = &fp->rx_sge_chain[index];
6405    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6406    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6407
6408    return (rc);
6409}
6410
6411static __noinline int
6412bxe_alloc_fp_buffers(struct bxe_softc *sc)
6413{
6414    struct bxe_fastpath *fp;
6415    int i, j, rc = 0;
6416    int ring_prod, cqe_ring_prod;
6417    int max_agg_queues;
6418
6419    for (i = 0; i < sc->num_queues; i++) {
6420        fp = &sc->fp[i];
6421
6422        ring_prod = cqe_ring_prod = 0;
6423        fp->rx_bd_cons = 0;
6424        fp->rx_cq_cons = 0;
6425
6426        /* allocate buffers for the RX BDs in RX BD chain */
6427        for (j = 0; j < sc->max_rx_bufs; j++) {
6428            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6429            if (rc != 0) {
6430                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6431                      i, rc);
6432                goto bxe_alloc_fp_buffers_error;
6433            }
6434
6435            ring_prod     = RX_BD_NEXT(ring_prod);
6436            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6437        }
6438
6439        fp->rx_bd_prod = ring_prod;
6440        fp->rx_cq_prod = cqe_ring_prod;
6441        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6442
6443        max_agg_queues = MAX_AGG_QS(sc);
6444
6445        fp->tpa_enable = TRUE;
6446
6447        /* fill the TPA pool */
6448        for (j = 0; j < max_agg_queues; j++) {
6449            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6450            if (rc != 0) {
6451                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6452                          i, j);
6453                fp->tpa_enable = FALSE;
6454                goto bxe_alloc_fp_buffers_error;
6455            }
6456
6457            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6458        }
6459
6460        if (fp->tpa_enable) {
6461            /* fill the RX SGE chain */
6462            ring_prod = 0;
6463            for (j = 0; j < RX_SGE_USABLE; j++) {
6464                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6465                if (rc != 0) {
6466                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6467                              i, ring_prod);
6468                    fp->tpa_enable = FALSE;
6469                    ring_prod = 0;
6470                    goto bxe_alloc_fp_buffers_error;
6471                }
6472
6473                ring_prod = RX_SGE_NEXT(ring_prod);
6474            }
6475
6476            fp->rx_sge_prod = ring_prod;
6477        }
6478    }
6479
6480    return (0);
6481
6482bxe_alloc_fp_buffers_error:
6483
6484    /* unwind what was already allocated */
6485    bxe_free_rx_bd_chain(fp);
6486    bxe_free_tpa_pool(fp);
6487    bxe_free_sge_chain(fp);
6488
6489    return (ENOBUFS);
6490}
6491
6492static void
6493bxe_free_fw_stats_mem(struct bxe_softc *sc)
6494{
6495    bxe_dma_free(sc, &sc->fw_stats_dma);
6496
6497    sc->fw_stats_num = 0;
6498
6499    sc->fw_stats_req_size = 0;
6500    sc->fw_stats_req = NULL;
6501    sc->fw_stats_req_mapping = 0;
6502
6503    sc->fw_stats_data_size = 0;
6504    sc->fw_stats_data = NULL;
6505    sc->fw_stats_data_mapping = 0;
6506}
6507
6508static int
6509bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6510{
6511    uint8_t num_queue_stats;
6512    int num_groups;
6513
6514    /* number of queues for statistics is number of eth queues */
6515    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6516
6517    /*
6518     * Total number of FW statistics requests =
6519     *   1 for port stats + 1 for PF stats + num of queues
6520     */
6521    sc->fw_stats_num = (2 + num_queue_stats);
6522
6523    /*
6524     * Request is built from stats_query_header and an array of
6525     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6526     * rules. The real number of requests is configured in the
6527     * stats_query_header.
6528     */
6529    num_groups =
6530        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6531         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6532
6533    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6534          sc->fw_stats_num, num_groups);
6535
6536    sc->fw_stats_req_size =
6537        (sizeof(struct stats_query_header) +
6538         (num_groups * sizeof(struct stats_query_cmd_group)));
6539
6540    /*
6541     * Data for statistics requests + stats_counter.
6542     * stats_counter holds per-STORM counters that are incremented when
6543     * STORM has finished with the current request. Memory for FCoE
6544     * offloaded statistics is counted anyway, even if it will not be sent.
6545     * VF stats are not accounted for here as the data of VF stats is stored
6546     * in memory allocated by the VF, not here.
6547     */
6548    sc->fw_stats_data_size =
6549        (sizeof(struct stats_counter) +
6550         sizeof(struct per_port_stats) +
6551         sizeof(struct per_pf_stats) +
6552         /* sizeof(struct fcoe_statistics_params) + */
6553         (sizeof(struct per_queue_stats) * num_queue_stats));
6554
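    /*
     * One DMA buffer holds both regions back to back: the request
     * (stats_query_header plus the command groups) at offset 0, followed
     * by the data area (stats_counter, per-port, per-PF and per-queue
     * stats) at offset fw_stats_req_size. The shortcut pointers set up
     * below simply point into this single allocation.
     */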
6555    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6556                      &sc->fw_stats_dma, "fw stats") != 0) {
6557        bxe_free_fw_stats_mem(sc);
6558        return (-1);
6559    }
6560
6561    /* set up the shortcuts */
6562
6563    sc->fw_stats_req =
6564        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6565    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6566
6567    sc->fw_stats_data =
6568        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6569                                     sc->fw_stats_req_size);
6570    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6571                                 sc->fw_stats_req_size);
6572
6573    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6574          (uintmax_t)sc->fw_stats_req_mapping);
6575
6576    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6577          (uintmax_t)sc->fw_stats_data_mapping);
6578
6579    return (0);
6580}
6581
6582/*
6583 * Bits map:
6584 * 0-7  - Engine0 load counter.
6585 * 8-15 - Engine1 load counter.
6586 * 16   - Engine0 RESET_IN_PROGRESS bit.
6587 * 17   - Engine1 RESET_IN_PROGRESS bit.
6588 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6589 *        function on the engine
6590 * 19   - Engine1 ONE_IS_LOADED.
6591 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
6592 *        engines' leaders to complete (check both RESET_IN_PROGRESS bits,
6593 *        not just the one belonging to its engine).
6594 */
6595#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6596#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6597#define BXE_PATH0_LOAD_CNT_SHIFT  0
6598#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6599#define BXE_PATH1_LOAD_CNT_SHIFT  8
6600#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6601#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6602#define BXE_GLOBAL_RESET_BIT      0x00040000
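
/*
 * Each engine's 8-bit load counter is used as a bitmask with one bit per
 * absolute PF on that engine: bxe_set_pf_load() ORs in (1 << SC_ABS_FUNC)
 * and bxe_clear_pf_load() clears that bit, both under the RECOVERY_REG HW
 * lock, so the field reads non-zero while any PF on the engine is loaded.
 */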
6603
6604/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6605static void
6606bxe_set_reset_global(struct bxe_softc *sc)
6607{
6608    uint32_t val;
6609    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6610    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6611    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6612    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6613}
6614
6615/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6616static void
6617bxe_clear_reset_global(struct bxe_softc *sc)
6618{
6619    uint32_t val;
6620    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6621    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6622    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6623    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6624}
6625
6626/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6627static uint8_t
6628bxe_reset_is_global(struct bxe_softc *sc)
6629{
6630    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6631    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6632    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6633}
6634
6635/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6636static void
6637bxe_set_reset_done(struct bxe_softc *sc)
6638{
6639    uint32_t val;
6640    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6641                                 BXE_PATH0_RST_IN_PROG_BIT;
6642
6643    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6644
6645    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6646    /* Clear the bit */
6647    val &= ~bit;
6648    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6649
6650    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6651}
6652
6653/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6654static void
6655bxe_set_reset_in_progress(struct bxe_softc *sc)
6656{
6657    uint32_t val;
6658    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6659                                 BXE_PATH0_RST_IN_PROG_BIT;
6660
6661    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6662
6663    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6664    /* Set the bit */
6665    val |= bit;
6666    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6667
6668    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6669}
6670
6671/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6672static uint8_t
6673bxe_reset_is_done(struct bxe_softc *sc,
6674                  int              engine)
6675{
6676    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6677    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6678                            BXE_PATH0_RST_IN_PROG_BIT;
6679
6680    /* return false if bit is set */
6681    return (val & bit) ? FALSE : TRUE;
6682}
6683
6684/* get the load status for an engine, should be run under rtnl lock */
6685static uint8_t
6686bxe_get_load_status(struct bxe_softc *sc,
6687                    int              engine)
6688{
6689    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6690                             BXE_PATH0_LOAD_CNT_MASK;
6691    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6692                              BXE_PATH0_LOAD_CNT_SHIFT;
6693    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6694
6695    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6696
6697    val = ((val & mask) >> shift);
6698
6699    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6700
6701    return (val != 0);
6702}
6703
6704/* set pf load mark */
6705/* XXX needs to be under rtnl lock */
6706static void
6707bxe_set_pf_load(struct bxe_softc *sc)
6708{
6709    uint32_t val;
6710    uint32_t val1;
6711    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6712                                  BXE_PATH0_LOAD_CNT_MASK;
6713    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6714                                   BXE_PATH0_LOAD_CNT_SHIFT;
6715
6716    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6717
6718    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6719    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6720
6721    /* get the current counter value */
6722    val1 = ((val & mask) >> shift);
6723
6724    /* set bit of this PF */
6725    val1 |= (1 << SC_ABS_FUNC(sc));
6726
6727    /* clear the old value */
6728    val &= ~mask;
6729
6730    /* set the new one */
6731    val |= ((val1 << shift) & mask);
6732
6733    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6734
6735    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6736}
6737
6738/* clear pf load mark */
6739/* XXX needs to be under rtnl lock */
6740static uint8_t
6741bxe_clear_pf_load(struct bxe_softc *sc)
6742{
6743    uint32_t val1, val;
6744    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6745                                  BXE_PATH0_LOAD_CNT_MASK;
6746    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6747                                   BXE_PATH0_LOAD_CNT_SHIFT;
6748
6749    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6750    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6751    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6752
6753    /* get the current counter value */
6754    val1 = (val & mask) >> shift;
6755
6756    /* clear bit of that PF */
6757    val1 &= ~(1 << SC_ABS_FUNC(sc));
6758
6759    /* clear the old value */
6760    val &= ~mask;
6761
6762    /* set the new one */
6763    val |= ((val1 << shift) & mask);
6764
6765    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6766    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6767    return (val1 != 0);
6768}
6769
6770/* send load request to the MCP and analyze the response */
6771static int
6772bxe_nic_load_request(struct bxe_softc *sc,
6773                     uint32_t         *load_code)
6774{
6775    /* init fw_seq */
6776    sc->fw_seq =
6777        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6778         DRV_MSG_SEQ_NUMBER_MASK);
6779
6780    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6781
6782    /* get the current FW pulse sequence */
6783    sc->fw_drv_pulse_wr_seq =
6784        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6785         DRV_PULSE_SEQ_MASK);
6786
6787    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6788          sc->fw_drv_pulse_wr_seq);
6789
6790    /* load request */
6791    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6792                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6793
6794    /* if the MCP fails to respond we must abort */
6795    if (!(*load_code)) {
6796        BLOGE(sc, "MCP response failure!\n");
6797        return (-1);
6798    }
6799
6800    /* if MCP refused then must abort */
6801    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6802        BLOGE(sc, "MCP refused load request\n");
6803        return (-1);
6804    }
6805
6806    return (0);
6807}
6808
6809/*
6810 * Check whether another PF has already loaded FW to chip. In virtualized
6811 * environments a PF from another VM may have already initialized the device
6812 * including loading FW.
6813 */
6814static int
6815bxe_nic_load_analyze_req(struct bxe_softc *sc,
6816                         uint32_t         load_code)
6817{
6818    uint32_t my_fw, loaded_fw;
6819
6820    /* is another pf loaded on this engine? */
6821    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6822        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6823        /* build my FW version dword */
6824        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6825                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6826                 (BCM_5710_FW_REVISION_VERSION << 16) +
6827                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
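        /*
         * The version dword packs major | (minor << 8) | (rev << 16) |
         * (eng << 24). The value read back from XSEM_REG_PRAM is taken as
         * the version of the FW already loaded on the chip; a mismatch
         * means another PF is running a different FW image, so this load
         * must abort.
         */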
6828
6829        /* read loaded FW from chip */
6830        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6831        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6832              loaded_fw, my_fw);
6833
6834        /* abort nic load if version mismatch */
6835        if (my_fw != loaded_fw) {
6836            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6837                  loaded_fw, my_fw);
6838            return (-1);
6839        }
6840    }
6841
6842    return (0);
6843}
6844
6845/* mark PMF if applicable */
6846static void
6847bxe_nic_load_pmf(struct bxe_softc *sc,
6848                 uint32_t         load_code)
6849{
6850    uint32_t ncsi_oem_data_addr;
6851
6852    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6853        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6854        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6855        /*
6856         * Barrier for ordering between the write to sc->port.pmf here
6857         * and the read from the periodic task.
6858         */
6859        sc->port.pmf = 1;
6860        mb();
6861    } else {
6862        sc->port.pmf = 0;
6863    }
6864
6865    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6866
6867    /* XXX needed? */
6868    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6869        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6870            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6871            if (ncsi_oem_data_addr) {
6872                REG_WR(sc,
6873                       (ncsi_oem_data_addr +
6874                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6875                       0);
6876            }
6877        }
6878    }
6879}
6880
6881static void
6882bxe_read_mf_cfg(struct bxe_softc *sc)
6883{
6884    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6885    int abs_func;
6886    int vn;
6887
6888    if (BXE_NOMCP(sc)) {
6889        return; /* what should be the default value in this case? */
6890    }
6891
6892    /*
6893     * The formula for computing the absolute function number is...
6894     * For 2 port configuration (4 functions per port):
6895     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6896     * For 4 port configuration (2 functions per port):
6897     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6898     */
6899    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6900        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6901        if (abs_func >= E1H_FUNC_MAX) {
6902            break;
6903        }
6904        sc->devinfo.mf_info.mf_config[vn] =
6905            MFCFG_RD(sc, func_mf_config[abs_func].config);
6906    }
6907
6908    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6909        FUNC_MF_CFG_FUNC_DISABLED) {
6910        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6911        sc->flags |= BXE_MF_FUNC_DIS;
6912    } else {
6913        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6914        sc->flags &= ~BXE_MF_FUNC_DIS;
6915    }
6916}
6917
6918/* acquire split MCP access lock register */
6919static int bxe_acquire_alr(struct bxe_softc *sc)
6920{
6921    uint32_t j, val;
6922
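    /*
     * Acquire by writing bit 31 of the MCP lock register and reading it
     * back; the bit is expected to read back as set only once the split
     * MCP access lock has been granted. Retry up to 1000 times with a 5ms
     * delay (roughly 5 seconds) before giving up.
     */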
6923    for (j = 0; j < 1000; j++) {
6924        val = (1UL << 31);
6925        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6926        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6927        if (val & (1L << 31))
6928            break;
6929
6930        DELAY(5000);
6931    }
6932
6933    if (!(val & (1L << 31))) {
6934        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6935        return (-1);
6936    }
6937
6938    return (0);
6939}
6940
6941/* release split MCP access lock register */
6942static void bxe_release_alr(struct bxe_softc *sc)
6943{
6944    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6945}
6946
6947static void
6948bxe_fan_failure(struct bxe_softc *sc)
6949{
6950    int port = SC_PORT(sc);
6951    uint32_t ext_phy_config;
6952
6953    /* mark the failure */
6954    ext_phy_config =
6955        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6956
6957    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6958    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6959    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6960             ext_phy_config);
6961
6962    /* log the failure */
6963    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6964              "the card to prevent permanent damage. "
6965              "Please contact OEM Support for assistance\n");
6966
6967    /* XXX */
6968#if 1
6969    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6970#else
6971    /*
6972     * Schedule device reset (unload)
6973     * This is due to some boards consuming sufficient power when driver is
6974     * up to overheat if fan fails.
6975     */
6976    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6977    schedule_delayed_work(&sc->sp_rtnl_task, 0);
6978#endif
6979}
6980
6981/* this function is called upon a link interrupt */
6982static void
6983bxe_link_attn(struct bxe_softc *sc)
6984{
6985    uint32_t pause_enabled = 0;
6986    struct host_port_stats *pstats;
6987    int cmng_fns;
6988    struct bxe_fastpath *fp;
6989    int i;
6990
6991    /* Make sure that we are synced with the current statistics */
6992    bxe_stats_handle(sc, STATS_EVENT_STOP);
6993
6994    elink_link_update(&sc->link_params, &sc->link_vars);
6995
6996    if (sc->link_vars.link_up) {
6997
6998        /* dropless flow control */
6999        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7000            pause_enabled = 0;
7001
7002            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7003                pause_enabled = 1;
7004            }
7005
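            /*
             * Publish the negotiated TX pause state to the FW through the
             * per-port USTORM_ETH_PAUSE_ENABLED location so the dropless
             * flow control logic can track the current link settings.
             */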
7006            REG_WR(sc,
7007                   (BAR_USTRORM_INTMEM +
7008                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7009                   pause_enabled);
7010        }
7011
7012        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7013            pstats = BXE_SP(sc, port_stats);
7014            /* reset old mac stats */
7015            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7016        }
7017
7018        if (sc->state == BXE_STATE_OPEN) {
7019            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7020        }
7021
7022	/* Restart tx when the link comes back. */
7023        FOR_EACH_ETH_QUEUE(sc, i) {
7024            fp = &sc->fp[i];
7025            taskqueue_enqueue(fp->tq, &fp->tx_task);
7026	}
7027    }
7028
7029    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7030        cmng_fns = bxe_get_cmng_fns_mode(sc);
7031
7032        if (cmng_fns != CMNG_FNS_NONE) {
7033            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7034            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7035        } else {
7036            /* rate shaping and fairness are disabled */
7037            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7038        }
7039    }
7040
7041    bxe_link_report_locked(sc);
7042
7043    if (IS_MF(sc)) {
7044        ; // XXX bxe_link_sync_notify(sc);
7045    }
7046}
7047
7048static void
7049bxe_attn_int_asserted(struct bxe_softc *sc,
7050                      uint32_t         asserted)
7051{
7052    int port = SC_PORT(sc);
7053    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7054                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7055    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7056                                        NIG_REG_MASK_INTERRUPT_PORT0;
7057    uint32_t aeu_mask;
7058    uint32_t nig_mask = 0;
7059    uint32_t reg_addr;
7060    uint32_t igu_acked;
7061    uint32_t cnt;
7062
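    /*
     * Flow: mask the newly asserted AEU lines, record them in attn_state,
     * service the hard-wired sources (NIG link change under the PHY lock,
     * GPIOs and the general attentions), echo the asserted bits to the
     * HC/IGU attention-set command register, and finally restore the NIG
     * mask (after waiting for the IGU ack when the IGU is in use).
     */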
7063    if (sc->attn_state & asserted) {
7064        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7065    }
7066
7067    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7068
7069    aeu_mask = REG_RD(sc, aeu_addr);
7070
7071    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7072          aeu_mask, asserted);
7073
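    /* mask off the AEU enable bits for the lines that just asserted (only
     * the low 10 attention lines, 0x3ff, are covered by this mask) */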
7074    aeu_mask &= ~(asserted & 0x3ff);
7075
7076    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7077
7078    REG_WR(sc, aeu_addr, aeu_mask);
7079
7080    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7081
7082    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7083    sc->attn_state |= asserted;
7084    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7085
7086    if (asserted & ATTN_HARD_WIRED_MASK) {
7087        if (asserted & ATTN_NIG_FOR_FUNC) {
7088
7089	    bxe_acquire_phy_lock(sc);
7090            /* save nig interrupt mask */
7091            nig_mask = REG_RD(sc, nig_int_mask_addr);
7092
7093            /* If nig_mask is not set, no need to call the update function */
7094            if (nig_mask) {
7095                REG_WR(sc, nig_int_mask_addr, 0);
7096
7097                bxe_link_attn(sc);
7098            }
7099
7100            /* handle unicore attn? */
7101        }
7102
7103        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7104            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7105        }
7106
7107        if (asserted & GPIO_2_FUNC) {
7108            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7109        }
7110
7111        if (asserted & GPIO_3_FUNC) {
7112            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7113        }
7114
7115        if (asserted & GPIO_4_FUNC) {
7116            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7117        }
7118
7119        if (port == 0) {
7120            if (asserted & ATTN_GENERAL_ATTN_1) {
7121                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7122                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7123            }
7124            if (asserted & ATTN_GENERAL_ATTN_2) {
7125                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7126                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7127            }
7128            if (asserted & ATTN_GENERAL_ATTN_3) {
7129                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7130                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7131            }
7132        } else {
7133            if (asserted & ATTN_GENERAL_ATTN_4) {
7134                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7135                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7136            }
7137            if (asserted & ATTN_GENERAL_ATTN_5) {
7138                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7139                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7140            }
7141            if (asserted & ATTN_GENERAL_ATTN_6) {
7142                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7143                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7144            }
7145        }
7146    } /* hardwired */
7147
7148    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7149        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7150    } else {
7151        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7152    }
7153
7154    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7155          asserted,
7156          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7157    REG_WR(sc, reg_addr, asserted);
7158
7159    /* now set back the mask */
7160    if (asserted & ATTN_NIG_FOR_FUNC) {
7161        /*
7162         * Verify that IGU ack through BAR was written before restoring
7163         * NIG mask. This loop should exit after 2-3 iterations max.
7164         */
7165        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7166            cnt = 0;
7167
7168            do {
7169                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7170            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7171                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7172
7173            if (!igu_acked) {
7174                BLOGE(sc, "Failed to verify IGU ack on time\n");
7175            }
7176
7177            mb();
7178        }
7179
7180        REG_WR(sc, nig_int_mask_addr, nig_mask);
7181
7182	bxe_release_phy_lock(sc);
7183    }
7184}
7185
7186static void
7187bxe_print_next_block(struct bxe_softc *sc,
7188                     int              idx,
7189                     const char       *blk)
7190{
7191    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7192}
7193
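/*
 * The five parity decoders below share the same pattern: walk the signal
 * word bit by bit, print the name of each block whose parity bit is set
 * (bxe_print_next_block prefixes ", " after the first one) and return the
 * running count of blocks reported so far.
 */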
7194static int
7195bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7196                              uint32_t         sig,
7197                              int              par_num,
7198                              uint8_t          print)
7199{
7200    uint32_t cur_bit = 0;
7201    int i = 0;
7202
7203    for (i = 0; sig; i++) {
7204        cur_bit = ((uint32_t)0x1 << i);
7205        if (sig & cur_bit) {
7206            switch (cur_bit) {
7207            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7208                if (print)
7209                    bxe_print_next_block(sc, par_num++, "BRB");
7210                break;
7211            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7212                if (print)
7213                    bxe_print_next_block(sc, par_num++, "PARSER");
7214                break;
7215            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7216                if (print)
7217                    bxe_print_next_block(sc, par_num++, "TSDM");
7218                break;
7219            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7220                if (print)
7221                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7222                break;
7223            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7224                if (print)
7225                    bxe_print_next_block(sc, par_num++, "TCM");
7226                break;
7227            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7228                if (print)
7229                    bxe_print_next_block(sc, par_num++, "TSEMI");
7230                break;
7231            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7232                if (print)
7233                    bxe_print_next_block(sc, par_num++, "XPB");
7234                break;
7235            }
7236
7237            /* Clear the bit */
7238            sig &= ~cur_bit;
7239        }
7240    }
7241
7242    return (par_num);
7243}
7244
7245static int
7246bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7247                              uint32_t         sig,
7248                              int              par_num,
7249                              uint8_t          *global,
7250                              uint8_t          print)
7251{
7252    int i = 0;
7253    uint32_t cur_bit = 0;
7254    for (i = 0; sig; i++) {
7255        cur_bit = ((uint32_t)0x1 << i);
7256        if (sig & cur_bit) {
7257            switch (cur_bit) {
7258            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7259                if (print)
7260                    bxe_print_next_block(sc, par_num++, "PBF");
7261                break;
7262            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7263                if (print)
7264                    bxe_print_next_block(sc, par_num++, "QM");
7265                break;
7266            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7267                if (print)
7268                    bxe_print_next_block(sc, par_num++, "TM");
7269                break;
7270            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7271                if (print)
7272                    bxe_print_next_block(sc, par_num++, "XSDM");
7273                break;
7274            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7275                if (print)
7276                    bxe_print_next_block(sc, par_num++, "XCM");
7277                break;
7278            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7279                if (print)
7280                    bxe_print_next_block(sc, par_num++, "XSEMI");
7281                break;
7282            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7283                if (print)
7284                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7285                break;
7286            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7287                if (print)
7288                    bxe_print_next_block(sc, par_num++, "NIG");
7289                break;
7290            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7291                if (print)
7292                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7293                *global = TRUE;
7294                break;
7295            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7296                if (print)
7297                    bxe_print_next_block(sc, par_num++, "DEBUG");
7298                break;
7299            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7300                if (print)
7301                    bxe_print_next_block(sc, par_num++, "USDM");
7302                break;
7303            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7304                if (print)
7305                    bxe_print_next_block(sc, par_num++, "UCM");
7306                break;
7307            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7308                if (print)
7309                    bxe_print_next_block(sc, par_num++, "USEMI");
7310                break;
7311            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7312                if (print)
7313                    bxe_print_next_block(sc, par_num++, "UPB");
7314                break;
7315            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7316                if (print)
7317                    bxe_print_next_block(sc, par_num++, "CSDM");
7318                break;
7319            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7320                if (print)
7321                    bxe_print_next_block(sc, par_num++, "CCM");
7322                break;
7323            }
7324
7325            /* Clear the bit */
7326            sig &= ~cur_bit;
7327        }
7328    }
7329
7330    return (par_num);
7331}
7332
7333static int
7334bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7335                              uint32_t         sig,
7336                              int              par_num,
7337                              uint8_t          print)
7338{
7339    uint32_t cur_bit = 0;
7340    int i = 0;
7341
7342    for (i = 0; sig; i++) {
7343        cur_bit = ((uint32_t)0x1 << i);
7344        if (sig & cur_bit) {
7345            switch (cur_bit) {
7346            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7347                if (print)
7348                    bxe_print_next_block(sc, par_num++, "CSEMI");
7349                break;
7350            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7351                if (print)
7352                    bxe_print_next_block(sc, par_num++, "PXP");
7353                break;
7354            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7355                if (print)
7356                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7357                break;
7358            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7359                if (print)
7360                    bxe_print_next_block(sc, par_num++, "CFC");
7361                break;
7362            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7363                if (print)
7364                    bxe_print_next_block(sc, par_num++, "CDU");
7365                break;
7366            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7367                if (print)
7368                    bxe_print_next_block(sc, par_num++, "DMAE");
7369                break;
7370            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7371                if (print)
7372                    bxe_print_next_block(sc, par_num++, "IGU");
7373                break;
7374            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7375                if (print)
7376                    bxe_print_next_block(sc, par_num++, "MISC");
7377                break;
7378            }
7379
7380            /* Clear the bit */
7381            sig &= ~cur_bit;
7382        }
7383    }
7384
7385    return (par_num);
7386}
7387
7388static int
7389bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7390                              uint32_t         sig,
7391                              int              par_num,
7392                              uint8_t          *global,
7393                              uint8_t          print)
7394{
7395    uint32_t cur_bit = 0;
7396    int i = 0;
7397
7398    for (i = 0; sig; i++) {
7399        cur_bit = ((uint32_t)0x1 << i);
7400        if (sig & cur_bit) {
7401            switch (cur_bit) {
7402            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7403                if (print)
7404                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7405                *global = TRUE;
7406                break;
7407            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7408                if (print)
7409                    bxe_print_next_block(sc, par_num++,
7410                              "MCP UMP RX");
7411                *global = TRUE;
7412                break;
7413            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7414                if (print)
7415                    bxe_print_next_block(sc, par_num++,
7416                              "MCP UMP TX");
7417                *global = TRUE;
7418                break;
7419            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7420                if (print)
7421                    bxe_print_next_block(sc, par_num++,
7422                              "MCP SCPAD");
7423                *global = TRUE;
7424                break;
7425            }
7426
7427            /* Clear the bit */
7428            sig &= ~cur_bit;
7429        }
7430    }
7431
7432    return (par_num);
7433}
7434
7435static int
7436bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7437                              uint32_t         sig,
7438                              int              par_num,
7439                              uint8_t          print)
7440{
7441    uint32_t cur_bit = 0;
7442    int i = 0;
7443
7444    for (i = 0; sig; i++) {
7445        cur_bit = ((uint32_t)0x1 << i);
7446        if (sig & cur_bit) {
7447            switch (cur_bit) {
7448            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7449                if (print)
7450                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7451                break;
7452            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7453                if (print)
7454                    bxe_print_next_block(sc, par_num++, "ATC");
7455                break;
7456            }
7457
7458            /* Clear the bit */
7459            sig &= ~cur_bit;
7460        }
7461    }
7462
7463    return (par_num);
7464}
7465
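/*
 * Decode the (up to five) after-invert attention words against the
 * HW_PRTY_ASSERT_SET masks. Returns TRUE if any block reported a parity
 * error; *global is set when the failing block (the VAUX PCI core or one
 * of the MCP latched blocks) is treated as a global failure.
 */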
7466static uint8_t
7467bxe_parity_attn(struct bxe_softc *sc,
7468                uint8_t          *global,
7469                uint8_t          print,
7470                uint32_t         *sig)
7471{
7472    int par_num = 0;
7473
7474    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7475        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7476        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7477        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7478        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7479        BLOGE(sc, "Parity error: HW block parity attention:\n"
7480                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7481              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7482              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7483              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7484              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7485              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7486
7487        if (print)
7488            BLOGI(sc, "Parity errors detected in blocks: ");
7489
7490        par_num =
7491            bxe_check_blocks_with_parity0(sc, sig[0] &
7492                                          HW_PRTY_ASSERT_SET_0,
7493                                          par_num, print);
7494        par_num =
7495            bxe_check_blocks_with_parity1(sc, sig[1] &
7496                                          HW_PRTY_ASSERT_SET_1,
7497                                          par_num, global, print);
7498        par_num =
7499            bxe_check_blocks_with_parity2(sc, sig[2] &
7500                                          HW_PRTY_ASSERT_SET_2,
7501                                          par_num, print);
7502        par_num =
7503            bxe_check_blocks_with_parity3(sc, sig[3] &
7504                                          HW_PRTY_ASSERT_SET_3,
7505                                          par_num, global, print);
7506        par_num =
7507            bxe_check_blocks_with_parity4(sc, sig[4] &
7508                                          HW_PRTY_ASSERT_SET_4,
7509                                          par_num, print);
7510
7511        if (print)
7512            BLOGI(sc, "\n");
7513
7514        return (TRUE);
7515    }
7516
7517    return (FALSE);
7518}
7519
7520static uint8_t
7521bxe_chk_parity_attn(struct bxe_softc *sc,
7522                    uint8_t          *global,
7523                    uint8_t          print)
7524{
7525    struct attn_route attn = { {0} };
7526    int port = SC_PORT(sc);
7527
7528    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7529    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7530    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7531    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7532
7533    /*
7534     * Since MCP attentions can't be disabled inside the block, we need to
7535     * read AEU registers to see whether they're currently disabled
7536     */
7537    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7538                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7539                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7540                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7541
7542
7543    if (!CHIP_IS_E1x(sc))
7544        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7545
7546    return (bxe_parity_attn(sc, global, print, attn.sig));
7547}
7548
7549static void
7550bxe_attn_int_deasserted4(struct bxe_softc *sc,
7551                         uint32_t         attn)
7552{
7553    uint32_t val;
7554
7555    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7556        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7557        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7558        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7559            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7560        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7561            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7562        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7563            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7564        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7565            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7566        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7567            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7568        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7569            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7570        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7571            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7572        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7573            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7574        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7575            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7576    }
7577
7578    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7579        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7580        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7581        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7582            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7583        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7584            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7585        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7586            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7587        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7588            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7589        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7590            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7591        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7592            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7593    }
7594
7595    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7596                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7597        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7598              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7599                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7600    }
7601}
7602
7603static void
7604bxe_e1h_disable(struct bxe_softc *sc)
7605{
7606    int port = SC_PORT(sc);
7607
7608    bxe_tx_disable(sc);
7609
7610    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7611}
7612
7613static void
7614bxe_e1h_enable(struct bxe_softc *sc)
7615{
7616    int port = SC_PORT(sc);
7617
7618    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7619
7620    // XXX bxe_tx_enable(sc);
7621}
7622
7623/*
7624 * called due to MCP event (on pmf):
7625 *   reread new bandwidth configuration
7626 *   configure FW
7627 *   notify other functions about the change
7628 */
7629static void
7630bxe_config_mf_bw(struct bxe_softc *sc)
7631{
7632    if (sc->link_vars.link_up) {
7633        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7634        // XXX bxe_link_sync_notify(sc);
7635    }
7636
7637    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7638}
7639
7640static void
7641bxe_set_mf_bw(struct bxe_softc *sc)
7642{
7643    bxe_config_mf_bw(sc);
7644    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7645}
7646
7647static void
7648bxe_handle_eee_event(struct bxe_softc *sc)
7649{
7650    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7651    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7652}
7653
7654#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7655
7656static void
7657bxe_drv_info_ether_stat(struct bxe_softc *sc)
7658{
7659    struct eth_stats_info *ether_stat =
7660        &sc->sp->drv_info_to_mcp.ether_stat;
7661
7662    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7663            ETH_STAT_INFO_VERSION_LEN);
7664
7665    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7666    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7667                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7668                                          ether_stat->mac_local + MAC_PAD,
7669                                          MAC_PAD, ETH_ALEN);
7670
7671    ether_stat->mtu_size = sc->mtu;
7672
7673    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7674    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7675        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7676    }
7677
7678    // XXX ether_stat->feature_flags |= ???;
7679
7680    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7681
7682    ether_stat->txq_size = sc->tx_ring_size;
7683    ether_stat->rxq_size = sc->rx_ring_size;
7684}
7685
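/*
 * Service a driver-info request from the management FW: validate the
 * drv_info version, fill the requested stats block (only ETH stats are
 * supported here) into the slowpath buffer, publish that buffer's DMA
 * address through shmem2 and then ACK, or NACK on any mismatch.
 */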
7686static void
7687bxe_handle_drv_info_req(struct bxe_softc *sc)
7688{
7689    enum drv_info_opcode op_code;
7690    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7691
7692    /* if drv_info version supported by MFW doesn't match - send NACK */
7693    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7694        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7695        return;
7696    }
7697
7698    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7699               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7700
7701    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7702
7703    switch (op_code) {
7704    case ETH_STATS_OPCODE:
7705        bxe_drv_info_ether_stat(sc);
7706        break;
7707    case FCOE_STATS_OPCODE:
7708    case ISCSI_STATS_OPCODE:
7709    default:
7710        /* if op code isn't supported - send NACK */
7711        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7712        return;
7713    }
7714
7715    /*
7716     * If we got drv_info attn from MFW then these fields are defined in
7717     * shmem2 for sure
7718     */
7719    SHMEM2_WR(sc, drv_info_host_addr_lo,
7720              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7721    SHMEM2_WR(sc, drv_info_host_addr_hi,
7722              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7723
7724    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7725}
7726
7727static void
7728bxe_dcc_event(struct bxe_softc *sc,
7729              uint32_t         dcc_event)
7730{
7731    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7732
7733    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7734        /*
7735         * This is the only place besides the function initialization
7736         * where the sc->flags can change so it is done without any
7737         * locks
7738         */
7739        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7740            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7741            sc->flags |= BXE_MF_FUNC_DIS;
7742            bxe_e1h_disable(sc);
7743        } else {
7744            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7745            sc->flags &= ~BXE_MF_FUNC_DIS;
7746            bxe_e1h_enable(sc);
7747        }
7748        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7749    }
7750
7751    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7752        bxe_config_mf_bw(sc);
7753        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7754    }
7755
7756    /* Report results to MCP */
7757    if (dcc_event)
7758        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7759    else
7760        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7761}
7762
7763static void
7764bxe_pmf_update(struct bxe_softc *sc)
7765{
7766    int port = SC_PORT(sc);
7767    uint32_t val;
7768
7769    sc->port.pmf = 1;
7770    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7771
7772    /*
7773     * We need the mb() to ensure the ordering between the writing to
7774     * sc->port.pmf here and reading it from the bxe_periodic_task().
7775     */
7776    mb();
7777
7778    /* queue a periodic task */
7779    // XXX schedule task...
7780
7781    // XXX bxe_dcbx_pmf_update(sc);
7782
7783    /* enable nig attention */
7784    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7785    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7786        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7787        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7788    } else if (!CHIP_IS_E1x(sc)) {
7789        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7790        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7791    }
7792
7793    bxe_stats_handle(sc, STATS_EVENT_PMF);
7794}
7795
7796static int
7797bxe_mc_assert(struct bxe_softc *sc)
7798{
7799    char last_idx;
7800    int i, rc = 0;
7801    uint32_t row0, row1, row2, row3;
7802
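    /*
     * Each STORM keeps an assert list in its internal memory: an entry is
     * four consecutive 32-bit words, and the scan stops at the first entry
     * whose first word is COMMON_ASM_INVALID_ASSERT_OPCODE. The return
     * value is the total number of asserts found across all STORMs.
     */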
7803    /* XSTORM */
7804    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7805    if (last_idx)
7806        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7807
7808    /* print the asserts */
7809    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7810
7811        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7812        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7813        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7814        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7815
7816        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7817            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7818                  i, row3, row2, row1, row0);
7819            rc++;
7820        } else {
7821            break;
7822        }
7823    }
7824
7825    /* TSTORM */
7826    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7827    if (last_idx) {
7828        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7829    }
7830
7831    /* print the asserts */
7832    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7833
7834        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7835        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7836        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7837        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7838
7839        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7840            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7841                  i, row3, row2, row1, row0);
7842            rc++;
7843        } else {
7844            break;
7845        }
7846    }
7847
7848    /* CSTORM */
7849    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7850    if (last_idx) {
7851        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7852    }
7853
7854    /* print the asserts */
7855    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7856
7857        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7858        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7859        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7860        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7861
7862        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7863            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7864                  i, row3, row2, row1, row0);
7865            rc++;
7866        } else {
7867            break;
7868        }
7869    }
7870
7871    /* USTORM */
7872    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7873    if (last_idx) {
7874        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7875    }
7876
7877    /* print the asserts */
7878    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7879
7880        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7881        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7882        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7883        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7884
7885        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7886            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7887                  i, row3, row2, row1, row0);
7888            rc++;
7889        } else {
7890            break;
7891        }
7892    }
7893
7894    return (rc);
7895}
7896
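/*
 * Handle the group-3 general attentions: an MCP-to-driver event signalled
 * through the function's drv_status (DCC, MF bandwidth, drv_info, PMF
 * hand-over, EEE results, link sync), microcode or MCP asserts, and the
 * latched GRC timeout/reserved attentions which are cleared at the end.
 */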
7897static void
7898bxe_attn_int_deasserted3(struct bxe_softc *sc,
7899                         uint32_t         attn)
7900{
7901    int func = SC_FUNC(sc);
7902    uint32_t val;
7903
7904    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7905
7906        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7907
7908            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7909            bxe_read_mf_cfg(sc);
7910            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7911                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7912            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7913
7914            if (val & DRV_STATUS_DCC_EVENT_MASK)
7915                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7916
7917            if (val & DRV_STATUS_SET_MF_BW)
7918                bxe_set_mf_bw(sc);
7919
7920            if (val & DRV_STATUS_DRV_INFO_REQ)
7921                bxe_handle_drv_info_req(sc);
7922
7923            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7924                bxe_pmf_update(sc);
7925
7926            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7927                bxe_handle_eee_event(sc);
7928
7929            if (sc->link_vars.periodic_flags &
7930                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7931                /* sync with link */
7932		bxe_acquire_phy_lock(sc);
7933                sc->link_vars.periodic_flags &=
7934                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7935		bxe_release_phy_lock(sc);
7936                if (IS_MF(sc))
7937                    ; // XXX bxe_link_sync_notify(sc);
7938                bxe_link_report(sc);
7939            }
7940
7941            /*
7942             * Always call it here: bxe_link_report() will
7943             * prevent duplicate link indications.
7944             */
7945            bxe_link_status_update(sc);
7946
7947        } else if (attn & BXE_MC_ASSERT_BITS) {
7948
7949            BLOGE(sc, "MC assert!\n");
7950            bxe_mc_assert(sc);
7951            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7952            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7953            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7954            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7955            bxe_panic(sc, ("MC assert!\n"));
7956
7957        } else if (attn & BXE_MCP_ASSERT) {
7958
7959            BLOGE(sc, "MCP assert!\n");
7960            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7961            // XXX bxe_fw_dump(sc);
7962
7963        } else {
7964            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7965        }
7966    }
7967
7968    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
7969        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
7970        if (attn & BXE_GRC_TIMEOUT) {
7971            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
7972            BLOGE(sc, "GRC time-out 0x%08x\n", val);
7973        }
7974        if (attn & BXE_GRC_RSV) {
7975            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
7976            BLOGE(sc, "GRC reserved 0x%08x\n", val);
7977        }
7978        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
7979    }
7980}
7981
7982static void
7983bxe_attn_int_deasserted2(struct bxe_softc *sc,
7984                         uint32_t         attn)
7985{
7986    int port = SC_PORT(sc);
7987    int reg_offset;
7988    uint32_t val0, mask0, val1, mask1;
7989    uint32_t val;
7990
7991    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
7992        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
7993        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
7994        /* CFC error attention */
7995        if (val & 0x2) {
7996            BLOGE(sc, "FATAL error from CFC\n");
7997        }
7998    }
7999
8000    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8001        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8002        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8003        /* RQ_USDMDP_FIFO_OVERFLOW */
8004        if (val & 0x18000) {
8005            BLOGE(sc, "FATAL error from PXP\n");
8006        }
8007
8008        if (!CHIP_IS_E1x(sc)) {
8009            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8010            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8011        }
8012    }
8013
8014#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8015#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8016
8017    if (attn & AEU_PXP2_HW_INT_BIT) {
8018        /*  CQ47854 workaround: do not panic on
8019         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8020         */
8021        if (!CHIP_IS_E1x(sc)) {
8022            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8023            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8024            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8025            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8026            /*
8027             * If PXP2_EOP_ERROR_BIT is the only bit set in
8028             * STS0 and nothing relevant is set in STS1, clear
8029             * it by reading STS_CLR0.
8030             * We may lose additional attentions raised between
8031             * the STS0 and STS_CLR0 reads; in that case the
8032             * user will not be notified about them.
8033             */
8034            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8035                !(val1 & mask1))
8036                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8037
8038            /* print the register, since no one can restore it */
8039            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8040
8041            /*
8042             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8043             * set then notify
8044             */
8045            if (val0 & PXP2_EOP_ERROR_BIT) {
8046                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8047
8048                /*
8049                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8050                 * set then clear attention from PXP2 block without panic
8051                 */
8052                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8053                    ((val1 & mask1) == 0))
8054                    attn &= ~AEU_PXP2_HW_INT_BIT;
8055            }
8056        }
8057    }
8058
8059    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8060        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8061                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8062
8063        val = REG_RD(sc, reg_offset);
8064        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8065        REG_WR(sc, reg_offset, val);
8066
8067        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8068              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8069        bxe_panic(sc, ("HW block attention set2\n"));
8070    }
8071}
8072
8073static void
8074bxe_attn_int_deasserted1(struct bxe_softc *sc,
8075                         uint32_t         attn)
8076{
8077    int port = SC_PORT(sc);
8078    int reg_offset;
8079    uint32_t val;
8080
8081    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8082        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8083        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8084        /* DORQ discard attention */
8085        if (val & 0x2) {
8086            BLOGE(sc, "FATAL error from DORQ\n");
8087        }
8088    }
8089
8090    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8091        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8092                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8093
8094        val = REG_RD(sc, reg_offset);
8095        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8096        REG_WR(sc, reg_offset, val);
8097
8098        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8099              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8100        bxe_panic(sc, ("HW block attention set1\n"));
8101    }
8102}
8103
8104static void
8105bxe_attn_int_deasserted0(struct bxe_softc *sc,
8106                         uint32_t         attn)
8107{
8108    int port = SC_PORT(sc);
8109    int reg_offset;
8110    uint32_t val;
8111
8112    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8113                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8114
8115    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8116        val = REG_RD(sc, reg_offset);
8117        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8118        REG_WR(sc, reg_offset, val);
8119
8120        BLOGW(sc, "SPIO5 hw attention\n");
8121
8122        /* Fan failure attention */
8123        elink_hw_reset_phy(&sc->link_params);
8124        bxe_fan_failure(sc);
8125    }
8126
8127    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8128	bxe_acquire_phy_lock(sc);
8129        elink_handle_module_detect_int(&sc->link_params);
8130	bxe_release_phy_lock(sc);
8131    }
8132
8133    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8134        val = REG_RD(sc, reg_offset);
8135        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8136        REG_WR(sc, reg_offset, val);
8137
8138        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8139                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8140    }
8141}
8142
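/*
 * Handle the deassertion of attention lines. The after-invert AEU registers
 * are sampled under the ALR hardware lock (the MCP or the other port may be
 * handling the same event), each deasserted group is dispatched to the
 * per-register handlers above, the attention bits are cleared in the HC/IGU,
 * and the AEU mask for this port is re-enabled for the deasserted lines.
 */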
8143static void
8144bxe_attn_int_deasserted(struct bxe_softc *sc,
8145                        uint32_t         deasserted)
8146{
8147    struct attn_route attn;
8148    struct attn_route *group_mask;
8149    int port = SC_PORT(sc);
8150    int index;
8151    uint32_t reg_addr;
8152    uint32_t val;
8153    uint32_t aeu_mask;
8154    uint8_t global = FALSE;
8155
8156    /*
8157     * Need to take HW lock because MCP or other port might also
8158     * try to handle this event.
8159     */
8160    bxe_acquire_alr(sc);
8161
8162    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8163        /* XXX
8164         * In case of parity errors don't handle attentions so that
8165         * the other function can also "see" the parity errors.
8166         */
8167        sc->recovery_state = BXE_RECOVERY_INIT;
8168        // XXX schedule a recovery task...
8169        /* disable HW interrupts */
8170        bxe_int_disable(sc);
8171        bxe_release_alr(sc);
8172        return;
8173    }
8174
8175    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8176    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8177    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8178    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8179    if (!CHIP_IS_E1x(sc)) {
8180        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8181    } else {
8182        attn.sig[4] = 0;
8183    }
8184
8185    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8186          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8187
8188    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8189        if (deasserted & (1 << index)) {
8190            group_mask = &sc->attn_group[index];
8191
8192            BLOGD(sc, DBG_INTR,
8193                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8194                  group_mask->sig[0], group_mask->sig[1],
8195                  group_mask->sig[2], group_mask->sig[3],
8196                  group_mask->sig[4]);
8197
8198            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8199            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8200            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8201            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8202            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8203        }
8204    }
8205
8206    bxe_release_alr(sc);
8207
8208    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8209        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8210                    COMMAND_REG_ATTN_BITS_CLR);
8211    } else {
8212        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8213    }
8214
8215    val = ~deasserted;
8216    BLOGD(sc, DBG_INTR,
8217          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8218          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8219    REG_WR(sc, reg_addr, val);
8220
8221    if (~sc->attn_state & deasserted) {
8222        BLOGE(sc, "IGU error\n");
8223    }
8224
8225    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8226                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8227
8228    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8229
8230    aeu_mask = REG_RD(sc, reg_addr);
8231
8232    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8233          aeu_mask, deasserted);
8234    aeu_mask |= (deasserted & 0x3ff);
8235    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8236
8237    REG_WR(sc, reg_addr, aeu_mask);
8238    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8239
8240    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8241    sc->attn_state &= ~deasserted;
8242    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8243}
8244
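/*
 * Derive attention state changes from the default status block:
 *   asserted   = bits newly set in attn_bits that are not yet acked and
 *                not already tracked in attn_state
 *   deasserted = bits cleared in attn_bits that are still acked and still
 *                tracked in attn_state
 */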
8245static void
8246bxe_attn_int(struct bxe_softc *sc)
8247{
8248    /* read local copy of bits */
8249    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8250    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8251    uint32_t attn_state = sc->attn_state;
8252
8253    /* look for changed bits */
8254    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8255    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8256
8257    BLOGD(sc, DBG_INTR,
8258          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8259          attn_bits, attn_ack, asserted, deasserted);
8260
8261    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8262        BLOGE(sc, "BAD attention state\n");
8263    }
8264
8265    /* handle bits that were raised */
8266    if (asserted) {
8267        bxe_attn_int_asserted(sc, asserted);
8268    }
8269
8270    if (deasserted) {
8271        bxe_attn_int_deasserted(sc, deasserted);
8272    }
8273}
8274
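/*
 * Compare the driver's cached default status block indices against the
 * copies written by the chip and return a bitmask (BXE_DEF_SB_ATT_IDX,
 * BXE_DEF_SB_IDX) indicating which ones changed and therefore need service.
 */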
8275static uint16_t
8276bxe_update_dsb_idx(struct bxe_softc *sc)
8277{
8278    struct host_sp_status_block *def_sb = sc->def_sb;
8279    uint16_t rc = 0;
8280
8281    mb(); /* status block is written to by the chip */
8282
8283    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8284        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8285        rc |= BXE_DEF_SB_ATT_IDX;
8286    }
8287
8288    if (sc->def_idx != def_sb->sp_sb.running_index) {
8289        sc->def_idx = def_sb->sp_sb.running_index;
8290        rc |= BXE_DEF_SB_IDX;
8291    }
8292
8293    mb();
8294
8295    return (rc);
8296}
8297
8298static inline struct ecore_queue_sp_obj *
8299bxe_cid_to_q_obj(struct bxe_softc *sc,
8300                 uint32_t         cid)
8301{
8302    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8303    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8304}
8305
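/*
 * Multicast ramrod completion: clear the pending state on the mcast object
 * and, if more multicast commands were queued while this one was in flight,
 * continue the configuration with ECORE_MCAST_CMD_CONT.
 */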
8306static void
8307bxe_handle_mcast_eqe(struct bxe_softc *sc)
8308{
8309    struct ecore_mcast_ramrod_params rparam;
8310    int rc;
8311
8312    memset(&rparam, 0, sizeof(rparam));
8313
8314    rparam.mcast_obj = &sc->mcast_obj;
8315
8316    BXE_MCAST_LOCK(sc);
8317
8318    /* clear pending state for the last command */
8319    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8320
8321    /* if there are pending mcast commands - send them */
8322    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8323        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8324        if (rc < 0) {
8325            BLOGD(sc, DBG_SP,
8326                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8327        }
8328    }
8329
8330    BXE_MCAST_UNLOCK(sc);
8331}
8332
8333static void
8334bxe_handle_classification_eqe(struct bxe_softc      *sc,
8335                              union event_ring_elem *elem)
8336{
8337    unsigned long ramrod_flags = 0;
8338    int rc = 0;
8339    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8340    struct ecore_vlan_mac_obj *vlan_mac_obj;
8341
8342    /* always push next commands out, don't wait here */
8343    bit_set(&ramrod_flags, RAMROD_CONT);
8344
8345    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8346    case ECORE_FILTER_MAC_PENDING:
8347        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8348        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8349        break;
8350
8351    case ECORE_FILTER_MCAST_PENDING:
8352        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8353        /*
8354         * This is only relevant for 57710 where multicast MACs are
8355         * configured as unicast MACs using the same ramrod.
8356         */
8357        bxe_handle_mcast_eqe(sc);
8358        return;
8359
8360    default:
8361        BLOGE(sc, "Unsupported classification command: %d\n",
8362              elem->message.data.eth_event.echo);
8363        return;
8364    }
8365
8366    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8367
8368    if (rc < 0) {
8369        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8370    } else if (rc > 0) {
8371        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8372    }
8373}
8374
8375static void
8376bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8377                       union event_ring_elem *elem)
8378{
8379    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8380
8381    /* send rx_mode command again if was requested */
8382    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8383                               &sc->sp_state)) {
8384        bxe_set_storm_rx_mode(sc);
8385    }
8386}
8387
8388static void
8389bxe_update_eq_prod(struct bxe_softc *sc,
8390                   uint16_t         prod)
8391{
8392    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8393    wmb(); /* keep prod updates ordered */
8394}
8395
8396static void
8397bxe_eq_int(struct bxe_softc *sc)
8398{
8399    uint16_t hw_cons, sw_cons, sw_prod;
8400    union event_ring_elem *elem;
8401    uint8_t echo;
8402    uint32_t cid;
8403    uint8_t opcode;
8404    int spqe_cnt = 0;
8405    struct ecore_queue_sp_obj *q_obj;
8406    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8407    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8408
8409    hw_cons = le16toh(*sc->eq_cons_sb);
8410
8411    /*
8412     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8413     * When hw_cons lands on the last entry of a page it must be bumped past
8414     * the next-page element so the loop condition below can be met. That
8415     * element is the size of a regular element, hence the increment by 1.
8416     */
8417    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8418        hw_cons++;
8419    }
8420
8421    /*
8422     * This function never runs in parallel with itself for a given sc,
8423     * so no read memory barrier is needed here.
8424     */
8425    sw_cons = sc->eq_cons;
8426    sw_prod = sc->eq_prod;
8427
8428    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8429          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8430
8431    for (;
8432         sw_cons != hw_cons;
8433         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8434
8435        elem = &sc->eq[EQ_DESC(sw_cons)];
8436
8437        /* elem CID originates from FW, actually LE */
8438        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8439        opcode = elem->message.opcode;
8440
8441        /* handle eq element */
8442        switch (opcode) {
8443
8444        case EVENT_RING_OPCODE_STAT_QUERY:
8445            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8446                  sc->stats_comp++);
8447            /* nothing to do with stats comp */
8448            goto next_spqe;
8449
8450        case EVENT_RING_OPCODE_CFC_DEL:
8451            /* handle according to cid range */
8452            /* we may want to verify here that the sc state is HALTING */
8453            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8454            q_obj = bxe_cid_to_q_obj(sc, cid);
8455            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8456                break;
8457            }
8458            goto next_spqe;
8459
8460        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8461            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8462            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8463                break;
8464            }
8465            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8466            goto next_spqe;
8467
8468        case EVENT_RING_OPCODE_START_TRAFFIC:
8469            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8470            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8471                break;
8472            }
8473            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8474            goto next_spqe;
8475
8476        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8477            echo = elem->message.data.function_update_event.echo;
8478            if (echo == SWITCH_UPDATE) {
8479                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8480                if (f_obj->complete_cmd(sc, f_obj,
8481                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8482                    break;
8483                }
8484            }
8485            else {
8486                BLOGD(sc, DBG_SP,
8487                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8488            }
8489            goto next_spqe;
8490
8491        case EVENT_RING_OPCODE_FORWARD_SETUP:
8492            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8493            if (q_obj->complete_cmd(sc, q_obj,
8494                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8495                break;
8496            }
8497            goto next_spqe;
8498
8499        case EVENT_RING_OPCODE_FUNCTION_START:
8500            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8501            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8502                break;
8503            }
8504            goto next_spqe;
8505
8506        case EVENT_RING_OPCODE_FUNCTION_STOP:
8507            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8508            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8509                break;
8510            }
8511            goto next_spqe;
8512        }
8513
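        /*
         * The remaining opcodes are only meaningful in particular driver
         * states, so the opcode is OR'd with sc->state to form the case
         * label; the opcode values are small and the BXE_STATE_* values
         * live in higher bits, so each (opcode, state) pair yields a
         * distinct label.
         */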
8514        switch (opcode | sc->state) {
8515        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8516        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8517            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8518            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8519            rss_raw->clear_pending(rss_raw);
8520            break;
8521
8522        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8523        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8524        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8525        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8526        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8527        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8528            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8529            bxe_handle_classification_eqe(sc, elem);
8530            break;
8531
8532        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8533        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8534        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8535            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8536            bxe_handle_mcast_eqe(sc);
8537            break;
8538
8539        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8540        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8541        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8542            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8543            bxe_handle_rx_mode_eqe(sc, elem);
8544            break;
8545
8546        default:
8547            /* unknown event log error and continue */
8548            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8549                  elem->message.opcode, sc->state);
8550        }
8551
8552next_spqe:
8553        spqe_cnt++;
8554    } /* for */
8555
8556    mb();
8557    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8558
8559    sc->eq_cons = sw_cons;
8560    sc->eq_prod = sw_prod;
8561
8562    /* make sure the memory writes above are issued before the producer update */
8563    wmb();
8564
8565    /* update producer */
8566    bxe_update_eq_prod(sc, sc->eq_prod);
8567}
8568
8569static void
8570bxe_handle_sp_tq(void *context,
8571                 int  pending)
8572{
8573    struct bxe_softc *sc = (struct bxe_softc *)context;
8574    uint16_t status;
8575
8576    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8577
8578    /* what work needs to be performed? */
8579    status = bxe_update_dsb_idx(sc);
8580
8581    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8582
8583    /* HW attentions */
8584    if (status & BXE_DEF_SB_ATT_IDX) {
8585        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8586        bxe_attn_int(sc);
8587        status &= ~BXE_DEF_SB_ATT_IDX;
8588    }
8589
8590    /* SP events: STAT_QUERY and others */
8591    if (status & BXE_DEF_SB_IDX) {
8592        /* handle EQ completions */
8593        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8594        bxe_eq_int(sc);
8595        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8596                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8597        status &= ~BXE_DEF_SB_IDX;
8598    }
8599
8600    /* if status is non zero then something went wrong */
8601    if (__predict_false(status)) {
8602        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8603    }
8604
8605    /* ack status block only if something was actually handled */
8606    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8607               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8608
8609    /*
8610     * Must be called after the EQ processing (since eq leads to sriov
8611     * ramrod completion flows).
8612     * This flow may have been scheduled by the arrival of a ramrod
8613     * completion, or by the sriov code rescheduling itself.
8614     */
8615    // XXX bxe_iov_sp_task(sc);
8616
8617}
8618
8619static void
8620bxe_handle_fp_tq(void *context,
8621                 int  pending)
8622{
8623    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8624    struct bxe_softc *sc = fp->sc;
8625    uint8_t more_tx = FALSE;
8626    uint8_t more_rx = FALSE;
8627
8628    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8629
8630    /* XXX
8631     * IFF_DRV_RUNNING state can't be checked here since we process
8632     * slowpath events on a client queue during setup. Instead
8633     * we need to add a "process/continue" flag here that the driver
8634     * can use to tell the task here not to do anything.
8635     */
8636#if 0
8637    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8638        return;
8639    }
8640#endif
8641
8642    /* update the fastpath index */
8643    bxe_update_fp_sb_idx(fp);
8644
8645    /* XXX add loop here if ever support multiple tx CoS */
8646    /* fp->txdata[cos] */
8647    if (bxe_has_tx_work(fp)) {
8648        BXE_FP_TX_LOCK(fp);
8649        more_tx = bxe_txeof(sc, fp);
8650        BXE_FP_TX_UNLOCK(fp);
8651    }
8652
8653    if (bxe_has_rx_work(fp)) {
8654        more_rx = bxe_rxeof(sc, fp);
8655    }
8656
8657    if (more_rx /*|| more_tx*/) {
8658        /* still more work to do */
8659        taskqueue_enqueue(fp->tq, &fp->tq_task);
8660        return;
8661    }
8662
8663    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8664               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8665}
8666
8667static void
8668bxe_task_fp(struct bxe_fastpath *fp)
8669{
8670    struct bxe_softc *sc = fp->sc;
8671    uint8_t more_tx = FALSE;
8672    uint8_t more_rx = FALSE;
8673
8674    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8675
8676    /* update the fastpath index */
8677    bxe_update_fp_sb_idx(fp);
8678
8679    /* XXX add loop here if ever support multiple tx CoS */
8680    /* fp->txdata[cos] */
8681    if (bxe_has_tx_work(fp)) {
8682        BXE_FP_TX_LOCK(fp);
8683        more_tx = bxe_txeof(sc, fp);
8684        BXE_FP_TX_UNLOCK(fp);
8685    }
8686
8687    if (bxe_has_rx_work(fp)) {
8688        more_rx = bxe_rxeof(sc, fp);
8689    }
8690
8691    if (more_rx /*|| more_tx*/) {
8692        /* still more work to do, bail out if this ISR and process later */
8693        taskqueue_enqueue(fp->tq, &fp->tq_task);
8694        return;
8695    }
8696
8697    /*
8698     * Here we write back the fastpath index that was captured before any
8699     * tx or rx work was done. Other hw events may well have occurred since
8700     * then and were already processed above. Because an older fastpath
8701     * index is being written, another interrupt may arrive in which no
8702     * work is left to do.
8703     */
8704    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8705               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8706}
8707
8708/*
8709 * Legacy interrupt entry point.
8710 *
8711 * Verifies that the controller generated the interrupt and
8712 * then calls a separate routine to handle the various
8713 * interrupt causes: link, RX, and TX.
8714 */
8715static void
8716bxe_intr_legacy(void *xsc)
8717{
8718    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8719    struct bxe_fastpath *fp;
8720    uint16_t status, mask;
8721    int i;
8722
8723    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8724
8725    /*
8726     * 0 for ustorm, 1 for cstorm
8727     * the bits returned from ack_int() are 0-15
8728     * bit 0 = attention status block
8729     * bit 1 = fast path status block
8730     * a mask of 0x2 or more = tx/rx event
8731     * a mask of 1 = slow path event
8732     */
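    /*
     * For example, with CNIC_SUPPORT(sc) == 0 a status of 0x3 means the
     * slowpath (bit 0) and fastpath queue 0 (bit 1) both need attention;
     * the loop below peels off one fastpath bit per queue.
     */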
8733
8734    status = bxe_ack_int(sc);
8735
8736    /* the interrupt is not for us */
8737    if (__predict_false(status == 0)) {
8738        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8739        return;
8740    }
8741
8742    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8743
8744    FOR_EACH_ETH_QUEUE(sc, i) {
8745        fp = &sc->fp[i];
8746        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8747        if (status & mask) {
8748            /* acknowledge and disable further fastpath interrupts */
8749            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8750            bxe_task_fp(fp);
8751            status &= ~mask;
8752        }
8753    }
8754
8755    if (__predict_false(status & 0x1)) {
8756        /* acknowledge and disable further slowpath interrupts */
8757        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8758
8759        /* schedule slowpath handler */
8760        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8761
8762        status &= ~0x1;
8763    }
8764
8765    if (__predict_false(status)) {
8766        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8767    }
8768}
8769
8770/* slowpath interrupt entry point */
8771static void
8772bxe_intr_sp(void *xsc)
8773{
8774    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8775
8776    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8777
8778    /* acknowledge and disable further slowpath interrupts */
8779    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8780
8781    /* schedule slowpath handler */
8782    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8783}
8784
8785/* fastpath interrupt entry point */
8786static void
8787bxe_intr_fp(void *xfp)
8788{
8789    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8790    struct bxe_softc *sc = fp->sc;
8791
8792    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8793
8794    BLOGD(sc, DBG_INTR,
8795          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8796          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8797
8798    /* acknowledge and disable further fastpath interrupts */
8799    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8800
8801    bxe_task_fp(fp);
8802}
8803
8804/* Release all interrupts allocated by the driver. */
8805static void
8806bxe_interrupt_free(struct bxe_softc *sc)
8807{
8808    int i;
8809
8810    switch (sc->interrupt_mode) {
8811    case INTR_MODE_INTX:
8812        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8813        if (sc->intr[0].resource != NULL) {
8814            bus_release_resource(sc->dev,
8815                                 SYS_RES_IRQ,
8816                                 sc->intr[0].rid,
8817                                 sc->intr[0].resource);
8818        }
8819        break;
8820    case INTR_MODE_MSI:
8821        for (i = 0; i < sc->intr_count; i++) {
8822            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8823            if (sc->intr[i].resource && sc->intr[i].rid) {
8824                bus_release_resource(sc->dev,
8825                                     SYS_RES_IRQ,
8826                                     sc->intr[i].rid,
8827                                     sc->intr[i].resource);
8828            }
8829        }
8830        pci_release_msi(sc->dev);
8831        break;
8832    case INTR_MODE_MSIX:
8833        for (i = 0; i < sc->intr_count; i++) {
8834            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8835            if (sc->intr[i].resource && sc->intr[i].rid) {
8836                bus_release_resource(sc->dev,
8837                                     SYS_RES_IRQ,
8838                                     sc->intr[i].rid,
8839                                     sc->intr[i].resource);
8840            }
8841        }
8842        pci_release_msi(sc->dev);
8843        break;
8844    default:
8845        /* nothing to do as initial allocation failed */
8846        break;
8847    }
8848}
8849
8850/*
8851 * This function determines and allocates the appropriate
8852 * interrupt based on system capabilities and user request.
8853 *
8854 * The user may force a particular interrupt mode, specify
8855 * the number of receive queues, specify the method for
8856 * distributing received frames to receive queues, or use
8857 * the default settings which will automatically select the
8858 * best supported combination.  In addition, the OS may or
8859 * may not support certain combinations of these settings.
8860 * This routine attempts to reconcile the settings requested
8861 * by the user with the capabilities available from the system
8862 * to select the optimal combination of features.
8863 *
8864 * Returns:
8865 *   0 = Success, !0 = Failure.
8866 */
8867static int
8868bxe_interrupt_alloc(struct bxe_softc *sc)
8869{
8870    int msix_count = 0;
8871    int msi_count = 0;
8872    int num_requested = 0;
8873    int num_allocated = 0;
8874    int rid, i, j;
8875    int rc;
8876
8877    /* get the number of available MSI/MSI-X interrupts from the OS */
8878    if (sc->interrupt_mode > 0) {
8879        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8880            msix_count = pci_msix_count(sc->dev);
8881        }
8882
8883        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8884            msi_count = pci_msi_count(sc->dev);
8885        }
8886
8887        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8888              msi_count, msix_count);
8889    }
8890
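    /*
     * Allocation strategy: try MSI-X first (one slowpath vector plus one
     * per fastpath queue), fall back to a single MSI vector, and finally
     * to a shared legacy INTx line. Each failed attempt downgrades
     * sc->interrupt_mode so the next do/while block picks it up.
     */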
8891    do { /* try allocating MSI-X interrupt resources (at least 2) */
8892        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8893            break;
8894        }
8895
8896        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8897            (msix_count < 2)) {
8898            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8899            break;
8900        }
8901
8902        /* ask for the necessary number of MSI-X vectors */
8903        num_requested = min((sc->num_queues + 1), msix_count);
8904
8905        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8906
8907        num_allocated = num_requested;
8908        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8909            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8910            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8911            break;
8912        }
8913
8914        if (num_allocated < 2) { /* possible? */
8915            BLOGE(sc, "MSI-X allocation less than 2!\n");
8916            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8917            pci_release_msi(sc->dev);
8918            break;
8919        }
8920
8921        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8922              num_requested, num_allocated);
8923
8924        /* best effort so use the number of vectors allocated to us */
8925        sc->intr_count = num_allocated;
8926        sc->num_queues = num_allocated - 1;
8927
8928        rid = 1; /* initial resource identifier */
8929
8930        /* allocate the MSI-X vectors */
8931        for (i = 0; i < num_allocated; i++) {
8932            sc->intr[i].rid = (rid + i);
8933
8934            if ((sc->intr[i].resource =
8935                 bus_alloc_resource_any(sc->dev,
8936                                        SYS_RES_IRQ,
8937                                        &sc->intr[i].rid,
8938                                        RF_ACTIVE)) == NULL) {
8939                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8940                      i, (rid + i));
8941
8942                for (j = (i - 1); j >= 0; j--) {
8943                    bus_release_resource(sc->dev,
8944                                         SYS_RES_IRQ,
8945                                         sc->intr[j].rid,
8946                                         sc->intr[j].resource);
8947                }
8948
8949                sc->intr_count = 0;
8950                sc->num_queues = 0;
8951                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8952                pci_release_msi(sc->dev);
8953                break;
8954            }
8955
8956            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
8957        }
8958    } while (0);
8959
8960    do { /* try allocating MSI vector resources (at least 2) */
8961        if (sc->interrupt_mode != INTR_MODE_MSI) {
8962            break;
8963        }
8964
8965        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
8966            (msi_count < 1)) {
8967            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8968            break;
8969        }
8970
8971        /* ask for a single MSI vector */
8972        num_requested = 1;
8973
8974        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
8975
8976        num_allocated = num_requested;
8977        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
8978            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
8979            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8980            break;
8981        }
8982
8983        if (num_allocated != 1) { /* possible? */
8984            BLOGE(sc, "MSI allocation is not 1!\n");
8985            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8986            pci_release_msi(sc->dev);
8987            break;
8988        }
8989
8990        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
8991              num_requested, num_allocated);
8992
8993        /* best effort so use the number of vectors allocated to us */
8994        sc->intr_count = num_allocated;
8995        sc->num_queues = num_allocated;
8996
8997        rid = 1; /* initial resource identifier */
8998
8999        sc->intr[0].rid = rid;
9000
9001        if ((sc->intr[0].resource =
9002             bus_alloc_resource_any(sc->dev,
9003                                    SYS_RES_IRQ,
9004                                    &sc->intr[0].rid,
9005                                    RF_ACTIVE)) == NULL) {
9006            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9007            sc->intr_count = 0;
9008            sc->num_queues = 0;
9009            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9010            pci_release_msi(sc->dev);
9011            break;
9012        }
9013
9014        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9015    } while (0);
9016
9017    do { /* try allocating INTx vector resources */
9018        if (sc->interrupt_mode != INTR_MODE_INTX) {
9019            break;
9020        }
9021
9022        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9023
9024        /* only one vector for INTx */
9025        sc->intr_count = 1;
9026        sc->num_queues = 1;
9027
9028        rid = 0; /* initial resource identifier */
9029
9030        sc->intr[0].rid = rid;
9031
9032        if ((sc->intr[0].resource =
9033             bus_alloc_resource_any(sc->dev,
9034                                    SYS_RES_IRQ,
9035                                    &sc->intr[0].rid,
9036                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9037            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9038            sc->intr_count = 0;
9039            sc->num_queues = 0;
9040            sc->interrupt_mode = -1; /* Failed! */
9041            break;
9042        }
9043
9044        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9045    } while (0);
9046
9047    if (sc->interrupt_mode == -1) {
9048        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9049        rc = 1;
9050    } else {
9051        BLOGD(sc, DBG_LOAD,
9052              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9053              sc->interrupt_mode, sc->num_queues);
9054        rc = 0;
9055    }
9056
9057    return (rc);
9058}
9059
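/*
 * Tear down the interrupt handlers and task queues: detach each ISR that
 * was attached, then drain and free the per-fastpath and slowpath
 * taskqueues so no deferred work remains once interrupts are gone.
 */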
9060static void
9061bxe_interrupt_detach(struct bxe_softc *sc)
9062{
9063    struct bxe_fastpath *fp;
9064    int i;
9065
9066    /* release interrupt resources */
9067    for (i = 0; i < sc->intr_count; i++) {
9068        if (sc->intr[i].resource && sc->intr[i].tag) {
9069            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9070            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9071        }
9072    }
9073
9074    for (i = 0; i < sc->num_queues; i++) {
9075        fp = &sc->fp[i];
9076        if (fp->tq) {
9077            taskqueue_drain(fp->tq, &fp->tq_task);
9078            taskqueue_drain(fp->tq, &fp->tx_task);
9079            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9080                NULL))
9081                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9082            taskqueue_free(fp->tq);
9083            fp->tq = NULL;
9084        }
9085    }
9086
9087
9088    if (sc->sp_tq) {
9089        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9090        taskqueue_free(sc->sp_tq);
9091        sc->sp_tq = NULL;
9092    }
9093}
9094
9095/*
9096 * Enables interrupts and attach to the ISR.
9097 *
9098 * When using multiple MSI/MSI-X vectors the first vector
9099 * is used for slowpath operations while all remaining
9100 * vectors are used for fastpath operations.  If only a
9101 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9102 * ISR must look for both slowpath and fastpath completions.
9103 */
9104static int
9105bxe_interrupt_attach(struct bxe_softc *sc)
9106{
9107    struct bxe_fastpath *fp;
9108    int rc = 0;
9109    int i;
9110
9111    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9112             "bxe%d_sp_tq", sc->unit);
9113    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9114    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9115                                 taskqueue_thread_enqueue,
9116                                 &sc->sp_tq);
9117    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9118                            "%s", sc->sp_tq_name);
9119
9120
9121    for (i = 0; i < sc->num_queues; i++) {
9122        fp = &sc->fp[i];
9123        snprintf(fp->tq_name, sizeof(fp->tq_name),
9124                 "bxe%d_fp%d_tq", sc->unit, i);
9125        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9126        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9127        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9128                                  taskqueue_thread_enqueue,
9129                                  &fp->tq);
9130        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9131                          bxe_tx_mq_start_deferred, fp);
9132        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9133                                "%s", fp->tq_name);
9134    }
9135
9136    /* setup interrupt handlers */
9137    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9138        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9139
9140        /*
9141         * Setup the interrupt handler. Note that we pass the driver instance
9142         * to the interrupt handler for the slowpath.
9143         */
9144        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9145                                 (INTR_TYPE_NET | INTR_MPSAFE),
9146                                 NULL, bxe_intr_sp, sc,
9147                                 &sc->intr[0].tag)) != 0) {
9148            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9149            goto bxe_interrupt_attach_exit;
9150        }
9151
9152        bus_describe_intr(sc->dev, sc->intr[0].resource,
9153                          sc->intr[0].tag, "sp");
9154
9155        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9156
9157        /* initialize the fastpath vectors (note the first was used for sp) */
9158        for (i = 0; i < sc->num_queues; i++) {
9159            fp = &sc->fp[i];
9160            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9161
9162            /*
9163             * Setup the interrupt handler. Note that we pass the
9164             * fastpath context to the interrupt handler in this
9165             * case.
9166             */
9167            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9168                                     (INTR_TYPE_NET | INTR_MPSAFE),
9169                                     NULL, bxe_intr_fp, fp,
9170                                     &sc->intr[i + 1].tag)) != 0) {
9171                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9172                      (i + 1), rc);
9173                goto bxe_interrupt_attach_exit;
9174            }
9175
9176            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9177                              sc->intr[i + 1].tag, "fp%02d", i);
9178
9179            /* bind the fastpath instance to a cpu */
9180            if (sc->num_queues > 1) {
9181                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9182            }
9183
9184            fp->state = BXE_FP_STATE_IRQ;
9185        }
9186    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9187        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9188
9189        /*
9190         * Setup the interrupt handler. Note that we pass the
9191         * driver instance to the interrupt handler which
9192         * will handle both the slowpath and fastpath.
9193         */
9194        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9195                                 (INTR_TYPE_NET | INTR_MPSAFE),
9196                                 NULL, bxe_intr_legacy, sc,
9197                                 &sc->intr[0].tag)) != 0) {
9198            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9199            goto bxe_interrupt_attach_exit;
9200        }
9201
9202    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9203        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9204
9205        /*
9206         * Setup the interrupt handler. Note that we pass the
9207         * driver instance to the interrupt handler which
9208         * will handle both the slowpath and fastpath.
9209         */
9210        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9211                                 (INTR_TYPE_NET | INTR_MPSAFE),
9212                                 NULL, bxe_intr_legacy, sc,
9213                                 &sc->intr[0].tag)) != 0) {
9214            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9215            goto bxe_interrupt_attach_exit;
9216        }
9217    }
9218
9219bxe_interrupt_attach_exit:
9220
9221    return (rc);
9222}
9223
9224static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9225static int  bxe_init_hw_common(struct bxe_softc *sc);
9226static int  bxe_init_hw_port(struct bxe_softc *sc);
9227static int  bxe_init_hw_func(struct bxe_softc *sc);
9228static void bxe_reset_common(struct bxe_softc *sc);
9229static void bxe_reset_port(struct bxe_softc *sc);
9230static void bxe_reset_func(struct bxe_softc *sc);
9231static int  bxe_gunzip_init(struct bxe_softc *sc);
9232static void bxe_gunzip_end(struct bxe_softc *sc);
9233static int  bxe_init_firmware(struct bxe_softc *sc);
9234static void bxe_release_firmware(struct bxe_softc *sc);
9235
9236static struct
9237ecore_func_sp_drv_ops bxe_func_sp_drv = {
9238    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9239    .init_hw_cmn      = bxe_init_hw_common,
9240    .init_hw_port     = bxe_init_hw_port,
9241    .init_hw_func     = bxe_init_hw_func,
9242
9243    .reset_hw_cmn     = bxe_reset_common,
9244    .reset_hw_port    = bxe_reset_port,
9245    .reset_hw_func    = bxe_reset_func,
9246
9247    .gunzip_init      = bxe_gunzip_init,
9248    .gunzip_end       = bxe_gunzip_end,
9249
9250    .init_fw          = bxe_init_firmware,
9251    .release_fw       = bxe_release_firmware,
9252};
9253
9254static void
9255bxe_init_func_obj(struct bxe_softc *sc)
9256{
9257    sc->dmae_ready = 0;
9258
9259    ecore_init_func_obj(sc,
9260                        &sc->func_obj,
9261                        BXE_SP(sc, func_rdata),
9262                        BXE_SP_MAPPING(sc, func_rdata),
9263                        BXE_SP(sc, func_afex_rdata),
9264                        BXE_SP_MAPPING(sc, func_afex_rdata),
9265                        &bxe_func_sp_drv);
9266}
9267
9268static int
9269bxe_init_hw(struct bxe_softc *sc,
9270            uint32_t         load_code)
9271{
9272    struct ecore_func_state_params func_params = { NULL };
9273    int rc;
9274
9275    /* prepare the parameters for function state transitions */
9276    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9277
9278    func_params.f_obj = &sc->func_obj;
9279    func_params.cmd = ECORE_F_CMD_HW_INIT;
9280
9281    func_params.params.hw_init.load_phase = load_code;
9282
9283    /*
9284     * Via a plethora of function pointers, we will eventually reach
9285     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9286     */
9287    rc = ecore_func_state_change(sc, &func_params);
9288
9289    return (rc);
9290}
9291
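/*
 * Fill 'len' bytes of device memory starting at 'addr' with the value
 * 'fill', using dword-wide writes when both the address and the length are
 * dword aligned and byte-wide writes otherwise; used below to zero the
 * status block regions.
 */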
9292static void
9293bxe_fill(struct bxe_softc *sc,
9294         uint32_t         addr,
9295         int              fill,
9296         uint32_t         len)
9297{
9298    uint32_t i;
9299
9300    if (!(len % 4) && !(addr % 4)) {
9301        for (i = 0; i < len; i += 4) {
9302            REG_WR(sc, (addr + i), fill);
9303        }
9304    } else {
9305        for (i = 0; i < len; i++) {
9306            REG_WR8(sc, (addr + i), fill);
9307        }
9308    }
9309}
9310
9311/* writes FP SP data to FW - data_size in dwords */
9312static void
9313bxe_wr_fp_sb_data(struct bxe_softc *sc,
9314                  int              fw_sb_id,
9315                  uint32_t         *sb_data_p,
9316                  uint32_t         data_size)
9317{
9318    int index;
9319
9320    for (index = 0; index < data_size; index++) {
9321        REG_WR(sc,
9322               (BAR_CSTRORM_INTMEM +
9323                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9324                (sizeof(uint32_t) * index)),
9325               *(sb_data_p + index));
9326    }
9327}
9328
9329static void
9330bxe_zero_fp_sb(struct bxe_softc *sc,
9331               int              fw_sb_id)
9332{
9333    struct hc_status_block_data_e2 sb_data_e2;
9334    struct hc_status_block_data_e1x sb_data_e1x;
9335    uint32_t *sb_data_p;
9336    uint32_t data_size = 0;
9337
9338    if (!CHIP_IS_E1x(sc)) {
9339        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9340        sb_data_e2.common.state = SB_DISABLED;
9341        sb_data_e2.common.p_func.vf_valid = FALSE;
9342        sb_data_p = (uint32_t *)&sb_data_e2;
9343        data_size = (sizeof(struct hc_status_block_data_e2) /
9344                     sizeof(uint32_t));
9345    } else {
9346        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9347        sb_data_e1x.common.state = SB_DISABLED;
9348        sb_data_e1x.common.p_func.vf_valid = FALSE;
9349        sb_data_p = (uint32_t *)&sb_data_e1x;
9350        data_size = (sizeof(struct hc_status_block_data_e1x) /
9351                     sizeof(uint32_t));
9352    }
9353
9354    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9355
9356    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9357             0, CSTORM_STATUS_BLOCK_SIZE);
9358    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9359             0, CSTORM_SYNC_BLOCK_SIZE);
9360}
9361
9362static void
9363bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9364                  struct hc_sp_status_block_data *sp_sb_data)
9365{
9366    int i;
9367
9368    for (i = 0;
9369         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9370         i++) {
9371        REG_WR(sc,
9372               (BAR_CSTRORM_INTMEM +
9373                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9374                (i * sizeof(uint32_t))),
9375               *((uint32_t *)sp_sb_data + i));
9376    }
9377}
9378
9379static void
9380bxe_zero_sp_sb(struct bxe_softc *sc)
9381{
9382    struct hc_sp_status_block_data sp_sb_data;
9383
9384    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9385
9386    sp_sb_data.state           = SB_DISABLED;
9387    sp_sb_data.p_func.vf_valid = FALSE;
9388
9389    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9390
9391    bxe_fill(sc,
9392             (BAR_CSTRORM_INTMEM +
9393              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9394              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9395    bxe_fill(sc,
9396             (BAR_CSTRORM_INTMEM +
9397              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9398              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9399}
9400
9401static void
9402bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9403                             int                       igu_sb_id,
9404                             int                       igu_seg_id)
9405{
9406    hc_sm->igu_sb_id      = igu_sb_id;
9407    hc_sm->igu_seg_id     = igu_seg_id;
9408    hc_sm->timer_value    = 0xFF;
9409    hc_sm->time_to_expire = 0xFFFFFFFF;
9410}
9411
9412static void
9413bxe_map_sb_state_machines(struct hc_index_data *index_data)
9414{
9415    /* zero out state machine indices */
9416
9417    /* rx indices */
9418    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9419
9420    /* tx indices */
9421    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9422    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9423    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9424    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9425
9426    /* map indices */
9427
9428    /* rx indices */
9429    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9430        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9431
9432    /* tx indices */
9433    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9434        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9435    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9436        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9437    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9438        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9439    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9440        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9441}
9442
9443static void
9444bxe_init_sb(struct bxe_softc *sc,
9445            bus_addr_t       busaddr,
9446            int              vfid,
9447            uint8_t          vf_valid,
9448            int              fw_sb_id,
9449            int              igu_sb_id)
9450{
9451    struct hc_status_block_data_e2  sb_data_e2;
9452    struct hc_status_block_data_e1x sb_data_e1x;
9453    struct hc_status_block_sm       *hc_sm_p;
9454    uint32_t *sb_data_p;
9455    int igu_seg_id;
9456    int data_size;
9457
9458    if (CHIP_INT_MODE_IS_BC(sc)) {
9459        igu_seg_id = HC_SEG_ACCESS_NORM;
9460    } else {
9461        igu_seg_id = IGU_SEG_ACCESS_NORM;
9462    }
9463
9464    bxe_zero_fp_sb(sc, fw_sb_id);
9465
9466    if (!CHIP_IS_E1x(sc)) {
9467        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9468        sb_data_e2.common.state = SB_ENABLED;
9469        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9470        sb_data_e2.common.p_func.vf_id = vfid;
9471        sb_data_e2.common.p_func.vf_valid = vf_valid;
9472        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9473        sb_data_e2.common.same_igu_sb_1b = TRUE;
9474        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9475        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9476        hc_sm_p = sb_data_e2.common.state_machine;
9477        sb_data_p = (uint32_t *)&sb_data_e2;
9478        data_size = (sizeof(struct hc_status_block_data_e2) /
9479                     sizeof(uint32_t));
9480        bxe_map_sb_state_machines(sb_data_e2.index_data);
9481    } else {
9482        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9483        sb_data_e1x.common.state = SB_ENABLED;
9484        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9485        sb_data_e1x.common.p_func.vf_id = 0xff;
9486        sb_data_e1x.common.p_func.vf_valid = FALSE;
9487        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9488        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9489        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9490        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9491        hc_sm_p = sb_data_e1x.common.state_machine;
9492        sb_data_p = (uint32_t *)&sb_data_e1x;
9493        data_size = (sizeof(struct hc_status_block_data_e1x) /
9494                     sizeof(uint32_t));
9495        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9496    }
9497
9498    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9499    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9500
9501    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9502
9503    /* write indices to HW - PCI guarantees endianness of regpairs */
9504    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9505}
9506
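/*
 * Return the queue-zone id used for this fastpath's rx producers: on E1x
 * chips it is derived from the client id and port, on newer chips it is
 * simply the client id.
 */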
9507static inline uint8_t
9508bxe_fp_qzone_id(struct bxe_fastpath *fp)
9509{
9510    if (CHIP_IS_E1x(fp->sc)) {
9511        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9512    } else {
9513        return (fp->cl_id);
9514    }
9515}
9516
9517static inline uint32_t
9518bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9519                           struct bxe_fastpath *fp)
9520{
9521    uint32_t offset = BAR_USTRORM_INTMEM;
9522
9523    if (!CHIP_IS_E1x(sc)) {
9524        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9525    } else {
9526        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9527    }
9528
9529    return (offset);
9530}
9531
9532static void
9533bxe_init_eth_fp(struct bxe_softc *sc,
9534                int              idx)
9535{
9536    struct bxe_fastpath *fp = &sc->fp[idx];
9537    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9538    unsigned long q_type = 0;
9539    int cos;
9540
9541    fp->sc    = sc;
9542    fp->index = idx;
9543
9544    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9545    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9546
9547    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9548                    (SC_L_ID(sc) + idx) :
9549                    /* want client ID same as IGU SB ID for non-E1 */
9550                    fp->igu_sb_id;
9551    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9552
9553    /* setup sb indices */
9554    if (!CHIP_IS_E1x(sc)) {
9555        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9556        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9557    } else {
9558        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9559        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9560    }
9561
9562    /* init shortcut */
9563    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9564
9565    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9566
9567    /*
9568     * XXX If multiple CoS is ever supported then each fastpath structure
9569     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9570     */
9571    for (cos = 0; cos < sc->max_cos; cos++) {
9572        cids[cos] = idx;
9573    }
9574    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9575
9576    /* nothing more for a VF to do */
9577    if (IS_VF(sc)) {
9578        return;
9579    }
9580
9581    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9582                fp->fw_sb_id, fp->igu_sb_id);
9583
9584    bxe_update_fp_sb_idx(fp);
9585
9586    /* Configure Queue State object */
9587    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9588    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9589
9590    ecore_init_queue_obj(sc,
9591                         &sc->sp_objs[idx].q_obj,
9592                         fp->cl_id,
9593                         cids,
9594                         sc->max_cos,
9595                         SC_FUNC(sc),
9596                         BXE_SP(sc, q_rdata),
9597                         BXE_SP_MAPPING(sc, q_rdata),
9598                         q_type);
9599
9600    /* configure classification DBs */
9601    ecore_init_mac_obj(sc,
9602                       &sc->sp_objs[idx].mac_obj,
9603                       fp->cl_id,
9604                       idx,
9605                       SC_FUNC(sc),
9606                       BXE_SP(sc, mac_rdata),
9607                       BXE_SP_MAPPING(sc, mac_rdata),
9608                       ECORE_FILTER_MAC_PENDING,
9609                       &sc->sp_state,
9610                       ECORE_OBJ_TYPE_RX_TX,
9611                       &sc->macs_pool);
9612
9613    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9614          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9615}
9616
9617static inline void
9618bxe_update_rx_prod(struct bxe_softc    *sc,
9619                   struct bxe_fastpath *fp,
9620                   uint16_t            rx_bd_prod,
9621                   uint16_t            rx_cq_prod,
9622                   uint16_t            rx_sge_prod)
9623{
9624    struct ustorm_eth_rx_producers rx_prods = { 0 };
9625    uint32_t i;
9626
9627    /* update producers */
9628    rx_prods.bd_prod  = rx_bd_prod;
9629    rx_prods.cqe_prod = rx_cq_prod;
9630    rx_prods.sge_prod = rx_sge_prod;
9631
9632    /*
9633     * Make sure that the BD and SGE data is updated before updating the
9634     * producers since FW might read the BD/SGE right after the producer
9635     * is updated.
9636     * This is only applicable for weak-ordered memory model archs such
9637     * as IA-64. The following barrier is also mandatory since FW will
9638     * as IA-64. The following barrier is also mandatory since the FW
9639     * assumes the BDs already have buffers attached.
9640    wmb();
9641
9642    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9643        REG_WR(sc,
9644               (fp->ustorm_rx_prods_offset + (i * 4)),
9645               ((uint32_t *)&rx_prods)[i]);
9646    }
9647
9648    wmb(); /* keep prod updates ordered */
9649
9650    BLOGD(sc, DBG_RX,
9651          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9652          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9653}
9654
9655static void
9656bxe_init_rx_rings(struct bxe_softc *sc)
9657{
9658    struct bxe_fastpath *fp;
9659    int i;
9660
9661    for (i = 0; i < sc->num_queues; i++) {
9662        fp = &sc->fp[i];
9663
9664        fp->rx_bd_cons = 0;
9665
9666        /*
9667         * Activate the BD ring...
9668         * Warning, this will generate an interrupt (to the TSTORM)
9669         * so this can only be done after the chip is initialized
9670         */
9671        bxe_update_rx_prod(sc, fp,
9672                           fp->rx_bd_prod,
9673                           fp->rx_cq_prod,
9674                           fp->rx_sge_prod);
9675
9676        if (i != 0) {
9677            continue;
9678        }
9679
9680        if (CHIP_IS_E1(sc)) {
9681            REG_WR(sc,
9682                   (BAR_USTRORM_INTMEM +
9683                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9684                   U64_LO(fp->rcq_dma.paddr));
9685            REG_WR(sc,
9686                   (BAR_USTRORM_INTMEM +
9687                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9688                   U64_HI(fp->rcq_dma.paddr));
9689        }
9690    }
9691}
9692
9693static void
9694bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9695{
9696    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9697    fp->tx_db.data.zero_fill1 = 0;
9698    fp->tx_db.data.prod = 0;
9699
9700    fp->tx_pkt_prod = 0;
9701    fp->tx_pkt_cons = 0;
9702    fp->tx_bd_prod = 0;
9703    fp->tx_bd_cons = 0;
9704    fp->eth_q_stats.tx_pkts = 0;
9705}
9706
9707static inline void
9708bxe_init_tx_rings(struct bxe_softc *sc)
9709{
9710    int i;
9711
9712    for (i = 0; i < sc->num_queues; i++) {
9713        bxe_init_tx_ring_one(&sc->fp[i]);
9714    }
9715}
9716
9717static void
9718bxe_init_def_sb(struct bxe_softc *sc)
9719{
9720    struct host_sp_status_block *def_sb = sc->def_sb;
9721    bus_addr_t mapping = sc->def_sb_dma.paddr;
9722    int igu_sp_sb_index;
9723    int igu_seg_id;
9724    int port = SC_PORT(sc);
9725    int func = SC_FUNC(sc);
9726    int reg_offset, reg_offset_en5;
9727    uint64_t section;
9728    int index, sindex;
9729    struct hc_sp_status_block_data sp_sb_data;
9730
9731    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9732
9733    if (CHIP_INT_MODE_IS_BC(sc)) {
9734        igu_sp_sb_index = DEF_SB_IGU_ID;
9735        igu_seg_id = HC_SEG_ACCESS_DEF;
9736    } else {
9737        igu_sp_sb_index = sc->igu_dsb_id;
9738        igu_seg_id = IGU_SEG_ACCESS_DEF;
9739    }
9740
9741    /* attentions */
9742    section = ((uint64_t)mapping +
9743               offsetof(struct host_sp_status_block, atten_status_block));
9744    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9745    sc->attn_state = 0;
9746
9747    reg_offset = (port) ?
9748                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9749                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9750    reg_offset_en5 = (port) ?
9751                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9752                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9753
9754    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9755        /* take care of sig[0]..sig[4] */
9756        for (sindex = 0; sindex < 4; sindex++) {
9757            sc->attn_group[index].sig[sindex] =
9758                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9759        }
9760
9761        if (!CHIP_IS_E1x(sc)) {
9762            /*
9763             * enable5 is separate from the rest of the registers,
9764             * and the address skip is 4 and not 16 between the
9765             * different groups
9766             */
9767            sc->attn_group[index].sig[4] =
9768                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9769        } else {
9770            sc->attn_group[index].sig[4] = 0;
9771        }
9772    }
9773
9774    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9775        reg_offset = (port) ?
9776                         HC_REG_ATTN_MSG1_ADDR_L :
9777                         HC_REG_ATTN_MSG0_ADDR_L;
9778        REG_WR(sc, reg_offset, U64_LO(section));
9779        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9780    } else if (!CHIP_IS_E1x(sc)) {
9781        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9782        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9783    }
9784
9785    section = ((uint64_t)mapping +
9786               offsetof(struct host_sp_status_block, sp_sb));
9787
9788    bxe_zero_sp_sb(sc);
9789
9790    /* PCI guarantees endianness of regpairs */
9791    sp_sb_data.state           = SB_ENABLED;
9792    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9793    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9794    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9795    sp_sb_data.igu_seg_id      = igu_seg_id;
9796    sp_sb_data.p_func.pf_id    = func;
9797    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9798    sp_sb_data.p_func.vf_id    = 0xff;
9799
9800    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9801
9802    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9803}
9804
9805static void
9806bxe_init_sp_ring(struct bxe_softc *sc)
9807{
9808    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9809    sc->spq_prod_idx = 0;
9810    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9811    sc->spq_prod_bd = sc->spq;
9812    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9813}
9814
9815static void
9816bxe_init_eq_ring(struct bxe_softc *sc)
9817{
9818    union event_ring_elem *elem;
9819    int i;
9820
9821    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9822        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9823
9824        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9825                                                 BCM_PAGE_SIZE *
9826                                                 (i % NUM_EQ_PAGES)));
9827        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9828                                                 BCM_PAGE_SIZE *
9829                                                 (i % NUM_EQ_PAGES)));
9830    }
9831
9832    sc->eq_cons    = 0;
9833    sc->eq_prod    = NUM_EQ_DESC;
9834    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9835
9836    atomic_store_rel_long(&sc->eq_spq_left,
9837                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9838                               NUM_EQ_DESC) - 1));
9839}
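
/*
 * Illustrative note (not driver code): the loop above chains the event queue
 * pages into a ring by pointing the last descriptor of each page at the bus
 * address of the next page. Assuming NUM_EQ_PAGES were 2, the last element
 * of page 0 (i = 1) would point at page 1 (1 % 2 = 1) and the last element
 * of page 1 (i = 2) would wrap back to page 0 (2 % 2 = 0).
 */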
9840
9841static void
9842bxe_init_internal_common(struct bxe_softc *sc)
9843{
9844    int i;
9845
9846    /*
9847     * Zero this manually as its initialization is currently missing
9848     * in the initTool.
9849     */
9850    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9851        REG_WR(sc,
9852               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9853               0);
9854    }
9855
9856    if (!CHIP_IS_E1x(sc)) {
9857        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9858                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9859    }
9860}
9861
9862static void
9863bxe_init_internal(struct bxe_softc *sc,
9864                  uint32_t         load_code)
9865{
9866    switch (load_code) {
9867    case FW_MSG_CODE_DRV_LOAD_COMMON:
9868    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9869        bxe_init_internal_common(sc);
9870        /* no break */
9871
9872    case FW_MSG_CODE_DRV_LOAD_PORT:
9873        /* nothing to do */
9874        /* no break */
9875
9876    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9877        /* internal memory per function is initialized inside bxe_pf_init */
9878        break;
9879
9880    default:
9881        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9882        break;
9883    }
9884}
9885
9886static void
9887storm_memset_func_cfg(struct bxe_softc                         *sc,
9888                      struct tstorm_eth_function_common_config *tcfg,
9889                      uint16_t                                  abs_fid)
9890{
9891    uint32_t addr;
9892    size_t size;
9893
9894    addr = (BAR_TSTRORM_INTMEM +
9895            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9896    size = sizeof(struct tstorm_eth_function_common_config);
9897    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9898}
9899
9900static void
9901bxe_func_init(struct bxe_softc            *sc,
9902              struct bxe_func_init_params *p)
9903{
9904    struct tstorm_eth_function_common_config tcfg = { 0 };
9905
9906    if (CHIP_IS_E1x(sc)) {
9907        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9908    }
9909
9910    /* Enable the function in the FW */
9911    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9912    storm_memset_func_en(sc, p->func_id, 1);
9913
9914    /* spq */
9915    if (p->func_flgs & FUNC_FLG_SPQ) {
9916        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9917        REG_WR(sc,
9918               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9919               p->spq_prod);
9920    }
9921}
9922
9923/*
9924 * Calculates the per-VN min rates and stores them in the cmng input
9925 * structure; they are needed for further normalization of the min rates.
9926 * Behaviour:
9927 *   - hidden VNs get a min rate of 0;
9928 *   - a visible VN configured with a zero min rate is bumped to
9929 *     DEF_MIN_RATE so that it still gets a share;
9930 *   - if all of the configured min rates are zero (or ETS is enabled),
9931 *     the fairness algorithm is deactivated.
9932 */
9933static void
9934bxe_calc_vn_min(struct bxe_softc       *sc,
9935                struct cmng_init_input *input)
9936{
9937    uint32_t vn_cfg;
9938    uint32_t vn_min_rate;
9939    int all_zero = 1;
9940    int vn;
9941
9942    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9943        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9944        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9945                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9946
9947        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9948            /* skip hidden VNs */
9949            vn_min_rate = 0;
9950        } else if (!vn_min_rate) {
9951            /* If min rate is zero - set it to 100 */
9952            vn_min_rate = DEF_MIN_RATE;
9953        } else {
9954            all_zero = 0;
9955        }
9956
9957        input->vnic_min_rate[vn] = vn_min_rate;
9958    }
9959
9960    /* if ETS or all min rates are zeros - disable fairness */
9961    if (BXE_IS_ETS_ENABLED(sc)) {
9962        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9963        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
9964    } else if (all_zero) {
9965        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9966        BLOGD(sc, DBG_LOAD,
9967              "Fairness disabled (all MIN values are zeroes)\n");
9968    } else {
9969        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9970    }
9971}
9972
9973static inline uint16_t
9974bxe_extract_max_cfg(struct bxe_softc *sc,
9975                    uint32_t         mf_cfg)
9976{
9977    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
9978                        FUNC_MF_CFG_MAX_BW_SHIFT);
9979
9980    if (!max_cfg) {
9981        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
9982        max_cfg = 100;
9983    }
9984
9985    return (max_cfg);
9986}
9987
9988static void
9989bxe_calc_vn_max(struct bxe_softc       *sc,
9990                int                    vn,
9991                struct cmng_init_input *input)
9992{
9993    uint16_t vn_max_rate;
9994    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9995    uint32_t max_cfg;
9996
9997    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9998        vn_max_rate = 0;
9999    } else {
10000        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10001
10002        if (IS_MF_SI(sc)) {
10003            /* max_cfg in percents of linkspeed */
10004            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10005        } else { /* SD modes */
10006            /* max_cfg is absolute in 100Mb units */
10007            vn_max_rate = (max_cfg * 100);
10008        }
10009    }
10010
10011    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10012
10013    input->vnic_max_rate[vn] = vn_max_rate;
10014}
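
/*
 * Illustrative example (not driver code): with a 10000 Mbps line rate and a
 * MAX_BW field of 50, MF-SI mode gives vn_max_rate = (10000 * 50) / 100 =
 * 5000, while the SD modes treat 50 as an absolute value in 100 Mb units and
 * also give 50 * 100 = 5000. A MAX_BW field of 0 is bumped to 100 by
 * bxe_extract_max_cfg(), and a hidden VN gets a max rate of 0.
 */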
10015
10016static void
10017bxe_cmng_fns_init(struct bxe_softc *sc,
10018                  uint8_t          read_cfg,
10019                  uint8_t          cmng_type)
10020{
10021    struct cmng_init_input input;
10022    int vn;
10023
10024    memset(&input, 0, sizeof(struct cmng_init_input));
10025
10026    input.port_rate = sc->link_vars.line_speed;
10027
10028    if (cmng_type == CMNG_FNS_MINMAX) {
10029        /* read mf conf from shmem */
10030        if (read_cfg) {
10031            bxe_read_mf_cfg(sc);
10032        }
10033
10034        /* get VN min rate and enable fairness if not 0 */
10035        bxe_calc_vn_min(sc, &input);
10036
10037        /* get VN max rate */
10038        if (sc->port.pmf) {
10039            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10040                bxe_calc_vn_max(sc, vn, &input);
10041            }
10042        }
10043
10044        /* always enable rate shaping and fairness */
10045        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10046
10047        ecore_init_cmng(&input, &sc->cmng);
10048        return;
10049    }
10050
10051    /* rate shaping and fairness are disabled */
10052    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10053}
10054
10055static int
10056bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10057{
10058    if (CHIP_REV_IS_SLOW(sc)) {
10059        return (CMNG_FNS_NONE);
10060    }
10061
10062    if (IS_MF(sc)) {
10063        return (CMNG_FNS_MINMAX);
10064    }
10065
10066    return (CMNG_FNS_NONE);
10067}
10068
10069static void
10070storm_memset_cmng(struct bxe_softc *sc,
10071                  struct cmng_init *cmng,
10072                  uint8_t          port)
10073{
10074    int vn;
10075    int func;
10076    uint32_t addr;
10077    size_t size;
10078
10079    addr = (BAR_XSTRORM_INTMEM +
10080            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10081    size = sizeof(struct cmng_struct_per_port);
10082    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10083
10084    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10085        func = func_by_vn(sc, vn);
10086
10087        addr = (BAR_XSTRORM_INTMEM +
10088                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10089        size = sizeof(struct rate_shaping_vars_per_vn);
10090        ecore_storm_memset_struct(sc, addr, size,
10091                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10092
10093        addr = (BAR_XSTRORM_INTMEM +
10094                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10095        size = sizeof(struct fairness_vars_per_vn);
10096        ecore_storm_memset_struct(sc, addr, size,
10097                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10098    }
10099}
10100
10101static void
10102bxe_pf_init(struct bxe_softc *sc)
10103{
10104    struct bxe_func_init_params func_init = { 0 };
10105    struct event_ring_data eq_data = { { 0 } };
10106    uint16_t flags;
10107
10108    if (!CHIP_IS_E1x(sc)) {
10109        /* reset IGU PF statistics: MSIX + ATTN */
10110        /* PF */
10111        REG_WR(sc,
10112               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10113                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10114                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10115               0);
10116        /* ATTN */
10117        REG_WR(sc,
10118               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10119                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10120                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10121                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10122               0);
10123    }
10124
10125    /* function setup flags */
10126    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10127
10128    /*
10129     * This flag is relevant for E1x only.
10130     * E2 doesn't have a TPA configuration at the function level.
10131     */
10132    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10133
10134    func_init.func_flgs = flags;
10135    func_init.pf_id     = SC_FUNC(sc);
10136    func_init.func_id   = SC_FUNC(sc);
10137    func_init.spq_map   = sc->spq_dma.paddr;
10138    func_init.spq_prod  = sc->spq_prod_idx;
10139
10140    bxe_func_init(sc, &func_init);
10141
10142    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10143
10144    /*
10145     * Congestion management values depend on the link rate.
10146     * There is no active link yet, so the initial link rate is set to 10Gbps.
10147     * When the link comes up the congestion management values are
10148     * re-calculated according to the actual link rate.
10149     */
10150    sc->link_vars.line_speed = SPEED_10000;
10151    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10152
10153    /* Only the PMF sets the HW */
10154    if (sc->port.pmf) {
10155        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10156    }
10157
10158    /* init Event Queue - the PCI bus guarantees correct endianness */
10159    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10160    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10161    eq_data.producer     = sc->eq_prod;
10162    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10163    eq_data.sb_id        = DEF_SB_ID;
10164    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10165}
10166
10167static void
10168bxe_hc_int_enable(struct bxe_softc *sc)
10169{
10170    int port = SC_PORT(sc);
10171    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10172    uint32_t val = REG_RD(sc, addr);
10173    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10174    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10175                           (sc->intr_count == 1)) ? TRUE : FALSE;
10176    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10177
10178    if (msix) {
10179        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10180                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10181        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10182                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10183        if (single_msix) {
10184            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10185        }
10186    } else if (msi) {
10187        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10188        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10189                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10190                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10191    } else {
10192        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10193                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10194                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10195                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10196
10197        if (!CHIP_IS_E1(sc)) {
10198            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10199                  val, port, addr);
10200
10201            REG_WR(sc, addr, val);
10202
10203            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10204        }
10205    }
10206
10207    if (CHIP_IS_E1(sc)) {
10208        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10209    }
10210
10211    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10212          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10213
10214    REG_WR(sc, addr, val);
10215
10216    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10217    mb();
10218
10219    if (!CHIP_IS_E1(sc)) {
10220        /* init leading/trailing edge */
10221        if (IS_MF(sc)) {
10222            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10223            if (sc->port.pmf) {
10224                /* enable nig and gpio3 attention */
10225                val |= 0x1100;
10226            }
10227        } else {
10228            val = 0xffff;
10229        }
10230
10231        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10232        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10233    }
10234
10235    /* make sure that interrupts are indeed enabled from here on */
10236    mb();
10237}
10238
10239static void
10240bxe_igu_int_enable(struct bxe_softc *sc)
10241{
10242    uint32_t val;
10243    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10244    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10245                           (sc->intr_count == 1)) ? TRUE : FALSE;
10246    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10247
10248    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10249
10250    if (msix) {
10251        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10252                 IGU_PF_CONF_SINGLE_ISR_EN);
10253        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10254                IGU_PF_CONF_ATTN_BIT_EN);
10255        if (single_msix) {
10256            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10257        }
10258    } else if (msi) {
10259        val &= ~IGU_PF_CONF_INT_LINE_EN;
10260        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10261                IGU_PF_CONF_ATTN_BIT_EN |
10262                IGU_PF_CONF_SINGLE_ISR_EN);
10263    } else {
10264        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10265        val |= (IGU_PF_CONF_INT_LINE_EN |
10266                IGU_PF_CONF_ATTN_BIT_EN |
10267                IGU_PF_CONF_SINGLE_ISR_EN);
10268    }
10269
10270    /* clean previous status - need to configure IGU prior to ack */
10271    if ((!msix) || single_msix) {
10272        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10273        bxe_ack_int(sc);
10274    }
10275
10276    val |= IGU_PF_CONF_FUNC_EN;
10277
10278    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10279          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10280
10281    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10282
10283    mb();
10284
10285    /* init leading/trailing edge */
10286    if (IS_MF(sc)) {
10287        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10288        if (sc->port.pmf) {
10289            /* enable nig and gpio3 attention */
10290            val |= 0x1100;
10291        }
10292    } else {
10293        val = 0xffff;
10294    }
10295
10296    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10297    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10298
10299    /* make sure that interrupts are indeed enabled from here on */
10300    mb();
10301}
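
/*
 * Illustrative example (not driver code) of the leading/trailing edge mask
 * built above for multi-function mode: for VN 2 the base value is
 * 0xee0f | (1 << (2 + 4)) = 0xee4f, and if this function is the PMF the
 * NIG/GPIO3 attention bits are added as well: 0xee4f | 0x1100 = 0xff4f.
 * In single-function mode all bits (0xffff) are enabled.
 */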
10302
10303static void
10304bxe_int_enable(struct bxe_softc *sc)
10305{
10306    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10307        bxe_hc_int_enable(sc);
10308    } else {
10309        bxe_igu_int_enable(sc);
10310    }
10311}
10312
10313static void
10314bxe_hc_int_disable(struct bxe_softc *sc)
10315{
10316    int port = SC_PORT(sc);
10317    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10318    uint32_t val = REG_RD(sc, addr);
10319
10320    /*
10321     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10322     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10323     * HC block.
10324     */
10325    if (CHIP_IS_E1(sc)) {
10326        /*
10327         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10328         * to prevent the HC from sending interrupts after we exit the function
10329         */
10330        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10331
10332        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10333                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10334                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10335    } else {
10336        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10337                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10338                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10339                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10340    }
10341
10342    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10343
10344    /* flush all outstanding writes */
10345    mb();
10346
10347    REG_WR(sc, addr, val);
10348    if (REG_RD(sc, addr) != val) {
10349        BLOGE(sc, "proper val not read from HC IGU!\n");
10350    }
10351}
10352
10353static void
10354bxe_igu_int_disable(struct bxe_softc *sc)
10355{
10356    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10357
10358    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10359             IGU_PF_CONF_INT_LINE_EN |
10360             IGU_PF_CONF_ATTN_BIT_EN);
10361
10362    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10363
10364    /* flush all outstanding writes */
10365    mb();
10366
10367    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10368    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10369        BLOGE(sc, "proper val not read from IGU!\n");
10370    }
10371}
10372
10373static void
10374bxe_int_disable(struct bxe_softc *sc)
10375{
10376    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10377        bxe_hc_int_disable(sc);
10378    } else {
10379        bxe_igu_int_disable(sc);
10380    }
10381}
10382
10383static void
10384bxe_nic_init(struct bxe_softc *sc,
10385             int              load_code)
10386{
10387    int i;
10388
10389    for (i = 0; i < sc->num_queues; i++) {
10390        bxe_init_eth_fp(sc, i);
10391    }
10392
10393    rmb(); /* ensure status block indices were read */
10394
10395    bxe_init_rx_rings(sc);
10396    bxe_init_tx_rings(sc);
10397
10398    if (IS_VF(sc)) {
10399        return;
10400    }
10401
10402    /* initialize MOD_ABS interrupts */
10403    elink_init_mod_abs_int(sc, &sc->link_vars,
10404                           sc->devinfo.chip_id,
10405                           sc->devinfo.shmem_base,
10406                           sc->devinfo.shmem2_base,
10407                           SC_PORT(sc));
10408
10409    bxe_init_def_sb(sc);
10410    bxe_update_dsb_idx(sc);
10411    bxe_init_sp_ring(sc);
10412    bxe_init_eq_ring(sc);
10413    bxe_init_internal(sc, load_code);
10414    bxe_pf_init(sc);
10415    bxe_stats_init(sc);
10416
10417    /* flush all before enabling interrupts */
10418    mb();
10419
10420    bxe_int_enable(sc);
10421
10422    /* check for SPIO5 */
10423    bxe_attn_int_deasserted0(sc,
10424                             REG_RD(sc,
10425                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10426                                     SC_PORT(sc)*4)) &
10427                             AEU_INPUTS_ATTN_BITS_SPIO5);
10428}
10429
10430static inline void
10431bxe_init_objs(struct bxe_softc *sc)
10432{
10433    /* mcast rules must be added to tx if tx switching is enabled */
10434    ecore_obj_type o_type =
10435        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10436                                         ECORE_OBJ_TYPE_RX;
10437
10438    /* RX_MODE controlling object */
10439    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10440
10441    /* multicast configuration controlling object */
10442    ecore_init_mcast_obj(sc,
10443                         &sc->mcast_obj,
10444                         sc->fp[0].cl_id,
10445                         sc->fp[0].index,
10446                         SC_FUNC(sc),
10447                         SC_FUNC(sc),
10448                         BXE_SP(sc, mcast_rdata),
10449                         BXE_SP_MAPPING(sc, mcast_rdata),
10450                         ECORE_FILTER_MCAST_PENDING,
10451                         &sc->sp_state,
10452                         o_type);
10453
10454    /* Setup CAM credit pools */
10455    ecore_init_mac_credit_pool(sc,
10456                               &sc->macs_pool,
10457                               SC_FUNC(sc),
10458                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10459                                                 VNICS_PER_PATH(sc));
10460
10461    ecore_init_vlan_credit_pool(sc,
10462                                &sc->vlans_pool,
10463                                SC_ABS_FUNC(sc) >> 1,
10464                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10465                                                  VNICS_PER_PATH(sc));
10466
10467    /* RSS configuration object */
10468    ecore_init_rss_config_obj(sc,
10469                              &sc->rss_conf_obj,
10470                              sc->fp[0].cl_id,
10471                              sc->fp[0].index,
10472                              SC_FUNC(sc),
10473                              SC_FUNC(sc),
10474                              BXE_SP(sc, rss_rdata),
10475                              BXE_SP_MAPPING(sc, rss_rdata),
10476                              ECORE_FILTER_RSS_CONF_PENDING,
10477                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10478}
10479
10480/*
10481 * Initialize the function. This must be called before sending CLIENT_SETUP
10482 * for the first client.
10483 */
10484static inline int
10485bxe_func_start(struct bxe_softc *sc)
10486{
10487    struct ecore_func_state_params func_params = { NULL };
10488    struct ecore_func_start_params *start_params = &func_params.params.start;
10489
10490    /* Prepare parameters for function state transitions */
10491    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10492
10493    func_params.f_obj = &sc->func_obj;
10494    func_params.cmd = ECORE_F_CMD_START;
10495
10496    /* Function parameters */
10497    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10498    start_params->sd_vlan_tag = OVLAN(sc);
10499
10500    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10501        start_params->network_cos_mode = STATIC_COS;
10502    } else { /* CHIP_IS_E1X */
10503        start_params->network_cos_mode = FW_WRR;
10504    }
10505
10506    //start_params->gre_tunnel_mode = 0;
10507    //start_params->gre_tunnel_rss  = 0;
10508
10509    return (ecore_func_state_change(sc, &func_params));
10510}
10511
10512static int
10513bxe_set_power_state(struct bxe_softc *sc,
10514                    uint8_t          state)
10515{
10516    uint16_t pmcsr;
10517
10518    /* If there is no power capability, silently succeed */
10519    /* If there is no power capability, just warn and succeed */
10520        BLOGW(sc, "No power capability\n");
10521        return (0);
10522    }
10523
10524    pmcsr = pci_read_config(sc->dev,
10525                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10526                            2);
10527
10528    switch (state) {
10529    case PCI_PM_D0:
10530        pci_write_config(sc->dev,
10531                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10532                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10533
10534        if (pmcsr & PCIM_PSTAT_DMASK) {
10535            /* delay required during transition out of D3hot */
10536            DELAY(20000);
10537        }
10538
10539        break;
10540
10541    case PCI_PM_D3hot:
10542        /* XXX if there are other clients above don't shut down the power */
10543
10544        /* don't shut down the power for emulation and FPGA */
10545        if (CHIP_REV_IS_SLOW(sc)) {
10546            return (0);
10547        }
10548
10549        pmcsr &= ~PCIM_PSTAT_DMASK;
10550        pmcsr |= PCIM_PSTAT_D3;
10551
10552        if (sc->wol) {
10553            pmcsr |= PCIM_PSTAT_PMEENABLE;
10554        }
10555
10556        pci_write_config(sc->dev,
10557                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10558                         pmcsr, 4);
10559
10560        /*
10561         * No more memory access after this point until device is brought back
10562         * to D0 state.
10563         */
10564        break;
10565
10566    default:
10567        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10568            state, pmcsr);
10569        return (-1);
10570    }
10571
10572    return (0);
10573}
10574
10575
10576/* return true if succeeded to acquire the lock */
10577static uint8_t
10578bxe_trylock_hw_lock(struct bxe_softc *sc,
10579                    uint32_t         resource)
10580{
10581    uint32_t lock_status;
10582    uint32_t resource_bit = (1 << resource);
10583    int func = SC_FUNC(sc);
10584    uint32_t hw_lock_control_reg;
10585
10586    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10587
10588    /* Validating that the resource is within range */
10589    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10590        BLOGD(sc, DBG_LOAD,
10591              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10592              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10593        return (FALSE);
10594    }
10595
10596    if (func <= 5) {
10597        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10598    } else {
10599        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10600    }
10601
10602    /* try to acquire the lock */
10603    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10604    lock_status = REG_RD(sc, hw_lock_control_reg);
10605    if (lock_status & resource_bit) {
10606        return (TRUE);
10607    }
10608
10609    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10610        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10611        lock_status, resource_bit);
10612
10613    return (FALSE);
10614}
10615
10616/*
10617 * Get the recovery leader resource id according to the engine this function
10618 * belongs to. Currently only 2 engines are supported.
10619 */
10620static int
10621bxe_get_leader_lock_resource(struct bxe_softc *sc)
10622{
10623    if (SC_PATH(sc)) {
10624        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10625    } else {
10626        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10627    }
10628}
10629
10630/* try to acquire a leader lock for current engine */
10631static uint8_t
10632bxe_trylock_leader_lock(struct bxe_softc *sc)
10633{
10634    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10635}
10636
10637static int
10638bxe_release_leader_lock(struct bxe_softc *sc)
10639{
10640    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10641}
10642
10643/* close gates #2, #3 and #4 */
10644static void
10645bxe_set_234_gates(struct bxe_softc *sc,
10646                  uint8_t          close)
10647{
10648    uint32_t val;
10649
10650    /* gates #2 and #4a are closed/opened for "not E1" only */
10651    if (!CHIP_IS_E1(sc)) {
10652        /* #4 */
10653        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10654        /* #2 */
10655        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10656    }
10657
10658    /* #3 */
10659    if (CHIP_IS_E1x(sc)) {
10660        /* prevent interrupts from HC on both ports */
10661        val = REG_RD(sc, HC_REG_CONFIG_1);
10662        REG_WR(sc, HC_REG_CONFIG_1,
10663               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10664               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10665
10666        val = REG_RD(sc, HC_REG_CONFIG_0);
10667        REG_WR(sc, HC_REG_CONFIG_0,
10668               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10669               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10670    } else {
10671        /* Prevent incoming interrupts in IGU */
10672        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10673
10674        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10675               (!close) ?
10676               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10677               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10678    }
10679
10680    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10681          close ? "closing" : "opening");
10682
10683    wmb();
10684}
10685
10686/* poll for pending writes bit, it should get cleared in no more than 1s */
10687static int
10688bxe_er_poll_igu_vq(struct bxe_softc *sc)
10689{
10690    uint32_t cnt = 1000;
10691    uint32_t pend_bits = 0;
10692
10693    do {
10694        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10695
10696        if (pend_bits == 0) {
10697            break;
10698        }
10699
10700        DELAY(1000);
10701    } while (--cnt > 0);
10702
10703    if (cnt == 0) {
10704        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10705        return (-1);
10706    }
10707
10708    return (0);
10709}
10710
10711#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10712
10713static void
10714bxe_clp_reset_prep(struct bxe_softc *sc,
10715                   uint32_t         *magic_val)
10716{
10717    /* Do some magic... */
10718    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10719    *magic_val = val & SHARED_MF_CLP_MAGIC;
10720    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10721}
10722
10723/* restore the value of the 'magic' bit */
10724static void
10725bxe_clp_reset_done(struct bxe_softc *sc,
10726                   uint32_t         magic_val)
10727{
10728    /* Restore the 'magic' bit value... */
10729    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10730    MFCFG_WR(sc, shared_mf_config.clp_mb,
10731              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10732}
10733
10734/* prepare for MCP reset, takes care of CLP configurations */
10735static void
10736bxe_reset_mcp_prep(struct bxe_softc *sc,
10737                   uint32_t         *magic_val)
10738{
10739    uint32_t shmem;
10740    uint32_t validity_offset;
10741
10742    /* set `magic' bit in order to save MF config */
10743    if (!CHIP_IS_E1(sc)) {
10744        bxe_clp_reset_prep(sc, magic_val);
10745    }
10746
10747    /* get shmem offset */
10748    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10749    validity_offset =
10750        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10751
10752    /* Clear validity map flags */
10753    if (shmem > 0) {
10754        REG_WR(sc, shmem + validity_offset, 0);
10755    }
10756}
10757
10758#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10759#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10760
10761static void
10762bxe_mcp_wait_one(struct bxe_softc *sc)
10763{
10764    /* special handling for emulation and FPGA (10 times longer) */
10765    if (CHIP_REV_IS_SLOW(sc)) {
10766        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10767    } else {
10768        DELAY((MCP_ONE_TIMEOUT) * 1000);
10769    }
10770}
10771
10772/* initializes shmem_base and waits for the validity signature to appear */
10773static int
10774bxe_init_shmem(struct bxe_softc *sc)
10775{
10776    int cnt = 0;
10777    uint32_t val = 0;
10778
10779    do {
10780        sc->devinfo.shmem_base     =
10781        sc->link_params.shmem_base =
10782            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10783
10784        if (sc->devinfo.shmem_base) {
10785            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10786            if (val & SHR_MEM_VALIDITY_MB)
10787                return (0);
10788        }
10789
10790        bxe_mcp_wait_one(sc);
10791
10792    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10793
10794    BLOGE(sc, "BAD MCP validity signature\n");
10795
10796    return (-1);
10797}
10798
10799static int
10800bxe_reset_mcp_comp(struct bxe_softc *sc,
10801                   uint32_t         magic_val)
10802{
10803    int rc = bxe_init_shmem(sc);
10804
10805    /* Restore the `magic' bit value */
10806    if (!CHIP_IS_E1(sc)) {
10807        bxe_clp_reset_done(sc, magic_val);
10808    }
10809
10810    return (rc);
10811}
10812
10813static void
10814bxe_pxp_prep(struct bxe_softc *sc)
10815{
10816    if (!CHIP_IS_E1(sc)) {
10817        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10818        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10819        wmb();
10820    }
10821}
10822
10823/*
10824 * Reset the whole chip except for:
10825 *      - PCIE core
10826 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10827 *      - IGU
10828 *      - MISC (including AEU)
10829 *      - GRC
10830 *      - RBCN, RBCP
10831 */
10832static void
10833bxe_process_kill_chip_reset(struct bxe_softc *sc,
10834                            uint8_t          global)
10835{
10836    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10837    uint32_t global_bits2, stay_reset2;
10838
10839    /*
10840     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10841     * (per chip) blocks.
10842     */
10843    global_bits2 =
10844        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10845        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10846
10847    /*
10848     * Don't reset the following blocks.
10849     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10850     *            reset, as in a 4-port device they might still be owned
10851     *            by the MCP (there is only one leader per path).
10852     */
10853    not_reset_mask1 =
10854        MISC_REGISTERS_RESET_REG_1_RST_HC |
10855        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10856        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10857
10858    not_reset_mask2 =
10859        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10860        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10861        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10862        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10863        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10864        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10865        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10866        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10867        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10868        MISC_REGISTERS_RESET_REG_2_PGLC |
10869        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10870        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10871        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10872        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10873        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10874        MISC_REGISTERS_RESET_REG_2_UMAC1;
10875
10876    /*
10877     * Keep the following blocks in reset:
10878     *  - all xxMACs are handled by the elink code.
10879     */
10880    stay_reset2 =
10881        MISC_REGISTERS_RESET_REG_2_XMAC |
10882        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10883
10884    /* Full reset masks according to the chip */
10885    reset_mask1 = 0xffffffff;
10886
10887    if (CHIP_IS_E1(sc))
10888        reset_mask2 = 0xffff;
10889    else if (CHIP_IS_E1H(sc))
10890        reset_mask2 = 0x1ffff;
10891    else if (CHIP_IS_E2(sc))
10892        reset_mask2 = 0xfffff;
10893    else /* CHIP_IS_E3 */
10894        reset_mask2 = 0x3ffffff;
10895
10896    /* Don't reset global blocks unless we need to */
10897    if (!global)
10898        reset_mask2 &= ~global_bits2;
10899
10900    /*
10901     * In case of attention in the QM, we need to reset PXP
10902     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10903     * because otherwise the QM reset would release the 'close the gates'
10904     * setting shortly before resetting the PXP; the PSWRQ would then send
10905     * a write request to PGLUE. When the PXP is reset afterwards, PGLUE
10906     * would try to read the payload data from PSWWR, but PSWWR would not
10907     * respond. The write queue in PGLUE would get stuck and DMAE commands
10908     * would never return. Therefore it's important to reset the second
10909     * reset register (containing the
10910     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10911     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10912     * bit).
10913     */
10914    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10915           reset_mask2 & (~not_reset_mask2));
10916
10917    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10918           reset_mask1 & (~not_reset_mask1));
10919
10920    mb();
10921    wmb();
10922
10923    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10924           reset_mask2 & (~stay_reset2));
10925
10926    mb();
10927    wmb();
10928
10929    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10930    wmb();
10931}
10932
10933static int
10934bxe_process_kill(struct bxe_softc *sc,
10935                 uint8_t          global)
10936{
10937    int cnt = 1000;
10938    uint32_t val = 0;
10939    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10940    uint32_t tags_63_32 = 0;
10941
10942    /* Empty the Tetris buffer, wait for 1s */
10943    do {
10944        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10945        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10946        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10947        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10948        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10949        if (CHIP_IS_E3(sc)) {
10950            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10951        }
10952
10953        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10954            ((port_is_idle_0 & 0x1) == 0x1) &&
10955            ((port_is_idle_1 & 0x1) == 0x1) &&
10956            (pgl_exp_rom2 == 0xffffffff) &&
10957            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
10958            break;
10959        DELAY(1000);
10960    } while (cnt-- > 0);
10961
10962    if (cnt <= 0) {
10963        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
10964                  "are still outstanding read requests after 1s! "
10965                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
10966                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
10967              sr_cnt, blk_cnt, port_is_idle_0,
10968              port_is_idle_1, pgl_exp_rom2);
10969        return (-1);
10970    }
10971
10972    mb();
10973
10974    /* Close gates #2, #3 and #4 */
10975    bxe_set_234_gates(sc, TRUE);
10976
10977    /* Poll for IGU VQs for 57712 and newer chips */
10978    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
10979        return (-1);
10980    }
10981
10982    /* XXX indicate that "process kill" is in progress to MCP */
10983
10984    /* clear "unprepared" bit */
10985    REG_WR(sc, MISC_REG_UNPREPARED, 0);
10986    mb();
10987
10988    /* Make sure all is written to the chip before the reset */
10989    wmb();
10990
10991    /*
10992     * Wait for 1ms to empty GLUE and PCI-E core queues,
10993     * PSWHST, GRC and PSWRD Tetris buffer.
10994     */
10995    DELAY(1000);
10996
10997    /* Prepare for chip reset: */
10998    /* MCP */
10999    if (global) {
11000        bxe_reset_mcp_prep(sc, &val);
11001    }
11002
11003    /* PXP */
11004    bxe_pxp_prep(sc);
11005    mb();
11006
11007    /* reset the chip */
11008    bxe_process_kill_chip_reset(sc, global);
11009    mb();
11010
11011    /* clear errors in PGB */
11012    if (!CHIP_IS_E1(sc))
11013        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11014
11015    /* Recover after reset: */
11016    /* MCP */
11017    if (global && bxe_reset_mcp_comp(sc, val)) {
11018        return (-1);
11019    }
11020
11021    /* XXX add resetting the NO_MCP mode DB here */
11022
11023    /* Open the gates #2, #3 and #4 */
11024    bxe_set_234_gates(sc, FALSE);
11025
11026    /* XXX
11027     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11028     * re-enable attentions
11029     */
11030
11031    return (0);
11032}
11033
11034static int
11035bxe_leader_reset(struct bxe_softc *sc)
11036{
11037    int rc = 0;
11038    uint8_t global = bxe_reset_is_global(sc);
11039    uint32_t load_code;
11040
11041    /*
11042     * If we are not going to reset the MCP, load a "fake" driver to reset
11043     * the HW while this driver is still the owner of the HW.
11044     */
11045    if (!global && !BXE_NOMCP(sc)) {
11046        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11047                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11048        if (!load_code) {
11049            BLOGE(sc, "MCP response failure, aborting\n");
11050            rc = -1;
11051            goto exit_leader_reset;
11052        }
11053
11054        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11055            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11056            BLOGE(sc, "MCP unexpected response, aborting\n");
11057            rc = -1;
11058            goto exit_leader_reset2;
11059        }
11060
11061        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11062        if (!load_code) {
11063            BLOGE(sc, "MCP response failure, aborting\n");
11064            rc = -1;
11065            goto exit_leader_reset2;
11066        }
11067    }
11068
11069    /* try to recover after the failure */
11070    if (bxe_process_kill(sc, global)) {
11071        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11072        rc = -1;
11073        goto exit_leader_reset2;
11074    }
11075
11076    /*
11077     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11078     * state.
11079     */
11080    bxe_set_reset_done(sc);
11081    if (global) {
11082        bxe_clear_reset_global(sc);
11083    }
11084
11085exit_leader_reset2:
11086
11087    /* unload "fake driver" if it was loaded */
11088    if (!global && !BXE_NOMCP(sc)) {
11089        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11090        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11091    }
11092
11093exit_leader_reset:
11094
11095    sc->is_leader = 0;
11096    bxe_release_leader_lock(sc);
11097
11098    mb();
11099    return (rc);
11100}
11101
11102/*
11103 * prepare INIT transition, parameters configured:
11104 *   - HC configuration
11105 *   - Queue's CDU context
11106 */
11107static void
11108bxe_pf_q_prep_init(struct bxe_softc               *sc,
11109                   struct bxe_fastpath            *fp,
11110                   struct ecore_queue_init_params *init_params)
11111{
11112    uint8_t cos;
11113    int cxt_index, cxt_offset;
11114
11115    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11116    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11117
11118    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11119    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11120
11121    /* HC rate */
11122    init_params->rx.hc_rate =
11123        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11124    init_params->tx.hc_rate =
11125        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11126
11127    /* FW SB ID */
11128    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11129
11130    /* CQ index among the SB indices */
11131    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11132    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11133
11134    /* set maximum number of COSs supported by this queue */
11135    init_params->max_cos = sc->max_cos;
11136
11137    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11138          fp->index, init_params->max_cos);
11139
11140    /* set the context pointers queue object */
11141    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11142        /* XXX change index/cid here if ever support multiple tx CoS */
11143        /* fp->txdata[cos]->cid */
11144        cxt_index = fp->index / ILT_PAGE_CIDS;
11145        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11146        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11147    }
11148}
11149
11150/* set flags that are common for the Tx-only and not normal connections */
11151static unsigned long
11152bxe_get_common_flags(struct bxe_softc    *sc,
11153                     struct bxe_fastpath *fp,
11154                     uint8_t             zero_stats)
11155{
11156    unsigned long flags = 0;
11157
11158    /* PF driver will always initialize the Queue to an ACTIVE state */
11159    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11160
11161    /*
11162     * tx only connections collect statistics (on the same index as the
11163     * parent connection). The statistics are zeroed when the parent
11164     * connection is initialized.
11165     */
11166
11167    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11168    if (zero_stats) {
11169        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11170    }
11171
11172    /*
11173     * tx only connections can support tx-switching, though their
11174     * CoS-ness doesn't survive the loopback
11175     */
11176    if (sc->flags & BXE_TX_SWITCHING) {
11177        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11178    }
11179
11180    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11181
11182    return (flags);
11183}
11184
11185static unsigned long
11186bxe_get_q_flags(struct bxe_softc    *sc,
11187                struct bxe_fastpath *fp,
11188                uint8_t             leading)
11189{
11190    unsigned long flags = 0;
11191
11192    if (IS_MF_SD(sc)) {
11193        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11194    }
11195
11196    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11197        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11198        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11199    }
11200
11201    if (leading) {
11202        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11203        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11204    }
11205
11206    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11207
11208    /* merge with common flags */
11209    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11210}
11211
11212static void
11213bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11214                      struct bxe_fastpath               *fp,
11215                      struct ecore_general_setup_params *gen_init,
11216                      uint8_t                           cos)
11217{
11218    gen_init->stat_id = bxe_stats_id(fp);
11219    gen_init->spcl_id = fp->cl_id;
11220    gen_init->mtu = sc->mtu;
11221    gen_init->cos = cos;
11222}
11223
11224static void
11225bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11226                 struct bxe_fastpath           *fp,
11227                 struct rxq_pause_params       *pause,
11228                 struct ecore_rxq_setup_params *rxq_init)
11229{
11230    uint8_t max_sge = 0;
11231    uint16_t sge_sz = 0;
11232    uint16_t tpa_agg_size = 0;
11233
11234    pause->sge_th_lo = SGE_TH_LO(sc);
11235    pause->sge_th_hi = SGE_TH_HI(sc);
11236
11237    /* validate the SGE ring has enough entries to cross the high threshold */
11238    if (sc->dropless_fc &&
11239            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11240            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11241        BLOGW(sc, "sge ring threshold limit\n");
11242    }
11243
11244    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11245    tpa_agg_size = (2 * sc->mtu);
11246    if (tpa_agg_size < sc->max_aggregation_size) {
11247        tpa_agg_size = sc->max_aggregation_size;
11248    }
11249
11250    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11251    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11252                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11253    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
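    /*
     * Worked example with assumed (illustrative) constants: for a 9000 byte
     * MTU, 4KB SGE pages and PAGES_PER_SGE == 2, SGE_PAGE_ALIGN(9000) is
     * 12288, i.e. 3 pages, which rounds up to 4 pages or max_sge == 2 SGEs;
     * tpa_agg_size is at least 2 * 9000 = 18000 bytes.
     */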
11254
11255    /* pause - not for e1 */
11256    if (!CHIP_IS_E1(sc)) {
11257        pause->bd_th_lo = BD_TH_LO(sc);
11258        pause->bd_th_hi = BD_TH_HI(sc);
11259
11260        pause->rcq_th_lo = RCQ_TH_LO(sc);
11261        pause->rcq_th_hi = RCQ_TH_HI(sc);
11262
11263        /* validate rings have enough entries to cross high thresholds */
11264        if (sc->dropless_fc &&
11265            pause->bd_th_hi + FW_PREFETCH_CNT >
11266            sc->rx_ring_size) {
11267            BLOGW(sc, "rx bd ring threshold limit\n");
11268        }
11269
11270        if (sc->dropless_fc &&
11271            pause->rcq_th_hi + FW_PREFETCH_CNT >
11272            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11273            BLOGW(sc, "rcq ring threshold limit\n");
11274        }
11275
11276        pause->pri_map = 1;
11277    }
11278
11279    /* rxq setup */
11280    rxq_init->dscr_map   = fp->rx_dma.paddr;
11281    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11282    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11283    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11284
11285    /*
11286     * This should be the maximum number of data bytes that may be
11287     * placed on the BD (not including padding).
11288     */
11289    rxq_init->buf_sz = (fp->rx_buf_size -
11290                        IP_HEADER_ALIGNMENT_PADDING);
11291
11292    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11293    rxq_init->tpa_agg_sz      = tpa_agg_size;
11294    rxq_init->sge_buf_sz      = sge_sz;
11295    rxq_init->max_sges_pkt    = max_sge;
11296    rxq_init->rss_engine_id   = SC_FUNC(sc);
11297    rxq_init->mcast_engine_id = SC_FUNC(sc);
11298
11299    /*
11300     * Maximum number of simultaneous TPA aggregations for this Queue.
11301     * For PF Clients it should be the maximum available number.
11302     * VF driver(s) may want to define it to a smaller value.
11303     */
11304    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11305
11306    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11307    rxq_init->fw_sb_id = fp->fw_sb_id;
11308
11309    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11310
11311    /*
11312     * configure silent vlan removal
11313     * if multi function mode is afex, then mask default vlan
11314     */
11315    if (IS_MF_AFEX(sc)) {
11316        rxq_init->silent_removal_value =
11317            sc->devinfo.mf_info.afex_def_vlan_tag;
11318        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11319    }
11320}
11321
11322static void
11323bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11324                 struct bxe_fastpath           *fp,
11325                 struct ecore_txq_setup_params *txq_init,
11326                 uint8_t                       cos)
11327{
11328    /*
11329     * XXX If multiple CoS is ever supported then each fastpath structure
11330     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11331     * fp->txdata[cos]->tx_dma.paddr;
11332     */
11333    txq_init->dscr_map     = fp->tx_dma.paddr;
11334    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11335    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11336    txq_init->fw_sb_id     = fp->fw_sb_id;
11337
11338    /*
11339     * set the TSS leading client id for TX classification to the
11340     * leading RSS client id
11341     */
11342    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11343}
11344
11345/*
11346 * This function performs 2 steps in a queue state machine:
11347 *   1) RESET->INIT
11348 *   2) INIT->SETUP
11349 */
11350static int
11351bxe_setup_queue(struct bxe_softc    *sc,
11352                struct bxe_fastpath *fp,
11353                uint8_t             leading)
11354{
11355    struct ecore_queue_state_params q_params = { NULL };
11356    struct ecore_queue_setup_params *setup_params =
11357                        &q_params.params.setup;
11358    int rc;
11359
11360    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11361
11362    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11363
11364    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11365
11366    /* we want to wait for completion in this context */
11367    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11368
11369    /* prepare the INIT parameters */
11370    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11371
11372    /* Set the command */
11373    q_params.cmd = ECORE_Q_CMD_INIT;
11374
11375    /* Change the state to INIT */
11376    rc = ecore_queue_state_change(sc, &q_params);
11377    if (rc) {
11378        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11379        return (rc);
11380    }
11381
11382    BLOGD(sc, DBG_LOAD, "init complete\n");
11383
11384    /* now move the Queue to the SETUP state */
11385    memset(setup_params, 0, sizeof(*setup_params));
11386
11387    /* set Queue flags */
11388    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11389
11390    /* set general SETUP parameters */
11391    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11392                          FIRST_TX_COS_INDEX);
11393
11394    bxe_pf_rx_q_prep(sc, fp,
11395                     &setup_params->pause_params,
11396                     &setup_params->rxq_params);
11397
11398    bxe_pf_tx_q_prep(sc, fp,
11399                     &setup_params->txq_params,
11400                     FIRST_TX_COS_INDEX);
11401
11402    /* Set the command */
11403    q_params.cmd = ECORE_Q_CMD_SETUP;
11404
11405    /* change the state to SETUP */
11406    rc = ecore_queue_state_change(sc, &q_params);
11407    if (rc) {
11408        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11409        return (rc);
11410    }
11411
11412    return (rc);
11413}
11414
11415static int
11416bxe_setup_leading(struct bxe_softc *sc)
11417{
11418    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11419}
11420
11421static int
11422bxe_config_rss_pf(struct bxe_softc            *sc,
11423                  struct ecore_rss_config_obj *rss_obj,
11424                  uint8_t                     config_hash)
11425{
11426    struct ecore_config_rss_params params = { NULL };
11427    int i;
11428
11429    /*
11430     * Although RSS is meaningless when there is a single HW queue, we
11431     * still need it enabled in order to have the HW Rx hash generated.
11432     */
11433
11434    params.rss_obj = rss_obj;
11435
11436    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11437
11438    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11439
11440    /* RSS configuration */
11441    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11442    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11443    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11444    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11445    if (rss_obj->udp_rss_v4) {
11446        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11447    }
11448    if (rss_obj->udp_rss_v6) {
11449        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11450    }
11451
11452    /* Hash bits */
11453    params.rss_result_mask = MULTI_MASK;
11454
11455    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11456
11457    if (config_hash) {
11458        /* RSS keys */
11459        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11460            params.rss_key[i] = arc4random();
11461        }
11462
11463        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11464    }
11465
11466    return (ecore_config_rss(sc, &params));
11467}
11468
11469static int
11470bxe_config_rss_eth(struct bxe_softc *sc,
11471                   uint8_t          config_hash)
11472{
11473    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11474}
11475
11476static int
11477bxe_init_rss_pf(struct bxe_softc *sc)
11478{
11479    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11480    int i;
11481
11482    /*
11483     * Prepare the initial contents of the indirection table if
11484     * RSS is enabled
11485     */
11486    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11487        sc->rss_conf_obj.ind_table[i] =
11488            (sc->fp->cl_id + (i % num_eth_queues));
11489    }
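    /*
     * Example of the resulting table (values are illustrative): with 4 ETH
     * queues and fp[0].cl_id == 16, ind_table[] becomes
     * { 16, 17, 18, 19, 16, 17, ... } repeated across all entries.
     */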
11490
11491    if (sc->udp_rss) {
11492        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11493    }
11494
11495    /*
11496     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11497     * per-port, so if explicit configuration is needed, do it only
11498     * for a PMF.
11499     *
11500     * For 57712 and newer it's a per-function configuration.
11501     */
11502    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11503}
11504
11505static int
11506bxe_set_mac_one(struct bxe_softc          *sc,
11507                uint8_t                   *mac,
11508                struct ecore_vlan_mac_obj *obj,
11509                uint8_t                   set,
11510                int                       mac_type,
11511                unsigned long             *ramrod_flags)
11512{
11513    struct ecore_vlan_mac_ramrod_params ramrod_param;
11514    int rc;
11515
11516    memset(&ramrod_param, 0, sizeof(ramrod_param));
11517
11518    /* fill in general parameters */
11519    ramrod_param.vlan_mac_obj = obj;
11520    ramrod_param.ramrod_flags = *ramrod_flags;
11521
11522    /* fill a user request section if needed */
11523    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11524        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11525
11526        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11527
11528        /* Set the command: ADD or DEL */
11529        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11530                                            ECORE_VLAN_MAC_DEL;
11531    }
11532
11533    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11534
11535    if (rc == ECORE_EXISTS) {
11536        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11537        /* do not treat adding same MAC as error */
11538        rc = 0;
11539    } else if (rc < 0) {
11540        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11541    }
11542
11543    return (rc);
11544}
11545
11546static int
11547bxe_set_eth_mac(struct bxe_softc *sc,
11548                uint8_t          set)
11549{
11550    unsigned long ramrod_flags = 0;
11551
11552    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11553
11554    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11555
11556    /* Eth MAC is set on RSS leading client (fp[0]) */
11557    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11558                            &sc->sp_objs->mac_obj,
11559                            set, ECORE_ETH_MAC, &ramrod_flags));
11560}
11561
11562static int
11563bxe_get_cur_phy_idx(struct bxe_softc *sc)
11564{
11565    uint32_t sel_phy_idx = 0;
11566
11567    if (sc->link_params.num_phys <= 1) {
11568        return (ELINK_INT_PHY);
11569    }
11570
11571    if (sc->link_vars.link_up) {
11572        sel_phy_idx = ELINK_EXT_PHY1;
11573        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11574        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11575            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11576             ELINK_SUPPORTED_FIBRE))
11577            sel_phy_idx = ELINK_EXT_PHY2;
11578    } else {
11579        switch (elink_phy_selection(&sc->link_params)) {
11580        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11581        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11582        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11583               sel_phy_idx = ELINK_EXT_PHY1;
11584               break;
11585        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11586        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11587               sel_phy_idx = ELINK_EXT_PHY2;
11588               break;
11589        }
11590    }
11591
11592    return (sel_phy_idx);
11593}
11594
11595static int
11596bxe_get_link_cfg_idx(struct bxe_softc *sc)
11597{
11598    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11599
11600    /*
11601     * The selected active PHY is always the one after swapping (when PHY
11602     * swapping is enabled), so when swapping is enabled we need to
11603     * reverse the configuration.
11604     */
11605
11606    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11607        if (sel_phy_idx == ELINK_EXT_PHY1)
11608            sel_phy_idx = ELINK_EXT_PHY2;
11609        else if (sel_phy_idx == ELINK_EXT_PHY2)
11610            sel_phy_idx = ELINK_EXT_PHY1;
11611    }
11612
11613    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11614}
11615
11616static void
11617bxe_set_requested_fc(struct bxe_softc *sc)
11618{
11619    /*
11620     * Initialize the link parameters structure variables.
11621     * It is recommended to turn off RX flow control for jumbo frames
11622     * for better performance.
11623     */
11624    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11625        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11626    } else {
11627        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11628    }
11629}
11630
11631static void
11632bxe_calc_fc_adv(struct bxe_softc *sc)
11633{
11634    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11635    switch (sc->link_vars.ieee_fc &
11636            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11637    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11638    default:
11639        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11640                                           ADVERTISED_Pause);
11641        break;
11642
11643    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11644        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11645                                          ADVERTISED_Pause);
11646        break;
11647
11648    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11649        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11650        break;
11651    }
11652}
11653
11654static uint16_t
11655bxe_get_mf_speed(struct bxe_softc *sc)
11656{
11657    uint16_t line_speed = sc->link_vars.line_speed;
11658    if (IS_MF(sc)) {
11659        uint16_t maxCfg =
11660            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11661
11662        /* calculate the current MAX line speed limit for the MF devices */
11663        if (IS_MF_SI(sc)) {
11664            line_speed = (line_speed * maxCfg) / 100;
11665        } else { /* SD mode */
11666            uint16_t vn_max_rate = maxCfg * 100;
11667
11668            if (vn_max_rate < line_speed) {
11669                line_speed = vn_max_rate;
11670            }
11671        }
11672    }
11673
11674    return (line_speed);
11675}
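
/*
 * Worked example of the MF speed math above (the maxCfg value is
 * illustrative): on a 10000 Mbps link with maxCfg == 25, SI mode treats
 * maxCfg as a percentage and reports 10000 * 25 / 100 = 2500 Mbps, while SD
 * mode treats it as a multiple of 100 Mbps and caps the speed at
 * 25 * 100 = 2500 Mbps.
 */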
11676
11677static void
11678bxe_fill_report_data(struct bxe_softc            *sc,
11679                     struct bxe_link_report_data *data)
11680{
11681    uint16_t line_speed = bxe_get_mf_speed(sc);
11682
11683    memset(data, 0, sizeof(*data));
11684
11685    /* fill the report data with the effective line speed */
11686    data->line_speed = line_speed;
11687
11688    /* Link is down */
11689    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11690        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11691    }
11692
11693    /* Full DUPLEX */
11694    if (sc->link_vars.duplex == DUPLEX_FULL) {
11695        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11696    }
11697
11698    /* Rx Flow Control is ON */
11699    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11700        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11701    }
11702
11703    /* Tx Flow Control is ON */
11704    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11705        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11706    }
11707}
11708
11709/* report link status to OS, should be called under phy_lock */
11710static void
11711bxe_link_report_locked(struct bxe_softc *sc)
11712{
11713    struct bxe_link_report_data cur_data;
11714
11715    /* reread mf_cfg */
11716    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11717        bxe_read_mf_cfg(sc);
11718    }
11719
11720    /* Read the current link report info */
11721    bxe_fill_report_data(sc, &cur_data);
11722
11723    /* Don't report link down or exactly the same link status twice */
11724    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11725        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11726                      &sc->last_reported_link.link_report_flags) &&
11727         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11728                      &cur_data.link_report_flags))) {
11729        return;
11730    }
11731
11732    sc->link_cnt++;
11733
11734    /* report new link params and remember the state for the next time */
11735    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11736
11737    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11738                     &cur_data.link_report_flags)) {
11739        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11740        BLOGI(sc, "NIC Link is Down\n");
11741    } else {
11742        const char *duplex;
11743        const char *flow;
11744
11745        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11746                                   &cur_data.link_report_flags)) {
11747            duplex = "full";
11748        } else {
11749            duplex = "half";
11750        }
11751
11752        /*
11753         * Handle the FC at the end so that only the FC flags can still
11754         * be set. This way we can easily check whether any FC is
11755         * enabled.
11756         */
11757        if (cur_data.link_report_flags) {
11758            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11759                             &cur_data.link_report_flags) &&
11760                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11761                             &cur_data.link_report_flags)) {
11762                flow = "ON - receive & transmit";
11763            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11764                                    &cur_data.link_report_flags) &&
11765                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11766                                     &cur_data.link_report_flags)) {
11767                flow = "ON - receive";
11768            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11769                                     &cur_data.link_report_flags) &&
11770                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11771                                    &cur_data.link_report_flags)) {
11772                flow = "ON - transmit";
11773            } else {
11774                flow = "none"; /* possible? */
11775            }
11776        } else {
11777            flow = "none";
11778        }
11779
11780        if_link_state_change(sc->ifp, LINK_STATE_UP);
11781        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11782              cur_data.line_speed, duplex, flow);
11783    }
11784}
11785
11786static void
11787bxe_link_report(struct bxe_softc *sc)
11788{
11789    bxe_acquire_phy_lock(sc);
11790    bxe_link_report_locked(sc);
11791    bxe_release_phy_lock(sc);
11792}
11793
11794static void
11795bxe_link_status_update(struct bxe_softc *sc)
11796{
11797    if (sc->state != BXE_STATE_OPEN) {
11798        return;
11799    }
11800
11801    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11802        elink_link_status_update(&sc->link_params, &sc->link_vars);
11803    } else {
11804        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11805                                  ELINK_SUPPORTED_10baseT_Full |
11806                                  ELINK_SUPPORTED_100baseT_Half |
11807                                  ELINK_SUPPORTED_100baseT_Full |
11808                                  ELINK_SUPPORTED_1000baseT_Full |
11809                                  ELINK_SUPPORTED_2500baseX_Full |
11810                                  ELINK_SUPPORTED_10000baseT_Full |
11811                                  ELINK_SUPPORTED_TP |
11812                                  ELINK_SUPPORTED_FIBRE |
11813                                  ELINK_SUPPORTED_Autoneg |
11814                                  ELINK_SUPPORTED_Pause |
11815                                  ELINK_SUPPORTED_Asym_Pause);
11816        sc->port.advertising[0] = sc->port.supported[0];
11817
11818        sc->link_params.sc                = sc;
11819        sc->link_params.port              = SC_PORT(sc);
11820        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11821        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11822        sc->link_params.req_line_speed[0] = SPEED_10000;
11823        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11824        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11825
11826        if (CHIP_REV_IS_FPGA(sc)) {
11827            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11828            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11829            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11830                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11831        } else {
11832            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11833            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11834            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11835                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11836        }
11837
11838        sc->link_vars.link_up = 1;
11839
11840        sc->link_vars.duplex    = DUPLEX_FULL;
11841        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11842
11843        if (IS_PF(sc)) {
11844            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11845            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11846            bxe_link_report(sc);
11847        }
11848    }
11849
11850    if (IS_PF(sc)) {
11851        if (sc->link_vars.link_up) {
11852            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11853        } else {
11854            bxe_stats_handle(sc, STATS_EVENT_STOP);
11855        }
11856        bxe_link_report(sc);
11857    } else {
11858        bxe_link_report(sc);
11859        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11860    }
11861}
11862
11863static int
11864bxe_initial_phy_init(struct bxe_softc *sc,
11865                     int              load_mode)
11866{
11867    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11868    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11869    struct elink_params *lp = &sc->link_params;
11870
11871    bxe_set_requested_fc(sc);
11872
11873    if (CHIP_REV_IS_SLOW(sc)) {
11874        uint32_t bond = CHIP_BOND_ID(sc);
11875        uint32_t feat = 0;
11876
11877        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11878            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11879        } else if (bond & 0x4) {
11880            if (CHIP_IS_E3(sc)) {
11881                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11882            } else {
11883                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11884            }
11885        } else if (bond & 0x8) {
11886            if (CHIP_IS_E3(sc)) {
11887                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11888            } else {
11889                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11890            }
11891        }
11892
11893        /* disable EMAC for E3 and above */
11894        if (bond & 0x2) {
11895            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11896        }
11897
11898        sc->link_params.feature_config_flags |= feat;
11899    }
11900
11901    bxe_acquire_phy_lock(sc);
11902
11903    if (load_mode == LOAD_DIAG) {
11904        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11905        /* Prefer doing PHY loopback at 10G speed, if possible */
11906        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11907            if (lp->speed_cap_mask[cfg_idx] &
11908                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11909                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11910            } else {
11911                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11912            }
11913        }
11914    }
11915
11916    if (load_mode == LOAD_LOOPBACK_EXT) {
11917        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11918    }
11919
11920    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11921
11922    bxe_release_phy_lock(sc);
11923
11924    bxe_calc_fc_adv(sc);
11925
11926    if (sc->link_vars.link_up) {
11927        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11928        bxe_link_report(sc);
11929    }
11930
11931    if (!CHIP_REV_IS_SLOW(sc)) {
11932        bxe_periodic_start(sc);
11933    }
11934
11935    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11936    return (rc);
11937}
11938
11939/* must be called under IF_ADDR_LOCK */
11940
11941static int
11942bxe_set_mc_list(struct bxe_softc *sc)
11943{
11944    struct ecore_mcast_ramrod_params rparam = { NULL };
11945    int rc = 0;
11946    int mc_count = 0;
11947    int mcnt, i;
11948    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
11949    unsigned char *mta;
11950    if_t ifp = sc->ifp;
11951
11952    mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */
11953    if (!mc_count)
11954        return (0);
11955
11956    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
11957            mc_count, M_DEVBUF, M_NOWAIT);
11958
11959    if (mta == NULL) {
11960        BLOGE(sc, "Failed to allocate temp mcast list\n");
11961        return (-1);
11962    }
11963    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
11964
11965    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
11966    mc_mac_start = mc_mac;
11967
11968    if (!mc_mac) {
11969        free(mta, M_DEVBUF);
11970        BLOGE(sc, "Failed to allocate temp mcast list\n");
11971        return (-1);
11972    }
11973    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
11974
11975    /* mta and mcnt are not expected to differ */
11976    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
11977
11978
11979    rparam.mcast_obj = &sc->mcast_obj;
11980    ECORE_LIST_INIT(&rparam.mcast_list);
11981
11982    for (i = 0; i < mcnt; i++) {
11983
11984        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
11985        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
11986
11987        BLOGD(sc, DBG_LOAD,
11988              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
11989              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
11990              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
11991
11992        mc_mac++;
11993    }
11994    rparam.mcast_list_len = mc_count;
11995
11996    BXE_MCAST_LOCK(sc);
11997
11998    /* first, clear all configured multicast MACs */
11999    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12000    if (rc < 0) {
12001        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12002        BXE_MCAST_UNLOCK(sc);
12003        free(mc_mac_start, M_DEVBUF);
12004        free(mta, M_DEVBUF);
12005        return (rc);
12006    }
12007
12008    /* Now add the new MACs */
12009    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12010    if (rc < 0) {
12011        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12012    }
12013
12014    BXE_MCAST_UNLOCK(sc);
12015
12016    free(mc_mac_start, M_DEVBUF);
12017    free(mta, M_DEVBUF);
12018
12019    return (rc);
12020}
12021
12022static int
12023bxe_set_uc_list(struct bxe_softc *sc)
12024{
12025    if_t ifp = sc->ifp;
12026    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12027    struct ifaddr *ifa;
12028    unsigned long ramrod_flags = 0;
12029    int rc;
12030
12031#if __FreeBSD_version < 800000
12032    IF_ADDR_LOCK(ifp);
12033#else
12034    if_addr_rlock(ifp);
12035#endif
12036
12037    /* first schedule a cleanup of the old configuration */
12038    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12039    if (rc < 0) {
12040        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12041#if __FreeBSD_version < 800000
12042        IF_ADDR_UNLOCK(ifp);
12043#else
12044        if_addr_runlock(ifp);
12045#endif
12046        return (rc);
12047    }
12048
12049    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12050    while (ifa) {
12051        if (ifa->ifa_addr->sa_family != AF_LINK) {
12052            ifa = TAILQ_NEXT(ifa, ifa_link);
12053            continue;
12054        }
12055
12056        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12057                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12058        if (rc == -EEXIST) {
12059            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12060            /* do not treat adding same MAC as an error */
12061            rc = 0;
12062        } else if (rc < 0) {
12063            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12064#if __FreeBSD_version < 800000
12065            IF_ADDR_UNLOCK(ifp);
12066#else
12067            if_addr_runlock(ifp);
12068#endif
12069            return (rc);
12070        }
12071
12072        ifa = TAILQ_NEXT(ifa, ifa_link);
12073    }
12074
12075#if __FreeBSD_version < 800000
12076    IF_ADDR_UNLOCK(ifp);
12077#else
12078    if_addr_runlock(ifp);
12079#endif
12080
12081    /* Execute the pending commands */
12082    bit_set(&ramrod_flags, RAMROD_CONT);
12083    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12084                            ECORE_UC_LIST_MAC, &ramrod_flags));
12085}
12086
12087static void
12088bxe_set_rx_mode(struct bxe_softc *sc)
12089{
12090    if_t ifp = sc->ifp;
12091    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12092
12093    if (sc->state != BXE_STATE_OPEN) {
12094        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12095        return;
12096    }
12097
12098    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12099
12100    if (if_getflags(ifp) & IFF_PROMISC) {
12101        rx_mode = BXE_RX_MODE_PROMISC;
12102    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12103               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12104                CHIP_IS_E1(sc))) {
12105        rx_mode = BXE_RX_MODE_ALLMULTI;
12106    } else {
12107        if (IS_PF(sc)) {
12108            /* some multicasts */
12109            if (bxe_set_mc_list(sc) < 0) {
12110                rx_mode = BXE_RX_MODE_ALLMULTI;
12111            }
12112            if (bxe_set_uc_list(sc) < 0) {
12113                rx_mode = BXE_RX_MODE_PROMISC;
12114            }
12115        }
12116    }
12117
12118    sc->rx_mode = rx_mode;
12119
12120    /* schedule the rx_mode command */
12121    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12122        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12123        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12124        return;
12125    }
12126
12127    if (IS_PF(sc)) {
12128        bxe_set_storm_rx_mode(sc);
12129    }
12130}
12131
12132
12133/* update flags in shmem */
12134static void
12135bxe_update_drv_flags(struct bxe_softc *sc,
12136                     uint32_t         flags,
12137                     uint32_t         set)
12138{
12139    uint32_t drv_flags;
12140
12141    if (SHMEM2_HAS(sc, drv_flags)) {
12142        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12143        drv_flags = SHMEM2_RD(sc, drv_flags);
12144
12145        if (set) {
12146            SET_FLAGS(drv_flags, flags);
12147        } else {
12148            RESET_FLAGS(drv_flags, flags);
12149        }
12150
12151        SHMEM2_WR(sc, drv_flags, drv_flags);
12152        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12153
12154        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12155    }
12156}
12157
12158/* periodic timer callout routine, only runs when the interface is up */
12159
12160static void
12161bxe_periodic_callout_func(void *xsc)
12162{
12163    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12164    int i;
12165
12166    if (!BXE_CORE_TRYLOCK(sc)) {
12167        /* just bail and try again next time */
12168
12169        if ((sc->state == BXE_STATE_OPEN) &&
12170            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12171            /* schedule the next periodic callout */
12172            callout_reset(&sc->periodic_callout, hz,
12173                          bxe_periodic_callout_func, sc);
12174        }
12175
12176        return;
12177    }
12178
12179    if ((sc->state != BXE_STATE_OPEN) ||
12180        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12181        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12182        BXE_CORE_UNLOCK(sc);
12183        return;
12184    }
12185
12186
12187    /* Check for TX timeouts on any fastpath. */
12188    FOR_EACH_QUEUE(sc, i) {
12189        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12190            /* Ruh-Roh, chip was reset! */
12191            break;
12192        }
12193    }
12194
12195    if (!CHIP_REV_IS_SLOW(sc)) {
12196        /*
12197         * This barrier is needed to ensure the ordering between the writing
12198         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12199         * the reading here.
12200         */
12201        mb();
12202        if (sc->port.pmf) {
12203            bxe_acquire_phy_lock(sc);
12204            elink_period_func(&sc->link_params, &sc->link_vars);
12205            bxe_release_phy_lock(sc);
12206        }
12207    }
12208
12209    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12210        int mb_idx = SC_FW_MB_IDX(sc);
12211        uint32_t drv_pulse;
12212        uint32_t mcp_pulse;
12213
12214        ++sc->fw_drv_pulse_wr_seq;
12215        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12216
12217        drv_pulse = sc->fw_drv_pulse_wr_seq;
12218        bxe_drv_pulse(sc);
12219
12220        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12221                     MCP_PULSE_SEQ_MASK);
12222
12223        /*
12224         * The delta between driver pulse and mcp response should
12225         * be 1 (before mcp response) or 0 (after mcp response).
12226         */
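        /*
         * Illustrative wrap-around case (the mask value is assumed): with a
         * DRV_PULSE_SEQ_MASK of 0x7fff, drv_pulse 0x0000 against mcp_pulse
         * 0x7fff is still in sync because (0x7fff + 1) & 0x7fff == 0x0000.
         */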
12227        if ((drv_pulse != mcp_pulse) &&
12228            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12229            /* someone lost a heartbeat... */
12230            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12231                  drv_pulse, mcp_pulse);
12232        }
12233    }
12234
12235    /* state is BXE_STATE_OPEN */
12236    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12237
12238    BXE_CORE_UNLOCK(sc);
12239
12240    if ((sc->state == BXE_STATE_OPEN) &&
12241        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12242        /* schedule the next periodic callout */
12243        callout_reset(&sc->periodic_callout, hz,
12244                      bxe_periodic_callout_func, sc);
12245    }
12246}
12247
12248static void
12249bxe_periodic_start(struct bxe_softc *sc)
12250{
12251    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12252    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12253}
12254
12255static void
12256bxe_periodic_stop(struct bxe_softc *sc)
12257{
12258    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12259    callout_drain(&sc->periodic_callout);
12260}
12261
12262/* start the controller */
12263static __noinline int
12264bxe_nic_load(struct bxe_softc *sc,
12265             int              load_mode)
12266{
12267    uint32_t val;
12268    int load_code = 0;
12269    int i, rc = 0;
12270
12271    BXE_CORE_LOCK_ASSERT(sc);
12272
12273    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12274
12275    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12276
12277    if (IS_PF(sc)) {
12278        /* must be called before memory allocation and HW init */
12279        bxe_ilt_set_info(sc);
12280    }
12281
12282    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12283
12284    bxe_set_fp_rx_buf_size(sc);
12285
12286    if (bxe_alloc_fp_buffers(sc) != 0) {
12287        BLOGE(sc, "Failed to allocate fastpath memory\n");
12288        sc->state = BXE_STATE_CLOSED;
12289        rc = ENOMEM;
12290        goto bxe_nic_load_error0;
12291    }
12292
12293    if (bxe_alloc_mem(sc) != 0) {
12294        sc->state = BXE_STATE_CLOSED;
12295        rc = ENOMEM;
12296        goto bxe_nic_load_error0;
12297    }
12298
12299    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12300        sc->state = BXE_STATE_CLOSED;
12301        rc = ENOMEM;
12302        goto bxe_nic_load_error0;
12303    }
12304
12305    if (IS_PF(sc)) {
12306        /* set pf load just before approaching the MCP */
12307        bxe_set_pf_load(sc);
12308
12309        /* if MCP exists send load request and analyze response */
12310        if (!BXE_NOMCP(sc)) {
12311            /* attempt to load pf */
12312            if (bxe_nic_load_request(sc, &load_code) != 0) {
12313                sc->state = BXE_STATE_CLOSED;
12314                rc = ENXIO;
12315                goto bxe_nic_load_error1;
12316            }
12317
12318            /* what did the MCP say? */
12319            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12320                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12321                sc->state = BXE_STATE_CLOSED;
12322                rc = ENXIO;
12323                goto bxe_nic_load_error2;
12324            }
12325        } else {
12326            BLOGI(sc, "Device has no MCP!\n");
12327            load_code = bxe_nic_load_no_mcp(sc);
12328        }
12329
12330        /* mark PMF if applicable */
12331        bxe_nic_load_pmf(sc, load_code);
12332
12333        /* Init Function state controlling object */
12334        bxe_init_func_obj(sc);
12335
12336        /* Initialize HW */
12337        if (bxe_init_hw(sc, load_code) != 0) {
12338            BLOGE(sc, "HW init failed\n");
12339            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12340            sc->state = BXE_STATE_CLOSED;
12341            rc = ENXIO;
12342            goto bxe_nic_load_error2;
12343        }
12344    }
12345
12346    /* set ALWAYS_ALIVE bit in shmem */
12347    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12348    bxe_drv_pulse(sc);
12349    sc->flags |= BXE_NO_PULSE;
12350
12351    /* attach interrupts */
12352    if (bxe_interrupt_attach(sc) != 0) {
12353        sc->state = BXE_STATE_CLOSED;
12354        rc = ENXIO;
12355        goto bxe_nic_load_error2;
12356    }
12357
12358    bxe_nic_init(sc, load_code);
12359
12360    /* Init per-function objects */
12361    if (IS_PF(sc)) {
12362        bxe_init_objs(sc);
12363        // XXX bxe_iov_nic_init(sc);
12364
12365        /* set AFEX default VLAN tag to an invalid value */
12366        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12367        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12368
12369        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12370        rc = bxe_func_start(sc);
12371        if (rc) {
12372            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12373            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12374            sc->state = BXE_STATE_ERROR;
12375            goto bxe_nic_load_error3;
12376        }
12377
12378        /* send LOAD_DONE command to MCP */
12379        if (!BXE_NOMCP(sc)) {
12380            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12381            if (!load_code) {
12382                BLOGE(sc, "MCP response failure, aborting\n");
12383                sc->state = BXE_STATE_ERROR;
12384                rc = ENXIO;
12385                goto bxe_nic_load_error3;
12386            }
12387        }
12388
12389        rc = bxe_setup_leading(sc);
12390        if (rc) {
12391            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12392            sc->state = BXE_STATE_ERROR;
12393            goto bxe_nic_load_error3;
12394        }
12395
12396        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12397            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12398            if (rc) {
12399                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12400                sc->state = BXE_STATE_ERROR;
12401                goto bxe_nic_load_error3;
12402            }
12403        }
12404
12405        rc = bxe_init_rss_pf(sc);
12406        if (rc) {
12407            BLOGE(sc, "PF RSS init failed\n");
12408            sc->state = BXE_STATE_ERROR;
12409            goto bxe_nic_load_error3;
12410        }
12411    }
12412    /* XXX VF */
12413
12414    /* now that the Clients are configured we are ready to work */
12415    sc->state = BXE_STATE_OPEN;
12416
12417    /* Configure a ucast MAC */
12418    if (IS_PF(sc)) {
12419        rc = bxe_set_eth_mac(sc, TRUE);
12420    }
12421    if (rc) {
12422        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12423        sc->state = BXE_STATE_ERROR;
12424        goto bxe_nic_load_error3;
12425    }
12426
12427    if (sc->port.pmf) {
12428        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12429        if (rc) {
12430            sc->state = BXE_STATE_ERROR;
12431            goto bxe_nic_load_error3;
12432        }
12433    }
12434
12435    sc->link_params.feature_config_flags &=
12436        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12437
12438    /* start fast path */
12439
12440    /* Initialize Rx filter */
12441    bxe_set_rx_mode(sc);
12442
12443    /* start the Tx */
12444    switch (/* XXX load_mode */LOAD_OPEN) {
12445    case LOAD_NORMAL:
12446    case LOAD_OPEN:
12447        break;
12448
12449    case LOAD_DIAG:
12450    case LOAD_LOOPBACK_EXT:
12451        sc->state = BXE_STATE_DIAG;
12452        break;
12453
12454    default:
12455        break;
12456    }
12457
12458    if (sc->port.pmf) {
12459        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12460    } else {
12461        bxe_link_status_update(sc);
12462    }
12463
12464    /* start the periodic timer callout */
12465    bxe_periodic_start(sc);
12466
12467    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12468        /* mark driver is loaded in shmem2 */
12469        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12470        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12471                  (val |
12472                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12473                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12474    }
12475
12476    /* wait for all pending SP commands to complete */
12477    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12478        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12479        bxe_periodic_stop(sc);
12480        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12481        return (ENXIO);
12482    }
12483
12484    /* Tell the stack the driver is running! */
12485    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12486
12487    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12488
12489    return (0);
12490
12491bxe_nic_load_error3:
12492
12493    if (IS_PF(sc)) {
12494        bxe_int_disable_sync(sc, 1);
12495
12496        /* clean out queued objects */
12497        bxe_squeeze_objects(sc);
12498    }
12499
12500    bxe_interrupt_detach(sc);
12501
12502bxe_nic_load_error2:
12503
12504    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12505        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12506        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12507    }
12508
12509    sc->port.pmf = 0;
12510
12511bxe_nic_load_error1:
12512
12513    /* clear pf_load status, as it was already set */
12514    if (IS_PF(sc)) {
12515        bxe_clear_pf_load(sc);
12516    }
12517
12518bxe_nic_load_error0:
12519
12520    bxe_free_fw_stats_mem(sc);
12521    bxe_free_fp_buffers(sc);
12522    bxe_free_mem(sc);
12523
12524    return (rc);
12525}
12526
12527static int
12528bxe_init_locked(struct bxe_softc *sc)
12529{
12530    int other_engine = SC_PATH(sc) ? 0 : 1;
12531    uint8_t other_load_status, load_status;
12532    uint8_t global = FALSE;
12533    int rc;
12534
12535    BXE_CORE_LOCK_ASSERT(sc);
12536
12537    /* check if the driver is already running */
12538    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12539        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12540        return (0);
12541    }
12542
12543    bxe_set_power_state(sc, PCI_PM_D0);
12544
12545    /*
12546     * If parity occurred during the unload, then attentions and/or
12547     * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12548     * loaded on the current engine to complete the recovery. Parity recovery
12549     * is only relevant for the PF driver.
12550     */
12551    if (IS_PF(sc)) {
12552        other_load_status = bxe_get_load_status(sc, other_engine);
12553        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12554
12555        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12556            bxe_chk_parity_attn(sc, &global, TRUE)) {
12557            do {
12558                /*
12559                 * If there are attentions and they are in global blocks, set
12560                 * the GLOBAL_RESET bit regardless whether it will be this
12561                 * function that will complete the recovery or not.
12562                 */
12563                if (global) {
12564                    bxe_set_reset_global(sc);
12565                }
12566
12567                /*
12568                 * Only the first function on the current engine should try
12569                 * to recover in open. In case of attentions in global blocks
12570                 * only the first in the chip should try to recover.
12571                 */
12572                if ((!load_status && (!global || !other_load_status)) &&
12573                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12574                    BLOGI(sc, "Recovered during init\n");
12575                    break;
12576                }
12577
12578                /* recovery has failed... */
12579                bxe_set_power_state(sc, PCI_PM_D3hot);
12580                sc->recovery_state = BXE_RECOVERY_FAILED;
12581
12582                BLOGE(sc, "Recovery flow hasn't properly "
12583                          "completed yet, try again later. "
12584                          "If you still see this message after a "
12585                          "few retries then power cycle is required.\n");
12586
12587                rc = ENXIO;
12588                goto bxe_init_locked_done;
12589            } while (0);
12590        }
12591    }
12592
12593    sc->recovery_state = BXE_RECOVERY_DONE;
12594
12595    rc = bxe_nic_load(sc, LOAD_OPEN);
12596
12597bxe_init_locked_done:
12598
12599    if (rc) {
12600        /* Tell the stack the driver is NOT running! */
12601        BLOGE(sc, "Initialization failed, "
12602                  "stack notified driver is NOT running!\n");
12603        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12604    }
12605
12606    return (rc);
12607}
12608
12609static int
12610bxe_stop_locked(struct bxe_softc *sc)
12611{
12612    BXE_CORE_LOCK_ASSERT(sc);
12613    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12614}
12615
12616/*
12617 * Handles controller initialization when called from an unlocked routine.
12618 * ifconfig calls this function.
12619 *
12620 * Returns:
12621 *   void
12622 */
12623static void
12624bxe_init(void *xsc)
12625{
12626    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12627
12628    BXE_CORE_LOCK(sc);
12629    bxe_init_locked(sc);
12630    BXE_CORE_UNLOCK(sc);
12631}
12632
12633static int
12634bxe_init_ifnet(struct bxe_softc *sc)
12635{
12636    if_t ifp;
12637    int capabilities;
12638
12639    /* ifconfig entrypoint for media type/status reporting */
12640    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12641                 bxe_ifmedia_update,
12642                 bxe_ifmedia_status);
12643
12644    /* set the default interface values */
12645    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12646    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12647    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12648
12649    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12650
12651    /* allocate the ifnet structure */
12652    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12653        BLOGE(sc, "Interface allocation failed!\n");
12654        return (ENXIO);
12655    }
12656
12657    if_setsoftc(ifp, sc);
12658    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12659    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12660    if_setioctlfn(ifp, bxe_ioctl);
12661    if_setstartfn(ifp, bxe_tx_start);
12662    if_setgetcounterfn(ifp, bxe_get_counter);
12663#if __FreeBSD_version >= 901504
12664    if_settransmitfn(ifp, bxe_tx_mq_start);
12665    if_setqflushfn(ifp, bxe_mq_flush);
12666#endif
12667#ifdef FreeBSD8_0
12668    if_settimer(ifp, 0);
12669#endif
12670    if_setinitfn(ifp, bxe_init);
12671    if_setmtu(ifp, sc->mtu);
12672    if_sethwassist(ifp, (CSUM_IP      |
12673                        CSUM_TCP      |
12674                        CSUM_UDP      |
12675                        CSUM_TSO      |
12676                        CSUM_TCP_IPV6 |
12677                        CSUM_UDP_IPV6));
12678
12679    capabilities =
12680#if __FreeBSD_version < 700000
12681        (IFCAP_VLAN_MTU       |
12682         IFCAP_VLAN_HWTAGGING |
12683         IFCAP_HWCSUM         |
12684         IFCAP_JUMBO_MTU      |
12685         IFCAP_LRO);
12686#else
12687        (IFCAP_VLAN_MTU       |
12688         IFCAP_VLAN_HWTAGGING |
12689         IFCAP_VLAN_HWTSO     |
12690         IFCAP_VLAN_HWFILTER  |
12691         IFCAP_VLAN_HWCSUM    |
12692         IFCAP_HWCSUM         |
12693         IFCAP_JUMBO_MTU      |
12694         IFCAP_LRO            |
12695         IFCAP_TSO4           |
12696         IFCAP_TSO6           |
12697         IFCAP_WOL_MAGIC);
12698#endif
12699    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12700    if_setbaudrate(ifp, IF_Gbps(10));
12701/* XXX */
12702    if_setsendqlen(ifp, sc->tx_ring_size);
12703    if_setsendqready(ifp);
12704/* XXX */
12705
12706    sc->ifp = ifp;
12707
12708    /* attach to the Ethernet interface list */
12709    ether_ifattach(ifp, sc->link_params.mac_addr);
12710
12711    return (0);
12712}
12713
12714static void
12715bxe_deallocate_bars(struct bxe_softc *sc)
12716{
12717    int i;
12718
12719    for (i = 0; i < MAX_BARS; i++) {
12720        if (sc->bar[i].resource != NULL) {
12721            bus_release_resource(sc->dev,
12722                                 SYS_RES_MEMORY,
12723                                 sc->bar[i].rid,
12724                                 sc->bar[i].resource);
12725            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12726                  i, PCIR_BAR(i));
12727        }
12728    }
12729}
12730
12731static int
12732bxe_allocate_bars(struct bxe_softc *sc)
12733{
12734    u_int flags;
12735    int i;
12736
12737    memset(sc->bar, 0, sizeof(sc->bar));
12738
12739    for (i = 0; i < MAX_BARS; i++) {
12740
12741        /* memory resources reside at BARs 0, 2, 4 */
12742        /* Run `pciconf -lb` to see mappings */
12743        if ((i != 0) && (i != 2) && (i != 4)) {
12744            continue;
12745        }
12746
12747        sc->bar[i].rid = PCIR_BAR(i);
12748
12749        flags = RF_ACTIVE;
12750        if (i == 0) {
12751            flags |= RF_SHAREABLE;
12752        }
12753
12754        if ((sc->bar[i].resource =
12755             bus_alloc_resource_any(sc->dev,
12756                                    SYS_RES_MEMORY,
12757                                    &sc->bar[i].rid,
12758                                    flags)) == NULL) {
12759            return (0);
12760        }
12761
12762        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12763        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12764        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12765
12766        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12767              i, PCIR_BAR(i),
12768              (void *)rman_get_start(sc->bar[i].resource),
12769              (void *)rman_get_end(sc->bar[i].resource),
12770              rman_get_size(sc->bar[i].resource),
12771              (void *)sc->bar[i].kva);
12772    }
12773
12774    return (0);
12775}
12776
12777static void
12778bxe_get_function_num(struct bxe_softc *sc)
12779{
12780    uint32_t val = 0;
12781
12782    /*
12783     * Read the ME register to get the function number. The ME register
12784     * holds the relative-function number and absolute-function number. The
12785     * absolute-function number appears only in E2 and above. Before that
12786     * these bits always contained zero, therefore we cannot blindly use them.
12787     */
12788
12789    val = REG_RD(sc, BAR_ME_REGISTER);
12790
12791    sc->pfunc_rel =
12792        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12793    sc->path_id =
12794        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12795
12796    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12797        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12798    } else {
12799        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12800    }
12801
12802    BLOGD(sc, DBG_LOAD,
12803          "Relative function %d, Absolute function %d, Path %d\n",
12804          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12805}
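
/*
 * Example of the numbering above (register values are illustrative): in
 * 4-port mode a relative function of 2 on path 1 yields an absolute
 * function of (2 << 1) | 1 = 5, while in 2-port mode the same pair yields
 * 2 | 1 = 3.
 */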
12806
12807static uint32_t
12808bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12809{
12810    uint32_t shmem2_size;
12811    uint32_t offset;
12812    uint32_t mf_cfg_offset_value;
12813
12814    /* Non 57712 */
12815    offset = (SHMEM_RD(sc, func_mb) +
12816              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12817
12818    /* 57712 plus */
12819    if (sc->devinfo.shmem2_base != 0) {
12820        shmem2_size = SHMEM2_RD(sc, size);
12821        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12822            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12823            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12824                offset = mf_cfg_offset_value;
12825            }
12826        }
12827    }
12828
12829    return (offset);
12830}
12831
12832static uint32_t
12833bxe_pcie_capability_read(struct bxe_softc *sc,
12834                         int    reg,
12835                         int    width)
12836{
12837    int pcie_reg;
12838
12839    /* ensure PCIe capability is enabled */
12840    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12841        if (pcie_reg != 0) {
12842            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12843            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12844        }
12845    }
12846
12847    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12848
12849    return (0);
12850}
12851
12852static uint8_t
12853bxe_is_pcie_pending(struct bxe_softc *sc)
12854{
12855    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12856            PCIM_EXP_STA_TRANSACTION_PND);
12857}
12858
12859/*
12860 * Walk the PCI capabilities list for the device to find what features are
12861 * supported. These capabilities may be enabled/disabled by firmware so it's
12862 * best to walk the list rather than make assumptions.
12863 */
12864static void
12865bxe_probe_pci_caps(struct bxe_softc *sc)
12866{
12867    uint16_t link_status;
12868    int reg;
12869
12870    /* check if PCI Power Management is enabled */
12871    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12872        if (reg != 0) {
12873            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12874
12875            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12876            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12877        }
12878    }
12879
12880    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12881
12882    /* handle PCIe 2.0 workarounds for 57710 */
12883    if (CHIP_IS_E1(sc)) {
12884        /* workaround for 57710 errata E4_57710_27462 */
12885        sc->devinfo.pcie_link_speed =
12886            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12887
12888        /* workaround for 57710 errata E4_57710_27488 */
12889        sc->devinfo.pcie_link_width =
12890            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12891        if (sc->devinfo.pcie_link_speed > 1) {
12892            sc->devinfo.pcie_link_width =
12893                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12894        }
12895    } else {
12896        sc->devinfo.pcie_link_speed =
12897            (link_status & PCIM_LINK_STA_SPEED);
12898        sc->devinfo.pcie_link_width =
12899            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12900    }
12901
12902    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12903          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12904
12905    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12906    /* save the offset of the PCIe capability itself, not the PM offset left in 'reg' */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &reg) == 0) {
        sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
    }
12907
12908    /* check if MSI capability is enabled */
12909    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12910        if (reg != 0) {
12911            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12912
12913            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12914            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12915        }
12916    }
12917
12918    /* check if MSI-X capability is enabled */
12919    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12920        if (reg != 0) {
12921            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12922
12923            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12924            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12925        }
12926    }
12927}
12928
12929static int
12930bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12931{
12932    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12933    uint32_t val;
12934
12935    /* get the outer vlan if we're in switch-dependent mode */
12936
12937    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12938    mf_info->ext_id = (uint16_t)val;
12939
12940    mf_info->multi_vnics_mode = 1;
12941
12942    if (!VALID_OVLAN(mf_info->ext_id)) {
12943        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
12944        return (1);
12945    }
12946
12947    /* get the capabilities */
12948    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12949        FUNC_MF_CFG_PROTOCOL_ISCSI) {
12950        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
12951    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12952               FUNC_MF_CFG_PROTOCOL_FCOE) {
12953        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
12954    } else {
12955        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
12956    }
12957
12958    mf_info->vnics_per_port =
12959        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
12960
12961    return (0);
12962}
12963
12964static uint32_t
12965bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
12966{
12967    uint32_t retval = 0;
12968    uint32_t val;
12969
12970    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
12971
12972    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
12973        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
12974            retval |= MF_PROTO_SUPPORT_ETHERNET;
12975        }
12976        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
12977            retval |= MF_PROTO_SUPPORT_ISCSI;
12978        }
12979        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
12980            retval |= MF_PROTO_SUPPORT_FCOE;
12981        }
12982    }
12983
12984    return (retval);
12985}
12986
12987static int
12988bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
12989{
12990    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12991    uint32_t val;
12992
12993    /*
12994     * There is no outer vlan if we're in switch-independent mode.
12995     * If the mac is valid then assume multi-function.
12996     */
12997
12998    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
12999
13000    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13001
13002    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13003
13004    mf_info->vnics_per_port =
13005        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13006
13007    return (0);
13008}
13009
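/*
 * Gather the AFEX (NIV) multi-function parameters from the MF configuration:
 * the VIF id, default VLAN, allowed CoS priorities, default CoS, VLAN mode,
 * and the MBA enable flag.
 */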
13010static int
13011bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13012{
13013    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13014    uint32_t e1hov_tag;
13015    uint32_t func_config;
13016    uint32_t niv_config;
13017
13018    mf_info->multi_vnics_mode = 1;
13019
13020    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13021    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13022    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13023
13024    mf_info->ext_id =
13025        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13026                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13027
13028    mf_info->default_vlan =
13029        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13030                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13031
13032    mf_info->niv_allowed_priorities =
13033        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13034                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13035
13036    mf_info->niv_default_cos =
13037        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13038                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13039
13040    mf_info->afex_vlan_mode =
13041        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13042         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13043
13044    mf_info->niv_mba_enabled =
13045        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13046         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13047
13048    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13049
13050    mf_info->vnics_per_port =
13051        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13052
13053    return (0);
13054}
13055
13056static int
13057bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13058{
13059    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13060    uint32_t mf_cfg1;
13061    uint32_t mf_cfg2;
13062    uint32_t ovlan1;
13063    uint32_t ovlan2;
13064    uint8_t i, j;
13065
13066    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13067          SC_PORT(sc));
13068    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13069          mf_info->mf_config[SC_VN(sc)]);
13070    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13071          mf_info->multi_vnics_mode);
13072    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13073          mf_info->vnics_per_port);
13074    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13075          mf_info->ext_id);
13076    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13077          mf_info->min_bw[0], mf_info->min_bw[1],
13078          mf_info->min_bw[2], mf_info->min_bw[3]);
13079    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13080          mf_info->max_bw[0], mf_info->max_bw[1],
13081          mf_info->max_bw[2], mf_info->max_bw[3]);
13082    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13083          sc->mac_addr_str);
13084
13085    /* various MF mode sanity checks... */
13086
13087    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13088        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13089              SC_PORT(sc));
13090        return (1);
13091    }
13092
13093    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13094        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13095              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13096        return (1);
13097    }
13098
13099    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13100        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13101        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13102            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13103                  SC_VN(sc), OVLAN(sc));
13104            return (1);
13105        }
13106
13107        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13108            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13109                  mf_info->multi_vnics_mode, OVLAN(sc));
13110            return (1);
13111        }
13112
13113        /*
13114         * Verify that all functions are either in MF or SF mode. If MF, make
13115         * sure that all non-hidden functions have a valid ovlan. If SF, make
13116         * sure that all non-hidden functions have an invalid ovlan.
13117         */
13118        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13119            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13120            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13121            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13122                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13123                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13124                BLOGE(sc, "mf_mode=SD function %d MF config "
13125                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13126                      i, mf_info->multi_vnics_mode, ovlan1);
13127                return (1);
13128            }
13129        }
13130
13131        /* Verify all funcs on the same port each have a different ovlan. */
13132        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13133            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13134            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13135            /* iterate from the next function on the port to the max func */
13136            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13137                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13138                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13139                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13140                    VALID_OVLAN(ovlan1) &&
13141                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13142                    VALID_OVLAN(ovlan2) &&
13143                    (ovlan1 == ovlan2)) {
13144                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13145                              "have the same ovlan (%d)\n",
13146                          i, j, ovlan1);
13147                    return (1);
13148                }
13149            }
13150        }
13151    } /* MULTI_FUNCTION_SD */
13152
13153    return (0);
13154}
13155
13156static int
13157bxe_get_mf_cfg_info(struct bxe_softc *sc)
13158{
13159    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13160    uint32_t val, mac_upper;
13161    uint8_t i, vnic;
13162
13163    /* initialize mf_info defaults */
13164    mf_info->vnics_per_port   = 1;
13165    mf_info->multi_vnics_mode = FALSE;
13166    mf_info->path_has_ovlan   = FALSE;
13167    mf_info->mf_mode          = SINGLE_FUNCTION;
13168
13169    if (!CHIP_IS_MF_CAP(sc)) {
13170        return (0);
13171    }
13172
13173    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13174        BLOGE(sc, "Invalid mf_cfg_base!\n");
13175        return (1);
13176    }
13177
13178    /* get the MF mode (switch dependent / independent / single-function) */
13179
13180    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13181
13182    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13183    {
13184    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13185
13186        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13187
13188        /* check for legal upper mac bytes */
13189        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13190            mf_info->mf_mode = MULTI_FUNCTION_SI;
13191        } else {
13192            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13193        }
13194
13195        break;
13196
13197    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13198    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13199
13200        /* get outer vlan configuration */
13201        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13202
13203        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13204            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13205            mf_info->mf_mode = MULTI_FUNCTION_SD;
13206        } else {
13207            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13208        }
13209
13210        break;
13211
13212    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13213
13214        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13215        return (0);
13216
13217    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13218
13219        /*
13220         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13221         * and the MAC address is valid.
13222         */
13223        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13224
13225        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13226            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13227            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13228        } else {
13229            BLOGE(sc, "Invalid config for AFEX mode\n");
13230        }
13231
13232        break;
13233
13234    default:
13235
13236        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13237              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13238
13239        return (1);
13240    }
13241
13242    /* set path mf_mode (which could be different than function mf_mode) */
13243    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13244        mf_info->path_has_ovlan = TRUE;
13245    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13246        /*
13247         * Decide on the path multi vnics mode. If we're not in MF mode and are
13248         * in 4-port mode, it is sufficient to check vnic-0 of the other port on
13249         * the same path.
13250         */
13251        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13252            uint8_t other_port = !(PORT_ID(sc) & 1);
13253            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13254
13255            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13256
13257            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13258        }
13259    }
13260
13261    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13262        /* invalid MF config */
13263        if (SC_VN(sc) >= 1) {
13264            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13265            return (1);
13266        }
13267
13268        return (0);
13269    }
13270
13271    /* get the MF configuration */
13272    mf_info->mf_config[SC_VN(sc)] =
13273        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13274
13275    switch (mf_info->mf_mode)
13276    {
13277    case MULTI_FUNCTION_SD:
13278
13279        bxe_get_shmem_mf_cfg_info_sd(sc);
13280        break;
13281
13282    case MULTI_FUNCTION_SI:
13283
13284        bxe_get_shmem_mf_cfg_info_si(sc);
13285        break;
13286
13287    case MULTI_FUNCTION_AFEX:
13288
13289        bxe_get_shmem_mf_cfg_info_niv(sc);
13290        break;
13291
13292    default:
13293
13294        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13295              mf_info->mf_mode);
13296        return (1);
13297    }
13298
13299    /* get the congestion management parameters */
13300
13301    vnic = 0;
13302    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13303        /* get min/max bw */
13304        val = MFCFG_RD(sc, func_mf_config[i].config);
13305        mf_info->min_bw[vnic] =
13306            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13307        mf_info->max_bw[vnic] =
13308            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13309        vnic++;
13310    }
13311
13312    return (bxe_check_valid_mf_cfg(sc));
13313}
13314
13315static int
13316bxe_get_shmem_info(struct bxe_softc *sc)
13317{
13318    int port;
13319    uint32_t mac_hi, mac_lo, val;
13320
13321    port = SC_PORT(sc);
13322    mac_hi = mac_lo = 0;
13323
13324    sc->link_params.sc   = sc;
13325    sc->link_params.port = port;
13326
13327    /* get the hardware config info */
13328    sc->devinfo.hw_config =
13329        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13330    sc->devinfo.hw_config2 =
13331        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13332
13333    sc->link_params.hw_led_mode =
13334        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13335         SHARED_HW_CFG_LED_MODE_SHIFT);
13336
13337    /* get the port feature config */
13338    sc->port.config =
13339        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13340
13341    /* get the link params */
13342    sc->link_params.speed_cap_mask[0] =
13343        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13344    sc->link_params.speed_cap_mask[1] =
13345        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13346
13347    /* get the lane config */
13348    sc->link_params.lane_config =
13349        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13350
13351    /* get the link config */
13352    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13353    sc->port.link_config[ELINK_INT_PHY] = val;
13354    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13355    sc->port.link_config[ELINK_EXT_PHY1] =
13356        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13357
13358    /* get the override preemphasis flag and enable it or turn it off */
13359    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13360    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13361        sc->link_params.feature_config_flags |=
13362            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13363    } else {
13364        sc->link_params.feature_config_flags &=
13365            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13366    }
13367
13368    /* get the initial value of the link params */
13369    sc->link_params.multi_phy_config =
13370        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13371
13372    /* get external phy info */
13373    sc->port.ext_phy_config =
13374        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13375
13376    /* get the multifunction configuration */
13377    bxe_get_mf_cfg_info(sc);
13378
13379    /* get the mac address */
13380    if (IS_MF(sc)) {
13381        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13382        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13383    } else {
13384        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13385        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13386    }
13387
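    /*
     * The station address is packed big-endian across the two shmem words:
     * mac_upper holds bytes 0-1 and mac_lower holds bytes 2-5.
     */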
13388    if ((mac_lo == 0) && (mac_hi == 0)) {
13389        *sc->mac_addr_str = 0;
13390        BLOGE(sc, "No Ethernet address programmed!\n");
13391    } else {
13392        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13393        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13394        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13395        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13396        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13397        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13398        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13399                 "%02x:%02x:%02x:%02x:%02x:%02x",
13400                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13401                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13402                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13403        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13404    }
13405
13406    return (0);
13407}
13408
13409static void
13410bxe_get_tunable_params(struct bxe_softc *sc)
13411{
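    /*
     * The bxe_* module-level variables checked below are normally seeded from
     * loader tunables (for example hw.bxe.interrupt_mode or hw.bxe.queue_count
     * in loader.conf, assuming the usual hw.bxe.* naming used where the
     * tunables are registered earlier in this file).
     */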
13412    /* sanity checks */
13413
13414    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13415        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13416        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13417        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13418        bxe_interrupt_mode = INTR_MODE_MSIX;
13419    }
13420
13421    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13422        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13423        bxe_queue_count = 0;
13424    }
13425
13426    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13427        if (bxe_max_rx_bufs == 0) {
13428            bxe_max_rx_bufs = RX_BD_USABLE;
13429        } else {
13430            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13431            bxe_max_rx_bufs = 2048;
13432        }
13433    }
13434
13435    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13436        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13437        bxe_hc_rx_ticks = 25;
13438    }
13439
13440    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13441        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13442        bxe_hc_tx_ticks = 50;
13443    }
13444
13445    if (bxe_max_aggregation_size == 0) {
13446        bxe_max_aggregation_size = TPA_AGG_SIZE;
13447    }
13448
13449    if (bxe_max_aggregation_size > 0xffff) {
13450        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13451              bxe_max_aggregation_size);
13452        bxe_max_aggregation_size = TPA_AGG_SIZE;
13453    }
13454
13455    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13456        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13457        bxe_mrrs = -1;
13458    }
13459
13460    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13461        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13462        bxe_autogreeen = 0;
13463    }
13464
13465    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13466        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13467        bxe_udp_rss = 0;
13468    }
13469
13470    /* pull in user settings */
13471
13472    sc->interrupt_mode       = bxe_interrupt_mode;
13473    sc->max_rx_bufs          = bxe_max_rx_bufs;
13474    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13475    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13476    sc->max_aggregation_size = bxe_max_aggregation_size;
13477    sc->mrrs                 = bxe_mrrs;
13478    sc->autogreeen           = bxe_autogreeen;
13479    sc->udp_rss              = bxe_udp_rss;
13480
13481    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13482        sc->num_queues = 1;
13483    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13484        sc->num_queues =
13485            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13486                MAX_RSS_CHAINS);
13487        if (sc->num_queues > mp_ncpus) {
13488            sc->num_queues = mp_ncpus;
13489        }
13490    }
13491
13492    BLOGD(sc, DBG_LOAD,
13493          "User Config: "
13494          "debug=0x%lx "
13495          "interrupt_mode=%d "
13496          "queue_count=%d "
13497          "hc_rx_ticks=%d "
13498          "hc_tx_ticks=%d "
13499          "rx_budget=%d "
13500          "max_aggregation_size=%d "
13501          "mrrs=%d "
13502          "autogreeen=%d "
13503          "udp_rss=%d\n",
13504          bxe_debug,
13505          sc->interrupt_mode,
13506          sc->num_queues,
13507          sc->hc_rx_ticks,
13508          sc->hc_tx_ticks,
13509          bxe_rx_budget,
13510          sc->max_aggregation_size,
13511          sc->mrrs,
13512          sc->autogreeen,
13513          sc->udp_rss);
13514}
13515
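/*
 * Map the PHY media type reported by the link layer to an ifmedia word in
 * sc->media and return the corresponding PORT_* connector type.
 */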
13516static int
13517bxe_media_detect(struct bxe_softc *sc)
13518{
13519    int port_type;
13520    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13521
13522    switch (sc->link_params.phy[phy_idx].media_type) {
13523    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13524    case ELINK_ETH_PHY_XFP_FIBER:
13525        BLOGI(sc, "Found 10Gb Fiber media.\n");
13526        sc->media = IFM_10G_SR;
13527        port_type = PORT_FIBRE;
13528        break;
13529    case ELINK_ETH_PHY_SFP_1G_FIBER:
13530        BLOGI(sc, "Found 1Gb Fiber media.\n");
13531        sc->media = IFM_1000_SX;
13532        port_type = PORT_FIBRE;
13533        break;
13534    case ELINK_ETH_PHY_KR:
13535    case ELINK_ETH_PHY_CX4:
13536        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13537        sc->media = IFM_10G_CX4;
13538        port_type = PORT_FIBRE;
13539        break;
13540    case ELINK_ETH_PHY_DA_TWINAX:
13541        BLOGI(sc, "Found 10Gb Twinax media.\n");
13542        sc->media = IFM_10G_TWINAX;
13543        port_type = PORT_DA;
13544        break;
13545    case ELINK_ETH_PHY_BASE_T:
13546        if (sc->link_params.speed_cap_mask[0] &
13547            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13548            BLOGI(sc, "Found 10GBase-T media.\n");
13549            sc->media = IFM_10G_T;
13550            port_type = PORT_TP;
13551        } else {
13552            BLOGI(sc, "Found 1000Base-T media.\n");
13553            sc->media = IFM_1000_T;
13554            port_type = PORT_TP;
13555        }
13556        break;
13557    case ELINK_ETH_PHY_NOT_PRESENT:
13558        BLOGI(sc, "Media not present.\n");
13559        sc->media = 0;
13560        port_type = PORT_OTHER;
13561        break;
13562    case ELINK_ETH_PHY_UNSPECIFIED:
13563    default:
13564        BLOGI(sc, "Unknown media!\n");
13565        sc->media = 0;
13566        port_type = PORT_OTHER;
13567        break;
13568    }
13569    return port_type;
13570}
13571
13572#define GET_FIELD(value, fname)                     \
13573    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13574#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13575#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13576
13577static int
13578bxe_get_igu_cam_info(struct bxe_softc *sc)
13579{
13580    int pfid = SC_FUNC(sc);
13581    int igu_sb_id;
13582    uint32_t val;
13583    uint8_t fid, igu_sb_cnt = 0;
13584
13585    sc->igu_base_sb = 0xff;
13586
13587    if (CHIP_INT_MODE_IS_BC(sc)) {
13588        int vn = SC_VN(sc);
13589        igu_sb_cnt = sc->igu_sb_cnt;
13590        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13591                           FP_SB_MAX_E1x);
13592        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13593                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13594        return (0);
13595    }
13596
13597    /* IGU in normal mode - read CAM */
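    /*
     * Each valid CAM entry maps one IGU status block to its owning function:
     * the FID field encodes whether the owner is a PF and which one, and the
     * vector field gives the per-function SB index, with vector 0 reserved
     * for the default status block.
     */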
13598    for (igu_sb_id = 0;
13599         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13600         igu_sb_id++) {
13601        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13602        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13603            continue;
13604        }
13605        fid = IGU_FID(val);
13606        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13607            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13608                continue;
13609            }
13610            if (IGU_VEC(val) == 0) {
13611                /* default status block */
13612                sc->igu_dsb_id = igu_sb_id;
13613            } else {
13614                if (sc->igu_base_sb == 0xff) {
13615                    sc->igu_base_sb = igu_sb_id;
13616                }
13617                igu_sb_cnt++;
13618            }
13619        }
13620    }
13621
13622    /*
13623     * Due to the new PF resource allocation by MFW T7.4 and above, the number
13624     * of CAM entries may not equal the value advertised in the PCI
13625     * configuration space. The driver should use the minimum of the two as
13626     * the actual status block count.
13627     */
13628    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13629
13630    if (igu_sb_cnt == 0) {
13631        BLOGE(sc, "CAM configuration error\n");
13632        return (-1);
13633    }
13634
13635    return (0);
13636}
13637
13638/*
13639 * Gather various information from the device config space, the device itself,
13640 * shmem, and the user input.
13641 */
13642static int
13643bxe_get_device_info(struct bxe_softc *sc)
13644{
13645    uint32_t val;
13646    int rc;
13647
13648    /* Get the data for the device */
13649    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13650    sc->devinfo.device_id    = pci_get_device(sc->dev);
13651    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13652    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13653
13654    /* get the chip revision (chip metal comes from pci config space) */
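    /* chip_id layout: [31:16] chip num, [15:12] rev, [11:4] metal, [3:0] bond */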
13655    sc->devinfo.chip_id     =
13656    sc->link_params.chip_id =
13657        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13658         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13659         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13660         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13661
13662    /* force 57811 according to MISC register */
13663    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13664        if (CHIP_IS_57810(sc)) {
13665            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13666                                   (sc->devinfo.chip_id & 0x0000ffff));
13667        } else if (CHIP_IS_57810_MF(sc)) {
13668            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13669                                   (sc->devinfo.chip_id & 0x0000ffff));
13670        }
13671        sc->devinfo.chip_id |= 0x1;
13672    }
13673
13674    BLOGD(sc, DBG_LOAD,
13675          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13676          sc->devinfo.chip_id,
13677          ((sc->devinfo.chip_id >> 16) & 0xffff),
13678          ((sc->devinfo.chip_id >> 12) & 0xf),
13679          ((sc->devinfo.chip_id >>  4) & 0xff),
13680          ((sc->devinfo.chip_id >>  0) & 0xf));
13681
13682    val = (REG_RD(sc, 0x2874) & 0x55);
13683    if ((sc->devinfo.chip_id & 0x1) ||
13684        (CHIP_IS_E1(sc) && val) ||
13685        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13686        sc->flags |= BXE_ONE_PORT_FLAG;
13687        BLOGD(sc, DBG_LOAD, "single port device\n");
13688    }
13689
13690    /* set the doorbell size */
13691    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13692
13693    /* determine whether the device is in 2 port or 4 port mode */
13694    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
13695    if (CHIP_IS_E2E3(sc)) {
13696        /*
13697         * Read port4mode_en_ovwr[0]:
13698         *   If 1, four port mode is in port4mode_en_ovwr[1].
13699         *   If 0, four port mode is in port4mode_en[0].
13700         */
13701        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13702        if (val & 1) {
13703            val = ((val >> 1) & 1);
13704        } else {
13705            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13706        }
13707
13708        sc->devinfo.chip_port_mode =
13709            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13710
13711        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13712    }
13713
13714    /* get the function and path info for the device */
13715    bxe_get_function_num(sc);
13716
13717    /* get the shared memory base address */
13718    sc->devinfo.shmem_base     =
13719    sc->link_params.shmem_base =
13720        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13721    sc->devinfo.shmem2_base =
13722        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13723                                  MISC_REG_GENERIC_CR_0));
13724
13725    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13726          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13727
13728    if (!sc->devinfo.shmem_base) {
13729        /* this should ONLY prevent upcoming shmem reads */
13730        BLOGI(sc, "MCP not active\n");
13731        sc->flags |= BXE_NO_MCP_FLAG;
13732        return (0);
13733    }
13734
13735    /* make sure the shared memory contents are valid */
13736    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13737    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13738        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13739        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13740        return (0);
13741    }
13742    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13743
13744    /* get the bootcode version */
13745    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13746    snprintf(sc->devinfo.bc_ver_str,
13747             sizeof(sc->devinfo.bc_ver_str),
13748             "%d.%d.%d",
13749             ((sc->devinfo.bc_ver >> 24) & 0xff),
13750             ((sc->devinfo.bc_ver >> 16) & 0xff),
13751             ((sc->devinfo.bc_ver >>  8) & 0xff));
13752    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13753
13754    /* get the multi-function configuration base address in shmem */
13755    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13756    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13757
13758    /* clean indirect addresses as they're not used */
13759    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13760    if (IS_PF(sc)) {
13761        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13762        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13763        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13764        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13765        if (CHIP_IS_E1x(sc)) {
13766            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13767            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13768            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13769            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13770        }
13771
13772        /*
13773         * Enable internal target-read (in case we are probed after PF
13774         * FLR). Must be done prior to any BAR read access. Only for
13775         * 57712 and up
13776         */
13777        if (!CHIP_IS_E1x(sc)) {
13778            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13779        }
13780    }
13781
13782    /* get the nvram size */
13783    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13784    sc->devinfo.flash_size =
13785        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13786    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13787
13788    /* get PCI capabilities */
13789    bxe_probe_pci_caps(sc);
13790
13791    bxe_set_power_state(sc, PCI_PM_D0);
13792
13793    /* get various configuration parameters from shmem */
13794    bxe_get_shmem_info(sc);
13795
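    /* derive the IGU status block count from the MSI-X capability's table size */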
13796    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13797        val = pci_read_config(sc->dev,
13798                              (sc->devinfo.pcie_msix_cap_reg +
13799                               PCIR_MSIX_CTRL),
13800                              2);
13801        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13802    } else {
13803        sc->igu_sb_cnt = 1;
13804    }
13805
13806    sc->igu_base_addr = BAR_IGU_INTMEM;
13807
13808    /* initialize IGU parameters */
13809    if (CHIP_IS_E1x(sc)) {
13810        sc->devinfo.int_block = INT_BLOCK_HC;
13811        sc->igu_dsb_id = DEF_SB_IGU_ID;
13812        sc->igu_base_sb = 0;
13813    } else {
13814        sc->devinfo.int_block = INT_BLOCK_IGU;
13815
13816        /* do not allow device reset during IGU info processing */
13817        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13818
13819        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13820
13821        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13822            int tout = 5000;
13823
13824            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13825
13826            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13827            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13828            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13829
13830            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
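            /* poll up to ~5 seconds (5000 iterations x 1 ms) for the IGU memories to clear */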
13831                tout--;
13832                DELAY(1000);
13833            }
13834
13835            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13836                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13837                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13838                return (-1);
13839            }
13840        }
13841
13842        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13843            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13844            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13845        } else {
13846            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13847        }
13848
13849        rc = bxe_get_igu_cam_info(sc);
13850
13851        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13852
13853        if (rc) {
13854            return (rc);
13855        }
13856    }
13857
13858    /*
13859     * Get base FW non-default (fast path) status block ID. This value is
13860     * used to initialize the fw_sb_id saved on the fp/queue structure to
13861     * determine the id used by the FW.
13862     */
13863    if (CHIP_IS_E1x(sc)) {
13864        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13865    } else {
13866        /*
13867         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13868         * the same queue are indicated on the same IGU SB). So we prefer
13869         * FW and IGU SBs to be the same value.
13870         */
13871        sc->base_fw_ndsb = sc->igu_base_sb;
13872    }
13873
13874    BLOGD(sc, DBG_LOAD,
13875          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13876          sc->igu_dsb_id, sc->igu_base_sb,
13877          sc->igu_sb_cnt, sc->base_fw_ndsb);
13878
13879    elink_phy_probe(&sc->link_params);
13880
13881    return (0);
13882}
13883
13884static void
13885bxe_link_settings_supported(struct bxe_softc *sc,
13886                            uint32_t         switch_cfg)
13887{
13888    uint32_t cfg_size = 0;
13889    uint32_t idx;
13890    uint8_t port = SC_PORT(sc);
13891
13892    /* aggregation of supported attributes of all external phys */
13893    sc->port.supported[0] = 0;
13894    sc->port.supported[1] = 0;
13895
13896    switch (sc->link_params.num_phys) {
13897    case 1:
13898        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13899        cfg_size = 1;
13900        break;
13901    case 2:
13902        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13903        cfg_size = 1;
13904        break;
13905    case 3:
13906        if (sc->link_params.multi_phy_config &
13907            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13908            sc->port.supported[1] =
13909                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13910            sc->port.supported[0] =
13911                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13912        } else {
13913            sc->port.supported[0] =
13914                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13915            sc->port.supported[1] =
13916                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13917        }
13918        cfg_size = 2;
13919        break;
13920    }
13921
13922    if (!(sc->port.supported[0] || sc->port.supported[1])) {
13923        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13924              SHMEM_RD(sc,
13925                       dev_info.port_hw_config[port].external_phy_config),
13926              SHMEM_RD(sc,
13927                       dev_info.port_hw_config[port].external_phy_config2));
13928        return;
13929    }
13930
13931    if (CHIP_IS_E3(sc)) {
13932        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13933    } else {
13934        switch (switch_cfg) {
13935        case ELINK_SWITCH_CFG_1G:
13936            sc->port.phy_addr =
13937                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13938            break;
13939        case ELINK_SWITCH_CFG_10G:
13940            sc->port.phy_addr =
13941                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13942            break;
13943        default:
13944            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
13945                  sc->port.link_config[0]);
13946            return;
13947        }
13948    }
13949
13950    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
13951
13952    /* mask what we support according to speed_cap_mask per configuration */
13953    for (idx = 0; idx < cfg_size; idx++) {
13954        if (!(sc->link_params.speed_cap_mask[idx] &
13955              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
13956            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
13957        }
13958
13959        if (!(sc->link_params.speed_cap_mask[idx] &
13960              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
13961            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
13962        }
13963
13964        if (!(sc->link_params.speed_cap_mask[idx] &
13965              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
13966            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
13967        }
13968
13969        if (!(sc->link_params.speed_cap_mask[idx] &
13970              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
13971            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
13972        }
13973
13974        if (!(sc->link_params.speed_cap_mask[idx] &
13975              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
13976            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
13977        }
13978
13979        if (!(sc->link_params.speed_cap_mask[idx] &
13980              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
13981            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
13982        }
13983
13984        if (!(sc->link_params.speed_cap_mask[idx] &
13985              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
13986            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
13987        }
13988
13989        if (!(sc->link_params.speed_cap_mask[idx] &
13990              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
13991            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
13992        }
13993    }
13994
13995    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
13996          sc->port.supported[0], sc->port.supported[1]);
13997}
13998
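/*
 * Translate the NVRAM link configuration for each PHY into the requested
 * line speed, duplex, flow control, and advertised capability masks,
 * rejecting any configured speed that the PHY does not support.
 */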
13999static void
14000bxe_link_settings_requested(struct bxe_softc *sc)
14001{
14002    uint32_t link_config;
14003    uint32_t idx;
14004    uint32_t cfg_size = 0;
14005
14006    sc->port.advertising[0] = 0;
14007    sc->port.advertising[1] = 0;
14008
14009    switch (sc->link_params.num_phys) {
14010    case 1:
14011    case 2:
14012        cfg_size = 1;
14013        break;
14014    case 3:
14015        cfg_size = 2;
14016        break;
14017    }
14018
14019    for (idx = 0; idx < cfg_size; idx++) {
14020        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14021        link_config = sc->port.link_config[idx];
14022
14023        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14024        case PORT_FEATURE_LINK_SPEED_AUTO:
14025            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14026                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14027                sc->port.advertising[idx] |= sc->port.supported[idx];
14028                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14029                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14030                    sc->port.advertising[idx] |=
14031                        (ELINK_SUPPORTED_100baseT_Half |
14032                         ELINK_SUPPORTED_100baseT_Full);
14033            } else {
14034                /* force 10G, no AN */
14035                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14036                sc->port.advertising[idx] |=
14037                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14038                continue;
14039            }
14040            break;
14041
14042        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14043            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14044                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14045                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14046                                              ADVERTISED_TP);
14047            } else {
14048                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14049                          "speed_cap_mask=0x%08x\n",
14050                      link_config, sc->link_params.speed_cap_mask[idx]);
14051                return;
14052            }
14053            break;
14054
14055        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14056            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14057                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14058                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14059                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14060                                              ADVERTISED_TP);
14061            } else {
14062                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14063                          "speed_cap_mask=0x%08x\n",
14064                      link_config, sc->link_params.speed_cap_mask[idx]);
14065                return;
14066            }
14067            break;
14068
14069        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14070            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14071                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14072                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14073                                              ADVERTISED_TP);
14074            } else {
14075                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14076                          "speed_cap_mask=0x%08x\n",
14077                      link_config, sc->link_params.speed_cap_mask[idx]);
14078                return;
14079            }
14080            break;
14081
14082        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14083            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14084                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14085                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14086                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14087                                              ADVERTISED_TP);
14088            } else {
14089                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14090                          "speed_cap_mask=0x%08x\n",
14091                      link_config, sc->link_params.speed_cap_mask[idx]);
14092                return;
14093            }
14094            break;
14095
14096        case PORT_FEATURE_LINK_SPEED_1G:
14097            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14098                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14099                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14100                                              ADVERTISED_TP);
14101            } else {
14102                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14103                          "speed_cap_mask=0x%08x\n",
14104                      link_config, sc->link_params.speed_cap_mask[idx]);
14105                return;
14106            }
14107            break;
14108
14109        case PORT_FEATURE_LINK_SPEED_2_5G:
14110            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14111                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14112                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14113                                              ADVERTISED_TP);
14114            } else {
14115                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14116                          "speed_cap_mask=0x%08x\n",
14117                      link_config, sc->link_params.speed_cap_mask[idx]);
14118                return;
14119            }
14120            break;
14121
14122        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14123            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14124                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14125                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14126                                              ADVERTISED_FIBRE);
14127            } else {
14128                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14129                          "speed_cap_mask=0x%08x\n",
14130                      link_config, sc->link_params.speed_cap_mask[idx]);
14131                return;
14132            }
14133            break;
14134
14135        case PORT_FEATURE_LINK_SPEED_20G:
14136            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14137            break;
14138
14139        default:
14140            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14141                      "speed_cap_mask=0x%08x\n",
14142                  link_config, sc->link_params.speed_cap_mask[idx]);
14143            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14144            sc->port.advertising[idx] = sc->port.supported[idx];
14145            break;
14146        }
14147
14148        sc->link_params.req_flow_ctrl[idx] =
14149            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14150
14151        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14152            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14153                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14154            } else {
14155                bxe_set_requested_fc(sc);
14156            }
14157        }
14158
14159        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14160                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14161              sc->link_params.req_line_speed[idx],
14162              sc->link_params.req_duplex[idx],
14163              sc->link_params.req_flow_ctrl[idx],
14164              sc->port.advertising[idx]);
14165    }
14166}
14167
14168static void
14169bxe_get_phy_info(struct bxe_softc *sc)
14170{
14171    uint8_t port = SC_PORT(sc);
14172    uint32_t config = sc->port.config;
14173    uint32_t eee_mode;
14174
14175    /* shmem data already read in bxe_get_shmem_info() */
14176
14177    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14178                        "link_config0=0x%08x\n",
14179               sc->link_params.lane_config,
14180               sc->link_params.speed_cap_mask[0],
14181               sc->port.link_config[0]);
14182
14183    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14184    bxe_link_settings_requested(sc);
14185
14186    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14187        sc->link_params.feature_config_flags |=
14188            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14189    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14190        sc->link_params.feature_config_flags &=
14191            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14192    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14193        sc->link_params.feature_config_flags |=
14194            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14195    }
14196
14197    /* configure link feature according to nvram value */
14198    eee_mode =
14199        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14200          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14201         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14202    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14203        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14204                                    ELINK_EEE_MODE_ENABLE_LPI |
14205                                    ELINK_EEE_MODE_OUTPUT_TIME);
14206    } else {
14207        sc->link_params.eee_mode = 0;
14208    }
14209
14210    /* get the media type */
14211    bxe_media_detect(sc);
14212}
14213
14214static void
14215bxe_get_params(struct bxe_softc *sc)
14216{
14217    /* get user tunable params */
14218    bxe_get_tunable_params(sc);
14219
14220    /* select the RX and TX ring sizes */
14221    sc->tx_ring_size = TX_BD_USABLE;
14222    sc->rx_ring_size = RX_BD_USABLE;
14223
14224    /* XXX disable WoL */
14225    sc->wol = 0;
14226}
14227
14228static void
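/*
 * Build the init mode flag bitmap (ASIC/FPGA/emulation, chip family, port
 * count, MF mode, endianness). These flags are presumably consumed by the
 * ecore initialization code to select the matching register init blocks.
 */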
14229bxe_set_modes_bitmap(struct bxe_softc *sc)
14230{
14231    uint32_t flags = 0;
14232
14233    if (CHIP_REV_IS_FPGA(sc)) {
14234        SET_FLAGS(flags, MODE_FPGA);
14235    } else if (CHIP_REV_IS_EMUL(sc)) {
14236        SET_FLAGS(flags, MODE_EMUL);
14237    } else {
14238        SET_FLAGS(flags, MODE_ASIC);
14239    }
14240
14241    if (CHIP_IS_MODE_4_PORT(sc)) {
14242        SET_FLAGS(flags, MODE_PORT4);
14243    } else {
14244        SET_FLAGS(flags, MODE_PORT2);
14245    }
14246
14247    if (CHIP_IS_E2(sc)) {
14248        SET_FLAGS(flags, MODE_E2);
14249    } else if (CHIP_IS_E3(sc)) {
14250        SET_FLAGS(flags, MODE_E3);
14251        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14252            SET_FLAGS(flags, MODE_E3_A0);
14253        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14254            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14255        }
14256    }
14257
14258    if (IS_MF(sc)) {
14259        SET_FLAGS(flags, MODE_MF);
14260        switch (sc->devinfo.mf_info.mf_mode) {
14261        case MULTI_FUNCTION_SD:
14262            SET_FLAGS(flags, MODE_MF_SD);
14263            break;
14264        case MULTI_FUNCTION_SI:
14265            SET_FLAGS(flags, MODE_MF_SI);
14266            break;
14267        case MULTI_FUNCTION_AFEX:
14268            SET_FLAGS(flags, MODE_MF_AFEX);
14269            break;
14270        }
14271    } else {
14272        SET_FLAGS(flags, MODE_SF);
14273    }
14274
14275#if defined(__LITTLE_ENDIAN)
14276    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14277#else /* __BIG_ENDIAN */
14278    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14279#endif
14280
14281    INIT_MODE_FLAGS(sc) = flags;
14282}
14283
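/*
 * Allocate the per-device HSI memory: the parent DMA tag, default status
 * block, event queue, slow path buffers, slow path queue, and the firmware
 * decompression buffer. Each failure path below unwinds whatever was
 * allocated before it returns.
 */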
14284static int
14285bxe_alloc_hsi_mem(struct bxe_softc *sc)
14286{
14287    struct bxe_fastpath *fp;
14288    bus_addr_t busaddr;
14289    int max_agg_queues;
14290    int max_segments;
14291    bus_size_t max_size;
14292    bus_size_t max_seg_size;
14293    char buf[32];
14294    int rc;
14295    int i, j;
14296
14297    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14298
14299    /* allocate the parent bus DMA tag */
14300    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14301                            1,                        /* alignment */
14302                            0,                        /* boundary limit */
14303                            BUS_SPACE_MAXADDR,        /* restricted low */
14304                            BUS_SPACE_MAXADDR,        /* restricted hi */
14305                            NULL,                     /* addr filter() */
14306                            NULL,                     /* addr filter() arg */
14307                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14308                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14309                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14310                            0,                        /* flags */
14311                            NULL,                     /* lock() */
14312                            NULL,                     /* lock() arg */
14313                            &sc->parent_dma_tag);     /* returned dma tag */
14314    if (rc != 0) {
14315        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14316        return (1);
14317    }
14318
14319    /************************/
14320    /* DEFAULT STATUS BLOCK */
14321    /************************/
14322
14323    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14324                      &sc->def_sb_dma, "default status block") != 0) {
14325        /* XXX */
14326        bus_dma_tag_destroy(sc->parent_dma_tag);
14327        return (1);
14328    }
14329
14330    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14331
14332    /***************/
14333    /* EVENT QUEUE */
14334    /***************/
14335
14336    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14337                      &sc->eq_dma, "event queue") != 0) {
14338        /* XXX */
14339        bxe_dma_free(sc, &sc->def_sb_dma);
14340        sc->def_sb = NULL;
14341        bus_dma_tag_destroy(sc->parent_dma_tag);
14342        return (1);
14343    }
14344
14345    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14346
14347    /*************/
14348    /* SLOW PATH */
14349    /*************/
14350
14351    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14352                      &sc->sp_dma, "slow path") != 0) {
14353        /* XXX */
14354        bxe_dma_free(sc, &sc->eq_dma);
14355        sc->eq = NULL;
14356        bxe_dma_free(sc, &sc->def_sb_dma);
14357        sc->def_sb = NULL;
14358        bus_dma_tag_destroy(sc->parent_dma_tag);
14359        return (1);
14360    }
14361
14362    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14363
14364    /*******************/
14365    /* SLOW PATH QUEUE */
14366    /*******************/
14367
14368    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14369                      &sc->spq_dma, "slow path queue") != 0) {
14370        /* XXX */
14371        bxe_dma_free(sc, &sc->sp_dma);
14372        sc->sp = NULL;
14373        bxe_dma_free(sc, &sc->eq_dma);
14374        sc->eq = NULL;
14375        bxe_dma_free(sc, &sc->def_sb_dma);
14376        sc->def_sb = NULL;
14377        bus_dma_tag_destroy(sc->parent_dma_tag);
14378        return (1);
14379    }
14380
14381    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14382
14383    /***************************/
14384    /* FW DECOMPRESSION BUFFER */
14385    /***************************/
14386
14387    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14388                      "fw decompression buffer") != 0) {
14389        /* XXX */
14390        bxe_dma_free(sc, &sc->spq_dma);
14391        sc->spq = NULL;
14392        bxe_dma_free(sc, &sc->sp_dma);
14393        sc->sp = NULL;
14394        bxe_dma_free(sc, &sc->eq_dma);
14395        sc->eq = NULL;
14396        bxe_dma_free(sc, &sc->def_sb_dma);
14397        sc->def_sb = NULL;
14398        bus_dma_tag_destroy(sc->parent_dma_tag);
14399        return (1);
14400    }
14401
14402    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14403
14404    if ((sc->gz_strm =
14405         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14406        /* XXX */
14407        bxe_dma_free(sc, &sc->gz_buf_dma);
14408        sc->gz_buf = NULL;
14409        bxe_dma_free(sc, &sc->spq_dma);
14410        sc->spq = NULL;
14411        bxe_dma_free(sc, &sc->sp_dma);
14412        sc->sp = NULL;
14413        bxe_dma_free(sc, &sc->eq_dma);
14414        sc->eq = NULL;
14415        bxe_dma_free(sc, &sc->def_sb_dma);
14416        sc->def_sb = NULL;
14417        bus_dma_tag_destroy(sc->parent_dma_tag);
14418        return (1);
14419    }
14420
14421    /*************/
14422    /* FASTPATHS */
14423    /*************/
14424
14425    /* allocate DMA memory for each fastpath structure */
14426    for (i = 0; i < sc->num_queues; i++) {
14427        fp = &sc->fp[i];
14428        fp->sc    = sc;
14429        fp->index = i;
14430
14431        /*******************/
14432        /* FP STATUS BLOCK */
14433        /*******************/
14434
14435        snprintf(buf, sizeof(buf), "fp %d status block", i);
14436        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14437                          &fp->sb_dma, buf) != 0) {
14438            /* XXX unwind and free previous fastpath allocations */
14439            BLOGE(sc, "Failed to alloc %s\n", buf);
14440            return (1);
14441        } else {
14442            if (CHIP_IS_E2E3(sc)) {
14443                fp->status_block.e2_sb =
14444                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14445            } else {
14446                fp->status_block.e1x_sb =
14447                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14448            }
14449        }
14450
14451        /******************/
14452        /* FP TX BD CHAIN */
14453        /******************/
14454
14455        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14456        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14457                          &fp->tx_dma, buf) != 0) {
14458            /* XXX unwind and free previous fastpath allocations */
14459            BLOGE(sc, "Failed to alloc %s\n", buf);
14460            return (1);
14461        } else {
14462            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14463        }
14464
14465        /* link together the tx bd chain pages */
14466        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14467            /* index into the tx bd chain array to last entry per page */
14468            struct eth_tx_next_bd *tx_next_bd =
14469                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14470            /* point to the next page and wrap from last page */
14471            busaddr = (fp->tx_dma.paddr +
14472                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14473            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14474            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14475        }
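        /*
         * On the final iteration (j == TX_BD_NUM_PAGES) the modulo above
         * evaluates to 0, so the last page's next_bd entry points back at
         * the first page and the chain forms a closed ring.
         */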
14476
14477        /******************/
14478        /* FP RX BD CHAIN */
14479        /******************/
14480
14481        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14482        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14483                          &fp->rx_dma, buf) != 0) {
14484            /* XXX unwind and free previous fastpath allocations */
14485            BLOGE(sc, "Failed to alloc %s\n", buf);
14486            return (1);
14487        } else {
14488            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14489        }
14490
14491        /* link together the rx bd chain pages */
14492        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14493            /* index into the rx bd chain array to last entry per page */
14494            struct eth_rx_bd *rx_bd =
14495                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14496            /* point to the next page and wrap from last page */
14497            busaddr = (fp->rx_dma.paddr +
14498                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14499            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14500            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14501        }
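        /*
         * The next-page pointer is written at (per-page total - 2) rather
         * than at the very last slot; the final two eth_rx_bd entries of
         * each page appear to be reserved for this link (the SGE chain
         * below uses the same layout).
         */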
14502
14503        /*******************/
14504        /* FP RX RCQ CHAIN */
14505        /*******************/
14506
14507        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14508        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14509                          &fp->rcq_dma, buf) != 0) {
14510            /* XXX unwind and free previous fastpath allocations */
14511            BLOGE(sc, "Failed to alloc %s\n", buf);
14512            return (1);
14513        } else {
14514            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14515        }
14516
14517        /* link together the rcq chain pages */
14518        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14519            /* index into the rcq chain array to last entry per page */
14520            struct eth_rx_cqe_next_page *rx_cqe_next =
14521                (struct eth_rx_cqe_next_page *)
14522                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14523            /* point to the next page and wrap from last page */
14524            busaddr = (fp->rcq_dma.paddr +
14525                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14526            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14527            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14528        }
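        /*
         * Unlike the RX BD chain, only the final CQE slot of each page is
         * consumed here; it is overlaid with eth_rx_cqe_next_page to hold
         * the link to the following page.
         */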
14529
14530        /*******************/
14531        /* FP RX SGE CHAIN */
14532        /*******************/
14533
14534        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14535        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14536                          &fp->rx_sge_dma, buf) != 0) {
14537            /* XXX unwind and free previous fastpath allocations */
14538            BLOGE(sc, "Failed to alloc %s\n", buf);
14539            return (1);
14540        } else {
14541            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14542        }
14543
14544        /* link together the sge chain pages */
14545        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14546            /* index into the sge chain array to last entry per page */
14547            struct eth_rx_sge *rx_sge =
14548                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14549            /* point to the next page and wrap from last page */
14550            busaddr = (fp->rx_sge_dma.paddr +
14551                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14552            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14553            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14554        }
14555
14556        /***********************/
14557        /* FP TX MBUF DMA MAPS */
14558        /***********************/
14559
14560        /* set required sizes before mapping to conserve resources */
14561        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14562            max_size     = BXE_TSO_MAX_SIZE;
14563            max_segments = BXE_TSO_MAX_SEGMENTS;
14564            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14565        } else {
14566            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14567            max_segments = BXE_MAX_SEGMENTS;
14568            max_seg_size = MCLBYTES;
14569        }
14570
14571        /* create a dma tag for the tx mbufs */
14572        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14573                                1,                  /* alignment */
14574                                0,                  /* boundary limit */
14575                                BUS_SPACE_MAXADDR,  /* restricted low */
14576                                BUS_SPACE_MAXADDR,  /* restricted hi */
14577                                NULL,               /* addr filter() */
14578                                NULL,               /* addr filter() arg */
14579                                max_size,           /* max map size */
14580                                max_segments,       /* num discontinuous */
14581                                max_seg_size,       /* max seg size */
14582                                0,                  /* flags */
14583                                NULL,               /* lock() */
14584                                NULL,               /* lock() arg */
14585                                &fp->tx_mbuf_tag);  /* returned dma tag */
14586        if (rc != 0) {
14587            /* XXX unwind and free previous fastpath allocations */
14588            BLOGE(sc, "Failed to create dma tag for "
14589                      "'fp %d tx mbufs' (%d)\n", i, rc);
14590            return (1);
14591        }
14592
14593        /* create dma maps for each of the tx mbuf clusters */
14594        for (j = 0; j < TX_BD_TOTAL; j++) {
14595            if (bus_dmamap_create(fp->tx_mbuf_tag,
14596                                  BUS_DMA_NOWAIT,
14597                                  &fp->tx_mbuf_chain[j].m_map)) {
14598                /* XXX unwind and free previous fastpath allocations */
14599                BLOGE(sc, "Failed to create dma map for "
14600                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14601                return (1);
14602            }
14603        }
14604
14605        /***********************/
14606        /* FP RX MBUF DMA MAPS */
14607        /***********************/
14608
14609        /* create a dma tag for the rx mbufs */
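        /*
         * Each rx mbuf is mapped as a single physically contiguous segment
         * of at most MJUM9BYTES (one 9k jumbo cluster), so no scatter/gather
         * is needed on the receive path.
         */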
14610        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14611                                1,                  /* alignment */
14612                                0,                  /* boundary limit */
14613                                BUS_SPACE_MAXADDR,  /* restricted low */
14614                                BUS_SPACE_MAXADDR,  /* restricted hi */
14615                                NULL,               /* addr filter() */
14616                                NULL,               /* addr filter() arg */
14617                                MJUM9BYTES,         /* max map size */
14618                                1,                  /* num discontinuous */
14619                                MJUM9BYTES,         /* max seg size */
14620                                0,                  /* flags */
14621                                NULL,               /* lock() */
14622                                NULL,               /* lock() arg */
14623                                &fp->rx_mbuf_tag);  /* returned dma tag */
14624        if (rc != 0) {
14625            /* XXX unwind and free previous fastpath allocations */
14626            BLOGE(sc, "Failed to create dma tag for "
14627                      "'fp %d rx mbufs' (%d)\n", i, rc);
14628            return (1);
14629        }
14630
14631        /* create dma maps for each of the rx mbuf clusters */
14632        for (j = 0; j < RX_BD_TOTAL; j++) {
14633            if (bus_dmamap_create(fp->rx_mbuf_tag,
14634                                  BUS_DMA_NOWAIT,
14635                                  &fp->rx_mbuf_chain[j].m_map)) {
14636                /* XXX unwind and free previous fastpath allocations */
14637                BLOGE(sc, "Failed to create dma map for "
14638                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14639                return (1);
14640            }
14641        }
14642
14643        /* create dma map for the spare rx mbuf cluster */
14644        if (bus_dmamap_create(fp->rx_mbuf_tag,
14645                              BUS_DMA_NOWAIT,
14646                              &fp->rx_mbuf_spare_map)) {
14647            /* XXX unwind and free previous fastpath allocations */
14648            BLOGE(sc, "Failed to create dma map for "
14649                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14650            return (1);
14651        }
14652
14653        /***************************/
14654        /* FP RX SGE MBUF DMA MAPS */
14655        /***************************/
14656
14657        /* create a dma tag for the rx sge mbufs */
14658        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14659                                1,                  /* alignment */
14660                                0,                  /* boundary limit */
14661                                BUS_SPACE_MAXADDR,  /* restricted low */
14662                                BUS_SPACE_MAXADDR,  /* restricted hi */
14663                                NULL,               /* addr filter() */
14664                                NULL,               /* addr filter() arg */
14665                                BCM_PAGE_SIZE,      /* max map size */
14666                                1,                  /* num discontinuous */
14667                                BCM_PAGE_SIZE,      /* max seg size */
14668                                0,                  /* flags */
14669                                NULL,               /* lock() */
14670                                NULL,               /* lock() arg */
14671                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14672        if (rc != 0) {
14673            /* XXX unwind and free previous fastpath allocations */
14674            BLOGE(sc, "Failed to create dma tag for "
14675                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14676            return (1);
14677        }
14678
14679        /* create dma maps for the rx sge mbuf clusters */
14680        for (j = 0; j < RX_SGE_TOTAL; j++) {
14681            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14682                                  BUS_DMA_NOWAIT,
14683                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14684                /* XXX unwind and free previous fastpath allocations */
14685                BLOGE(sc, "Failed to create dma map for "
14686                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14687                return (1);
14688            }
14689        }
14690
14691        /* create dma map for the spare rx sge mbuf cluster */
14692        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14693                              BUS_DMA_NOWAIT,
14694                              &fp->rx_sge_mbuf_spare_map)) {
14695            /* XXX unwind and free previous fastpath allocations */
14696            BLOGE(sc, "Failed to create dma map for "
14697                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14698            return (1);
14699        }
14700
14701        /***************************/
14702        /* FP RX TPA MBUF DMA MAPS */
14703        /***************************/
14704
14705        /* create dma maps for the rx tpa mbuf clusters */
14706        max_agg_queues = MAX_AGG_QS(sc);
14707
14708        for (j = 0; j < max_agg_queues; j++) {
14709            if (bus_dmamap_create(fp->rx_mbuf_tag,
14710                                  BUS_DMA_NOWAIT,
14711                                  &fp->rx_tpa_info[j].bd.m_map)) {
14712                /* XXX unwind and free previous fastpath allocations */
14713                BLOGE(sc, "Failed to create dma map for "
14714                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14715                return (1);
14716            }
14717        }
14718
14719        /* create dma map for the spare rx tpa mbuf cluster */
14720        if (bus_dmamap_create(fp->rx_mbuf_tag,
14721                              BUS_DMA_NOWAIT,
14722                              &fp->rx_tpa_info_mbuf_spare_map)) {
14723            /* XXX unwind and free previous fastpath allocations */
14724            BLOGE(sc, "Failed to create dma map for "
14725                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14726            return (1);
14727        }
14728
14729        bxe_init_sge_ring_bit_mask(fp);
14730    }
14731
14732    return (0);
14733}
14734
14735static void
14736bxe_free_hsi_mem(struct bxe_softc *sc)
14737{
14738    struct bxe_fastpath *fp;
14739    int max_agg_queues;
14740    int i, j;
14741
14742    if (sc->parent_dma_tag == NULL) {
14743        return; /* assume nothing was allocated */
14744    }
14745
14746    for (i = 0; i < sc->num_queues; i++) {
14747        fp = &sc->fp[i];
14748
14749        /*******************/
14750        /* FP STATUS BLOCK */
14751        /*******************/
14752
14753        bxe_dma_free(sc, &fp->sb_dma);
14754        memset(&fp->status_block, 0, sizeof(fp->status_block));
14755
14756        /******************/
14757        /* FP TX BD CHAIN */
14758        /******************/
14759
14760        bxe_dma_free(sc, &fp->tx_dma);
14761        fp->tx_chain = NULL;
14762
14763        /******************/
14764        /* FP RX BD CHAIN */
14765        /******************/
14766
14767        bxe_dma_free(sc, &fp->rx_dma);
14768        fp->rx_chain = NULL;
14769
14770        /*******************/
14771        /* FP RX RCQ CHAIN */
14772        /*******************/
14773
14774        bxe_dma_free(sc, &fp->rcq_dma);
14775        fp->rcq_chain = NULL;
14776
14777        /*******************/
14778        /* FP RX SGE CHAIN */
14779        /*******************/
14780
14781        bxe_dma_free(sc, &fp->rx_sge_dma);
14782        fp->rx_sge_chain = NULL;
14783
14784        /***********************/
14785        /* FP TX MBUF DMA MAPS */
14786        /***********************/
14787
14788        if (fp->tx_mbuf_tag != NULL) {
14789            for (j = 0; j < TX_BD_TOTAL; j++) {
14790                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14791                    bus_dmamap_unload(fp->tx_mbuf_tag,
14792                                      fp->tx_mbuf_chain[j].m_map);
14793                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14794                                       fp->tx_mbuf_chain[j].m_map);
14795                }
14796            }
14797
14798            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14799            fp->tx_mbuf_tag = NULL;
14800        }
14801
14802        /***********************/
14803        /* FP RX MBUF DMA MAPS */
14804        /***********************/
14805
14806        if (fp->rx_mbuf_tag != NULL) {
14807            for (j = 0; j < RX_BD_TOTAL; j++) {
14808                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14809                    bus_dmamap_unload(fp->rx_mbuf_tag,
14810                                      fp->rx_mbuf_chain[j].m_map);
14811                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14812                                       fp->rx_mbuf_chain[j].m_map);
14813                }
14814            }
14815
14816            if (fp->rx_mbuf_spare_map != NULL) {
14817                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14818                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14819            }
14820
14821            /***************************/
14822            /* FP RX TPA MBUF DMA MAPS */
14823            /***************************/
14824
14825            max_agg_queues = MAX_AGG_QS(sc);
14826
14827            for (j = 0; j < max_agg_queues; j++) {
14828                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14829                    bus_dmamap_unload(fp->rx_mbuf_tag,
14830                                      fp->rx_tpa_info[j].bd.m_map);
14831                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14832                                       fp->rx_tpa_info[j].bd.m_map);
14833                }
14834            }
14835
14836            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14837                bus_dmamap_unload(fp->rx_mbuf_tag,
14838                                  fp->rx_tpa_info_mbuf_spare_map);
14839                bus_dmamap_destroy(fp->rx_mbuf_tag,
14840                                   fp->rx_tpa_info_mbuf_spare_map);
14841            }
14842
14843            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14844            fp->rx_mbuf_tag = NULL;
14845        }
14846
14847        /***************************/
14848        /* FP RX SGE MBUF DMA MAPS */
14849        /***************************/
14850
14851        if (fp->rx_sge_mbuf_tag != NULL) {
14852            for (j = 0; j < RX_SGE_TOTAL; j++) {
14853                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14854                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14855                                      fp->rx_sge_mbuf_chain[j].m_map);
14856                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14857                                       fp->rx_sge_mbuf_chain[j].m_map);
14858                }
14859            }
14860
14861            if (fp->rx_sge_mbuf_spare_map != NULL) {
14862                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14863                                  fp->rx_sge_mbuf_spare_map);
14864                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14865                                   fp->rx_sge_mbuf_spare_map);
14866            }
14867
14868            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14869            fp->rx_sge_mbuf_tag = NULL;
14870        }
14871    }
14872
14873    /***************************/
14874    /* FW DECOMPRESSION BUFFER */
14875    /***************************/
14876
14877    bxe_dma_free(sc, &sc->gz_buf_dma);
14878    sc->gz_buf = NULL;
14879    free(sc->gz_strm, M_DEVBUF);
14880    sc->gz_strm = NULL;
14881
14882    /*******************/
14883    /* SLOW PATH QUEUE */
14884    /*******************/
14885
14886    bxe_dma_free(sc, &sc->spq_dma);
14887    sc->spq = NULL;
14888
14889    /*************/
14890    /* SLOW PATH */
14891    /*************/
14892
14893    bxe_dma_free(sc, &sc->sp_dma);
14894    sc->sp = NULL;
14895
14896    /***************/
14897    /* EVENT QUEUE */
14898    /***************/
14899
14900    bxe_dma_free(sc, &sc->eq_dma);
14901    sc->eq = NULL;
14902
14903    /************************/
14904    /* DEFAULT STATUS BLOCK */
14905    /************************/
14906
14907    bxe_dma_free(sc, &sc->def_sb_dma);
14908    sc->def_sb = NULL;
14909
14910    bus_dma_tag_destroy(sc->parent_dma_tag);
14911    sc->parent_dma_tag = NULL;
14912}
14913
14914/*
14915 * A DMAE transaction issued by the previous driver may still have been in
14916 * flight when the pre-boot stage ended and boot began. That invalidates the
14917 * transaction's addresses, setting the 'was-error' bit in the PGLUE block and
14918 * causing all hw-to-host PCIe transactions to time out. If this happened we
14919 * want to clear the PGLUE interrupt which detected it, and the was-done bit
14920 */
14921static void
14922bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14923{
14924    uint32_t val;
14925
14926    if (!CHIP_IS_E1x(sc)) {
14927        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14928        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14929            BLOGD(sc, DBG_LOAD,
14930                  "Clearing 'was-error' bit that was set in pglueb");
14931            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14932        }
14933    }
14934}
14935
14936static int
14937bxe_prev_mcp_done(struct bxe_softc *sc)
14938{
14939    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
14940                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
14941    if (!rc) {
14942        BLOGE(sc, "MCP response failure, aborting\n");
14943        return (-1);
14944    }
14945
14946    return (0);
14947}
14948
14949static struct bxe_prev_list_node *
14950bxe_prev_path_get_entry(struct bxe_softc *sc)
14951{
14952    struct bxe_prev_list_node *tmp;
14953
14954    LIST_FOREACH(tmp, &bxe_prev_list, node) {
14955        if ((sc->pcie_bus == tmp->bus) &&
14956            (sc->pcie_device == tmp->slot) &&
14957            (SC_PATH(sc) == tmp->path)) {
14958            return (tmp);
14959        }
14960    }
14961
14962    return (NULL);
14963}
14964
14965static uint8_t
14966bxe_prev_is_path_marked(struct bxe_softc *sc)
14967{
14968    struct bxe_prev_list_node *tmp;
14969    int rc = FALSE;
14970
14971    mtx_lock(&bxe_prev_mtx);
14972
14973    tmp = bxe_prev_path_get_entry(sc);
14974    if (tmp) {
14975        if (tmp->aer) {
14976            BLOGD(sc, DBG_LOAD,
14977                  "Path %d/%d/%d was marked by AER\n",
14978                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
14979        } else {
14980            rc = TRUE;
14981            BLOGD(sc, DBG_LOAD,
14982                  "Path %d/%d/%d was already cleaned from previous drivers\n",
14983                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
14984        }
14985    }
14986
14987    mtx_unlock(&bxe_prev_mtx);
14988
14989    return (rc);
14990}
14991
14992static int
14993bxe_prev_mark_path(struct bxe_softc *sc,
14994                   uint8_t          after_undi)
14995{
14996    struct bxe_prev_list_node *tmp;
14997
14998    mtx_lock(&bxe_prev_mtx);
14999
15000    /* Check whether the entry for this path already exists */
15001    tmp = bxe_prev_path_get_entry(sc);
15002    if (tmp) {
15003        if (!tmp->aer) {
15004            BLOGD(sc, DBG_LOAD,
15005                  "Re-marking AER in path %d/%d/%d\n",
15006                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15007        } else {
15008            BLOGD(sc, DBG_LOAD,
15009                  "Removing AER indication from path %d/%d/%d\n",
15010                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15011            tmp->aer = 0;
15012        }
15013
15014        mtx_unlock(&bxe_prev_mtx);
15015        return (0);
15016    }
15017
15018    mtx_unlock(&bxe_prev_mtx);
15019
15020    /* Create an entry for this path and add it */
15021    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15022                 (M_NOWAIT | M_ZERO));
15023    if (!tmp) {
15024        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15025        return (-1);
15026    }
15027
15028    tmp->bus  = sc->pcie_bus;
15029    tmp->slot = sc->pcie_device;
15030    tmp->path = SC_PATH(sc);
15031    tmp->aer  = 0;
15032    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15033
15034    mtx_lock(&bxe_prev_mtx);
15035
15036    BLOGD(sc, DBG_LOAD,
15037          "Marked path %d/%d/%d - finished previous unload\n",
15038          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15039    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15040
15041    mtx_unlock(&bxe_prev_mtx);
15042
15043    return (0);
15044}
15045
15046static int
15047bxe_do_flr(struct bxe_softc *sc)
15048{
15049    int i;
15050
15051    /* only E2 and onwards support FLR */
15052    if (CHIP_IS_E1x(sc)) {
15053        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15054        return (-1);
15055    }
15056
15057    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15058    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15059        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15060              sc->devinfo.bc_ver);
15061        return (-1);
15062    }
15063
15064    /* Wait for Transaction Pending bit clean */
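    /*
     * The poll below backs off exponentially: no delay before the first
     * check, then 100ms, 200ms and 400ms before the remaining attempts
     * ((1 << (i - 1)) * 100 milliseconds).
     */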
15065    for (i = 0; i < 4; i++) {
15066        if (i) {
15067            DELAY(((1 << (i - 1)) * 100) * 1000);
15068        }
15069
15070        if (!bxe_is_pcie_pending(sc)) {
15071            goto clear;
15072        }
15073    }
15074
15075    BLOGE(sc, "PCIE transaction is not cleared, "
15076              "proceeding with reset anyway\n");
15077
15078clear:
15079
15080    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15081    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15082
15083    return (0);
15084}
15085
15086struct bxe_mac_vals {
15087    uint32_t xmac_addr;
15088    uint32_t xmac_val;
15089    uint32_t emac_addr;
15090    uint32_t emac_val;
15091    uint32_t umac_addr;
15092    uint32_t umac_val;
15093    uint32_t bmac_addr;
15094    uint32_t bmac_val[2];
15095};
15096
15097static void
15098bxe_prev_unload_close_mac(struct bxe_softc *sc,
15099                          struct bxe_mac_vals *vals)
15100{
15101    uint32_t val, base_addr, offset, mask, reset_reg;
15102    uint8_t mac_stopped = FALSE;
15103    uint8_t port = SC_PORT(sc);
15104    uint32_t wb_data[2];
15105
15106    /* reset addresses as they also mark which values were changed */
15107    vals->bmac_addr = 0;
15108    vals->umac_addr = 0;
15109    vals->xmac_addr = 0;
15110    vals->emac_addr = 0;
15111
15112    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15113
15114    if (!CHIP_IS_E3(sc)) {
15115        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15116        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15117        if ((mask & reset_reg) && val) {
15118            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15119            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15120                                    : NIG_REG_INGRESS_BMAC0_MEM;
15121            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15122                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15123
15124            /*
15125             * use rd/wr since we cannot use dmae. This is safe
15126             * since MCP won't access the bus due to the request
15127             * to unload, and no function on the path can be
15128             * loaded at this time.
15129             */
15130            wb_data[0] = REG_RD(sc, base_addr + offset);
15131            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15132            vals->bmac_addr = base_addr + offset;
15133            vals->bmac_val[0] = wb_data[0];
15134            vals->bmac_val[1] = wb_data[1];
15135            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15136            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15137            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15138        }
15139
15140        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15141        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15142        vals->emac_val = REG_RD(sc, vals->emac_addr);
15143        REG_WR(sc, vals->emac_addr, 0);
15144        mac_stopped = TRUE;
15145    } else {
15146        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15147            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15148            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15149            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15150            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15151            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15152            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15153            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15154            REG_WR(sc, vals->xmac_addr, 0);
15155            mac_stopped = TRUE;
15156        }
15157
15158        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15159        if (mask & reset_reg) {
15160            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15161            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15162            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15163            vals->umac_val = REG_RD(sc, vals->umac_addr);
15164            REG_WR(sc, vals->umac_addr, 0);
15165            mac_stopped = TRUE;
15166        }
15167    }
15168
15169    if (mac_stopped) {
15170        DELAY(20000);
15171    }
15172}
15173
15174#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15175#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15176#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15177#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
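/*
 * The UNDI producer register packs both ring producers into a single 32-bit
 * word: the BD producer occupies the upper 16 bits and the RCQ producer the
 * lower 16 bits. For example, a raw value of 0x00050003 decodes to bd = 5 and
 * rcq = 3, and BXE_PREV_UNDI_PROD(3, 5) re-encodes the same value.
 */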
15178
15179static void
15180bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15181                         uint8_t          port,
15182                         uint8_t          inc)
15183{
15184    uint16_t rcq, bd;
15185    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15186
15187    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15188    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15189
15190    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15191    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15192
15193    BLOGD(sc, DBG_LOAD,
15194          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15195          port, bd, rcq);
15196}
15197
15198static int
15199bxe_prev_unload_common(struct bxe_softc *sc)
15200{
15201    uint32_t reset_reg, tmp_reg = 0, rc;
15202    uint8_t prev_undi = FALSE;
15203    struct bxe_mac_vals mac_vals;
15204    uint32_t timer_count = 1000;
15205    uint32_t prev_brb;
15206
15207    /*
15208     * It is possible a previous function received 'common' answer,
15209     * but hasn't loaded yet, therefore creating a scenario of
15210     * multiple functions receiving 'common' on the same path.
15211     */
15212    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15213
15214    memset(&mac_vals, 0, sizeof(mac_vals));
15215
15216    if (bxe_prev_is_path_marked(sc)) {
15217        return (bxe_prev_mcp_done(sc));
15218    }
15219
15220    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15221
15222    /* Reset should be performed after BRB is emptied */
15223    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15224        /* Close the MAC Rx to prevent BRB from filling up */
15225        bxe_prev_unload_close_mac(sc, &mac_vals);
15226
15227        /* close LLH filters towards the BRB */
15228        elink_set_rx_filter(&sc->link_params, 0);
15229
15230        /*
15231         * Check if the UNDI driver was previously loaded.
15232         * UNDI driver initializes CID offset for normal bell to 0x7
15233         */
15234        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15235            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15236            if (tmp_reg == 0x7) {
15237                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15238                prev_undi = TRUE;
15239                /* clear the UNDI indication */
15240                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15241                /* clear possible idle check errors */
15242                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15243            }
15244        }
15245
15246        /* wait until BRB is empty */
15247        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15248        while (timer_count) {
15249            prev_brb = tmp_reg;
15250
15251            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15252            if (!tmp_reg) {
15253                break;
15254            }
15255
15256            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15257
15258            /* reset timer as long as BRB actually gets emptied */
15259            if (prev_brb > tmp_reg) {
15260                timer_count = 1000;
15261            } else {
15262                timer_count--;
15263            }
15264
15265            /* If UNDI resides in memory, manually increment it */
15266            if (prev_undi) {
15267                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15268            }
15269
15270            DELAY(10);
15271        }
15272
15273        if (!timer_count) {
15274            BLOGE(sc, "Failed to empty BRB\n");
15275        }
15276    }
15277
15278    /* No packets are in the pipeline, path is ready for reset */
15279    bxe_reset_common(sc);
15280
15281    if (mac_vals.xmac_addr) {
15282        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15283    }
15284    if (mac_vals.umac_addr) {
15285        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15286    }
15287    if (mac_vals.emac_addr) {
15288        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15289    }
15290    if (mac_vals.bmac_addr) {
15291        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15292        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15293    }
15294
15295    rc = bxe_prev_mark_path(sc, prev_undi);
15296    if (rc) {
15297        bxe_prev_mcp_done(sc);
15298        return (rc);
15299    }
15300
15301    return (bxe_prev_mcp_done(sc));
15302}
15303
15304static int
15305bxe_prev_unload_uncommon(struct bxe_softc *sc)
15306{
15307    int rc;
15308
15309    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15310
15311    /* Test if previous unload process was already finished for this path */
15312    if (bxe_prev_is_path_marked(sc)) {
15313        return (bxe_prev_mcp_done(sc));
15314    }
15315
15316    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15317
15318    /*
15319     * If function has FLR capabilities, and existing FW version matches
15320     * the one required, then FLR will be sufficient to clean any residue
15321     * left by previous driver
15322     */
15323    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15324    if (!rc) {
15325        /* fw version is good */
15326        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15327        rc = bxe_do_flr(sc);
15328    }
15329
15330    if (!rc) {
15331        /* FLR was performed */
15332        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15333        return (0);
15334    }
15335
15336    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15337
15338    /* Close the MCP request, return failure */
15339    rc = bxe_prev_mcp_done(sc);
15340    if (!rc) {
15341        rc = BXE_PREV_WAIT_NEEDED;
15342    }
15343
15344    return (rc);
15345}
15346
15347static int
15348bxe_prev_unload(struct bxe_softc *sc)
15349{
15350    int time_counter = 10;
15351    uint32_t fw, hw_lock_reg, hw_lock_val;
15352    uint32_t rc = 0;
15353
15354    /*
15355     * Clear the HW of errors which may have resulted from an interrupted
15356     * DMAE transaction.
15357     */
15358    bxe_prev_interrupted_dmae(sc);
15359
15360    /* Release previously held locks */
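    /*
     * Each PF has its own driver-control lock register with an 8-byte
     * stride: functions 0-5 are addressed from MISC_REG_DRIVER_CONTROL_1
     * and functions 6-7 continue from MISC_REG_DRIVER_CONTROL_7, which is
     * what the computation below selects.
     */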
15361    hw_lock_reg =
15362        (SC_FUNC(sc) <= 5) ?
15363            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15364            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15365
15366    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15367    if (hw_lock_val) {
15368        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15369            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15370            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15371                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15372        }
15373        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15374        REG_WR(sc, hw_lock_reg, 0xffffffff);
15375    } else {
15376        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15377    }
15378
15379    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15380        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15381        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15382    }
15383
15384    do {
15385        /* Lock MCP using an unload request */
15386        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15387        if (!fw) {
15388            BLOGE(sc, "MCP response failure, aborting\n");
15389            rc = -1;
15390            break;
15391        }
15392
15393        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15394            rc = bxe_prev_unload_common(sc);
15395            break;
15396        }
15397
15398        /* non-common reply from MCP might require looping */
15399        rc = bxe_prev_unload_uncommon(sc);
15400        if (rc != BXE_PREV_WAIT_NEEDED) {
15401            break;
15402        }
15403
15404        DELAY(20000);
15405    } while (--time_counter);
15406
15407    if (!time_counter || rc) {
15408        BLOGE(sc, "Failed to unload previous driver!"
15409            " time_counter %d rc %d\n", time_counter, rc);
15410        rc = -1;
15411    }
15412
15413    return (rc);
15414}
15415
15416void
15417bxe_dcbx_set_state(struct bxe_softc *sc,
15418                   uint8_t          dcb_on,
15419                   uint32_t         dcbx_enabled)
15420{
15421    if (!CHIP_IS_E1x(sc)) {
15422        sc->dcb_state = dcb_on;
15423        sc->dcbx_enabled = dcbx_enabled;
15424    } else {
15425        sc->dcb_state = FALSE;
15426        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15427    }
15428    BLOGD(sc, DBG_LOAD,
15429          "DCB state [%s:%s]\n",
15430          dcb_on ? "ON" : "OFF",
15431          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15432          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15433          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15434          "on-chip with negotiation" : "invalid");
15435}
15436
15437/* must be called after sriov-enable */
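/*
 * The total is rounded up to the QM's CID allocation granularity
 * (QM_CID_ROUND). Purely as an illustration (the real macro values live in
 * the headers): with a round size of 1024, any total between 1 and 1024
 * CIDs would still reserve one full 1024-entry block.
 */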
15438static int
15439bxe_set_qm_cid_count(struct bxe_softc *sc)
15440{
15441    int cid_count = BXE_L2_MAX_CID(sc);
15442
15443    if (IS_SRIOV(sc)) {
15444        cid_count += BXE_VF_CIDS;
15445    }
15446
15447    if (CNIC_SUPPORT(sc)) {
15448        cid_count += CNIC_CID_MAX;
15449    }
15450
15451    return (roundup(cid_count, QM_CID_ROUND));
15452}
15453
15454static void
15455bxe_init_multi_cos(struct bxe_softc *sc)
15456{
15457    int pri, cos;
15458
15459    uint32_t pri_map = 0; /* XXX change to user config */
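    /*
     * pri_map packs one 4-bit COS value per priority: bits 3:0 select the
     * COS for priority 0, bits 7:4 for priority 1, and so on, which is why
     * a value of 0 maps every priority to COS 0.
     */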
15460
15461    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15462        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15463        if (cos < sc->max_cos) {
15464            sc->prio_to_cos[pri] = cos;
15465        } else {
15466            BLOGW(sc, "Invalid COS %d for priority %d "
15467                      "(max COS is %d), setting to 0\n",
15468                  cos, pri, (sc->max_cos - 1));
15469            sc->prio_to_cos[pri] = 0;
15470        }
15471    }
15472}
15473
15474static int
15475bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15476{
15477    struct bxe_softc *sc;
15478    int error, result;
15479
15480    result = 0;
15481    error = sysctl_handle_int(oidp, &result, 0, req);
15482
15483    if (error || !req->newptr) {
15484        return (error);
15485    }
15486
15487    if (result == 1) {
15488        uint32_t  temp;
15489        sc = (struct bxe_softc *)arg1;
15490
15491        BLOGI(sc, "... dumping driver state ...\n");
15492        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15493        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15494    }
15495
15496    return (error);
15497}
15498
15499static int
15500bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15501{
15502    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15503    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15504    uint32_t *offset;
15505    uint64_t value = 0;
15506    int index = (int)arg2;
15507
15508    if (index >= BXE_NUM_ETH_STATS) {
15509        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15510        return (-1);
15511    }
15512
15513    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15514
15515    switch (bxe_eth_stats_arr[index].size) {
15516    case 4:
15517        value = (uint64_t)*offset;
15518        break;
15519    case 8:
15520        value = HILO_U64(*offset, *(offset + 1));
15521        break;
15522    default:
15523        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15524              index, bxe_eth_stats_arr[index].size);
15525        return (-1);
15526    }
15527
15528    return (sysctl_handle_64(oidp, &value, 0, req));
15529}
15530
15531static int
15532bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15533{
15534    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15535    uint32_t *eth_stats;
15536    uint32_t *offset;
15537    uint64_t value = 0;
15538    uint32_t q_stat = (uint32_t)arg2;
15539    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15540    uint32_t index = (q_stat & 0xffff);
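    /*
     * arg2 was encoded by bxe_add_sysctls() as ((fp index << 16) | stat
     * index), so both values are recovered above from the single handler
     * argument.
     */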
15541
15542    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15543
15544    if (index >= BXE_NUM_ETH_Q_STATS) {
15545        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15546        return (-1);
15547    }
15548
15549    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15550
15551    switch (bxe_eth_q_stats_arr[index].size) {
15552    case 4:
15553        value = (uint64_t)*offset;
15554        break;
15555    case 8:
15556        value = HILO_U64(*offset, *(offset + 1));
15557        break;
15558    default:
15559        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15560              index, bxe_eth_q_stats_arr[index].size);
15561        return (-1);
15562    }
15563
15564    return (sysctl_handle_64(oidp, &value, 0, req));
15565}
15566
15567static void
15568bxe_add_sysctls(struct bxe_softc *sc)
15569{
15570    struct sysctl_ctx_list *ctx;
15571    struct sysctl_oid_list *children;
15572    struct sysctl_oid *queue_top, *queue;
15573    struct sysctl_oid_list *queue_top_children, *queue_children;
15574    char queue_num_buf[32];
15575    uint32_t q_stat;
15576    int i, j;
15577
15578    ctx = device_get_sysctl_ctx(sc->dev);
15579    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15580
15581    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15582                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15583                      "version");
15584
15585    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15586             BCM_5710_FW_MAJOR_VERSION,
15587             BCM_5710_FW_MINOR_VERSION,
15588             BCM_5710_FW_REVISION_VERSION,
15589             BCM_5710_FW_ENGINEERING_VERSION);
15590
15591    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15592        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15593         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15594         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15595         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15596                                                                "Unknown"));
15597    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15598                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15599                    "multifunction vnics per port");
15600
15601    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15602        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15603         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15604         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15605                                              "???GT/s"),
15606        sc->devinfo.pcie_link_width);
15607
15608    sc->debug = bxe_debug;
15609
15610#if __FreeBSD_version >= 900000
15611    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15612                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15613                      "bootcode version");
15614    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15615                      CTLFLAG_RD, sc->fw_ver_str, 0,
15616                      "firmware version");
15617    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15618                      CTLFLAG_RD, sc->mf_mode_str, 0,
15619                      "multifunction mode");
15620    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15621                      CTLFLAG_RD, sc->mac_addr_str, 0,
15622                      "mac address");
15623    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15624                      CTLFLAG_RD, sc->pci_link_str, 0,
15625                      "pci link status");
15626    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15627                    CTLFLAG_RW, &sc->debug,
15628                    "debug logging mode");
15629#else
15630    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15631                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15632                      "bootcode version");
15633    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15634                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15635                      "firmware version");
15636    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15637                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15638                      "multifunction mode");
15639    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15640                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15641                      "mac address");
15642    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15643                      CTLFLAG_RD, &sc->pci_link_str, 0,
15644                      "pci link status");
15645    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15646                    CTLFLAG_RW, &sc->debug, 0,
15647                    "debug logging mode");
15648#endif /* #if __FreeBSD_version >= 900000 */
15649
15650    sc->trigger_grcdump = 0;
15651    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15652                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15653                   "trigger_grcdump should be set before"
15654                   " collecting the grcdump");
15655
15656    sc->grcdump_started = 0;
15657    sc->grcdump_done = 0;
15658    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15659                   CTLFLAG_RD, &sc->grcdump_done, 0,
15660                   "set by driver when grcdump is done");
15661
15662    sc->rx_budget = bxe_rx_budget;
15663    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15664                    CTLFLAG_RW, &sc->rx_budget, 0,
15665                    "rx processing budget");
15666
15667    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15668                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15669                    bxe_sysctl_state, "IU", "dump driver state");
15670
15671    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15672        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15673                        bxe_eth_stats_arr[i].string,
15674                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15675                        bxe_sysctl_eth_stat, "LU",
15676                        bxe_eth_stats_arr[i].string);
15677    }
15678
15679    /* add a new parent node for all queues "dev.bxe.#.queue" */
15680    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15681                                CTLFLAG_RD, NULL, "queue");
15682    queue_top_children = SYSCTL_CHILDREN(queue_top);
15683
15684    for (i = 0; i < sc->num_queues; i++) {
15685        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15686        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15687        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15688                                queue_num_buf, CTLFLAG_RD, NULL,
15689                                "single queue");
15690        queue_children = SYSCTL_CHILDREN(queue);
15691
15692        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15693            q_stat = ((i << 16) | j);
15694            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15695                            bxe_eth_q_stats_arr[j].string,
15696                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15697                            bxe_sysctl_eth_q_stat, "LU",
15698                            bxe_eth_q_stats_arr[j].string);
15699        }
15700    }
15701}
15702
15703static int
15704bxe_alloc_buf_rings(struct bxe_softc *sc)
15705{
15706#if __FreeBSD_version >= 901504
15707
15708    int i;
15709    struct bxe_fastpath *fp;
15710
15711    for (i = 0; i < sc->num_queues; i++) {
15712
15713        fp = &sc->fp[i];
15714
15715        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15716                                   M_NOWAIT, &fp->tx_mtx);
15717        if (fp->tx_br == NULL)
15718            return (-1);
15719    }
15720#endif
15721    return (0);
15722}
15723
15724static void
15725bxe_free_buf_rings(struct bxe_softc *sc)
15726{
15727#if __FreeBSD_version >= 901504
15728
15729    int i;
15730    struct bxe_fastpath *fp;
15731
15732    for (i = 0; i < sc->num_queues; i++) {
15733
15734        fp = &sc->fp[i];
15735
15736        if (fp->tx_br) {
15737            buf_ring_free(fp->tx_br, M_DEVBUF);
15738            fp->tx_br = NULL;
15739        }
15740    }
15741
15742#endif
15743}
15744
15745static void
15746bxe_init_fp_mutexs(struct bxe_softc *sc)
15747{
15748    int i;
15749    struct bxe_fastpath *fp;
15750
15751    for (i = 0; i < sc->num_queues; i++) {
15752
15753        fp = &sc->fp[i];
15754
15755        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15756            "bxe%d_fp%d_tx_lock", sc->unit, i);
15757        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15758
15759        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15760            "bxe%d_fp%d_rx_lock", sc->unit, i);
15761        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15762    }
15763}
15764
15765static void
15766bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15767{
15768    int i;
15769    struct bxe_fastpath *fp;
15770
15771    for (i = 0; i < sc->num_queues; i++) {
15772
15773        fp = &sc->fp[i];
15774
15775        if (mtx_initialized(&fp->tx_mtx)) {
15776            mtx_destroy(&fp->tx_mtx);
15777        }
15778
15779        if (mtx_initialized(&fp->rx_mtx)) {
15780            mtx_destroy(&fp->rx_mtx);
15781        }
15782    }
15783}
15784
15785
15786/*
15787 * Device attach function.
15788 *
15789 * Allocates device resources, performs secondary chip identification, and
15790 * initializes driver instance variables. This function is called from driver
15791 * load after a successful probe.
15792 *
15793 * Returns:
15794 *   0 = Success, >0 = Failure
15795 */
15796static int
15797bxe_attach(device_t dev)
15798{
15799    struct bxe_softc *sc;
15800
15801    sc = device_get_softc(dev);
15802
15803    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15804
15805    sc->state = BXE_STATE_CLOSED;
15806
15807    sc->dev  = dev;
15808    sc->unit = device_get_unit(dev);
15809
15810    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15811
15812    sc->pcie_bus    = pci_get_bus(dev);
15813    sc->pcie_device = pci_get_slot(dev);
15814    sc->pcie_func   = pci_get_function(dev);
15815
15816    /* enable bus master capability */
15817    pci_enable_busmaster(dev);
15818
15819    /* get the BARs */
15820    if (bxe_allocate_bars(sc) != 0) {
15821        return (ENXIO);
15822    }
15823
15824    /* initialize the mutexes */
15825    bxe_init_mutexes(sc);
15826
15827    /* prepare the periodic callout */
15828    callout_init(&sc->periodic_callout, 0);
15829
15830    /* prepare the chip taskqueue */
15831    sc->chip_tq_flags = CHIP_TQ_NONE;
15832    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15833             "bxe%d_chip_tq", sc->unit);
15834    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15835    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15836                                   taskqueue_thread_enqueue,
15837                                   &sc->chip_tq);
15838    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15839                            "%s", sc->chip_tq_name);
15840
15841    /* get device info and set params */
15842    if (bxe_get_device_info(sc) != 0) {
15843        BLOGE(sc, "getting device info\n");
15844        bxe_deallocate_bars(sc);
15845        pci_disable_busmaster(dev);
15846        return (ENXIO);
15847    }
15848
15849    /* get final misc params */
15850    bxe_get_params(sc);
15851
15852    /* set the default MTU (changed via ifconfig) */
15853    sc->mtu = ETHERMTU;
15854
15855    bxe_set_modes_bitmap(sc);
15856
15857    /* XXX
15858     * If in AFEX mode and the function is configured for FCoE
15859     * then bail... no L2 allowed.
15860     */
15861
15862    /* get phy settings from shmem and 'and' against admin settings */
15863    bxe_get_phy_info(sc);
15864
15865    /* initialize the FreeBSD ifnet interface */
15866    if (bxe_init_ifnet(sc) != 0) {
15867        bxe_release_mutexes(sc);
15868        bxe_deallocate_bars(sc);
15869        pci_disable_busmaster(dev);
15870        return (ENXIO);
15871    }
15872
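    /*
     * From here on every failure path must unwind, in reverse order,
     * everything that was set up above.
     */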
15873    if (bxe_add_cdev(sc) != 0) {
15874        if (sc->ifp != NULL) {
15875            ether_ifdetach(sc->ifp);
15876        }
15877        ifmedia_removeall(&sc->ifmedia);
15878        bxe_release_mutexes(sc);
15879        bxe_deallocate_bars(sc);
15880        pci_disable_busmaster(dev);
15881        return (ENXIO);
15882    }
15883
15884    /* allocate device interrupts */
15885    if (bxe_interrupt_alloc(sc) != 0) {
15886        bxe_del_cdev(sc);
15887        if (sc->ifp != NULL) {
15888            ether_ifdetach(sc->ifp);
15889        }
15890        ifmedia_removeall(&sc->ifmedia);
15891        bxe_release_mutexes(sc);
15892        bxe_deallocate_bars(sc);
15893        pci_disable_busmaster(dev);
15894        return (ENXIO);
15895    }
15896
15897    bxe_init_fp_mutexs(sc);
15898
15899    if (bxe_alloc_buf_rings(sc) != 0) {
15900        bxe_free_buf_rings(sc);
15901        bxe_interrupt_free(sc);
15902        bxe_del_cdev(sc);
15903        if (sc->ifp != NULL) {
15904            ether_ifdetach(sc->ifp);
15905        }
15906        ifmedia_removeall(&sc->ifmedia);
15907        bxe_release_mutexes(sc);
15908        bxe_deallocate_bars(sc);
15909        pci_disable_busmaster(dev);
15910        return (ENXIO);
15911    }
15912
15913    /* allocate ilt */
15914    if (bxe_alloc_ilt_mem(sc) != 0) {
15915        bxe_free_buf_rings(sc);
15916        bxe_interrupt_free(sc);
15917        bxe_del_cdev(sc);
15918        if (sc->ifp != NULL) {
15919            ether_ifdetach(sc->ifp);
15920        }
15921        ifmedia_removeall(&sc->ifmedia);
15922        bxe_release_mutexes(sc);
15923        bxe_deallocate_bars(sc);
15924        pci_disable_busmaster(dev);
15925        return (ENXIO);
15926    }
15927
15928    /* allocate the host hardware/software hsi structures */
15929    if (bxe_alloc_hsi_mem(sc) != 0) {
15930        bxe_free_ilt_mem(sc);
15931	bxe_free_buf_rings(sc);
15932        bxe_interrupt_free(sc);
15933        bxe_del_cdev(sc);
15934        if (sc->ifp != NULL) {
15935            ether_ifdetach(sc->ifp);
15936        }
15937        ifmedia_removeall(&sc->ifmedia);
15938        bxe_release_mutexes(sc);
15939        bxe_deallocate_bars(sc);
15940        pci_disable_busmaster(dev);
15941        return (ENXIO);
15942    }
15943
15944    /* need to reset chip if UNDI was active */
15945    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
15946        /* init fw_seq */
15947        sc->fw_seq =
15948            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
15949             DRV_MSG_SEQ_NUMBER_MASK);
15950        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
15951        bxe_prev_unload(sc);
15952    }
15953
15954#if 1
15955    /* XXX */
15956    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15957#else
15958    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
15959        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
15960        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
15961        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
15962        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
15963        bxe_dcbx_init_params(sc);
15964    } else {
15965        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15966    }
15967#endif
15968
15969    /* calculate qm_cid_count */
15970    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
15971    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
15972
15973    sc->max_cos = 1;
15974    bxe_init_multi_cos(sc);
15975
15976    bxe_add_sysctls(sc);
15977
15978    return (0);
15979}
15980
15981/*
15982 * Device detach function.
15983 *
15984 * Stops and resets the controller, and releases resources.
15985 *
15986 * Returns:
15987 *   0 = Success, >0 = Failure
15988 */
15989static int
15990bxe_detach(device_t dev)
15991{
15992    struct bxe_softc *sc;
15993    if_t ifp;
15994
15995    sc = device_get_softc(dev);
15996
15997    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
15998
15999    ifp = sc->ifp;
16000    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16001        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16002        return (EBUSY);
16003    }
16004
16005    bxe_del_cdev(sc);
16006
16007    /* stop the periodic callout */
16008    bxe_periodic_stop(sc);
16009
16010    /* stop the chip taskqueue */
16011    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16012    if (sc->chip_tq) {
16013        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16014        taskqueue_free(sc->chip_tq);
16015        sc->chip_tq = NULL;
16016    }
16017
16018    /* stop and reset the controller if it was open */
16019    if (sc->state != BXE_STATE_CLOSED) {
16020        BXE_CORE_LOCK(sc);
16021        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16022        sc->state = BXE_STATE_DISABLED;
16023        BXE_CORE_UNLOCK(sc);
16024    }
16025
16026    /* release the network interface */
16027    if (ifp != NULL) {
16028        ether_ifdetach(ifp);
16029    }
16030    ifmedia_removeall(&sc->ifmedia);
16031
16032    /* XXX do the following based on driver state... */
16033
16034    /* free the host hardware/software hsi structures */
16035    bxe_free_hsi_mem(sc);
16036
16037    /* free ilt */
16038    bxe_free_ilt_mem(sc);
16039
16040    bxe_free_buf_rings(sc);
16041
16042    /* release the interrupts */
16043    bxe_interrupt_free(sc);
16044
16045    /* Release the mutexes */
16046    bxe_destroy_fp_mutexs(sc);
16047    bxe_release_mutexes(sc);
16048
16049
16050    /* Release the PCIe BAR mapped memory */
16051    bxe_deallocate_bars(sc);
16052
16053    /* Release the FreeBSD interface. */
16054    if (sc->ifp != NULL) {
16055        if_free(sc->ifp);
16056    }
16057
16058    pci_disable_busmaster(dev);
16059
16060    return (0);
16061}
16062
16063/*
16064 * Device shutdown function.
16065 *
16066 * Stops and resets the controller.
16067 *
16068 * Returns:
16069 *   0 = Success
16070 */
16071static int
16072bxe_shutdown(device_t dev)
16073{
16074    struct bxe_softc *sc;
16075
16076    sc = device_get_softc(dev);
16077
16078    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16079
16080    /* stop the periodic callout */
16081    bxe_periodic_stop(sc);
16082
16083    BXE_CORE_LOCK(sc);
16084    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16085    BXE_CORE_UNLOCK(sc);
16086
16087    return (0);
16088}
16089
16090void
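/*
 * Acknowledge a status block via the IGU: compute the command address for
 * the given IGU SB id and hand off to bxe_igu_ack_sb_gen().
 */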
16091bxe_igu_ack_sb(struct bxe_softc *sc,
16092               uint8_t          igu_sb_id,
16093               uint8_t          segment,
16094               uint16_t         index,
16095               uint8_t          op,
16096               uint8_t          update)
16097{
16098    uint32_t igu_addr = sc->igu_base_addr;
16099    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16100    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16101}
16102
16103static void
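/*
 * Issue an IGU status block cleanup command through the GRC interface
 * (not supported in backward-compatible interrupt mode), then poll the
 * corresponding ack bit in IGU_REG_CSTORM_TYPE_0_SB_CLEANUP (up to
 * 100 * 20ms) and log if it never sets.
 */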
16104bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16105                     uint8_t          func,
16106                     uint8_t          idu_sb_id,
16107                     uint8_t          is_pf)
16108{
16109    uint32_t data, ctl, cnt = 100;
16110    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16111    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16112    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16113    uint32_t sb_bit =  1 << (idu_sb_id%32);
16114    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16115    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16116
16117    /* Not supported in BC mode */
16118    if (CHIP_INT_MODE_IS_BC(sc)) {
16119        return;
16120    }
16121
16122    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16123             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16124            IGU_REGULAR_CLEANUP_SET |
16125            IGU_REGULAR_BCLEANUP);
16126
16127    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16128           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16129           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16130
16131    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16132            data, igu_addr_data);
16133    REG_WR(sc, igu_addr_data, data);
16134
16135    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16136                      BUS_SPACE_BARRIER_WRITE);
16137    mb();
16138
16139    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16140            ctl, igu_addr_ctl);
16141    REG_WR(sc, igu_addr_ctl, ctl);
16142
16143    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16144                      BUS_SPACE_BARRIER_WRITE);
16145    mb();
16146
16147    /* wait for clean up to finish */
16148    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16149        DELAY(20000);
16150    }
16151
16152    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16153        BLOGD(sc, DBG_LOAD,
16154              "Unable to finish IGU cleanup: "
16155              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16156              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16157    }
16158}
16159
16160static void
16161bxe_igu_clear_sb(struct bxe_softc *sc,
16162                 uint8_t          idu_sb_id)
16163{
16164    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16165}
16166
16167
16168
16169
16170
16171
16172
16173/*******************/
16174/* ECORE CALLBACKS */
16175/*******************/
16176
16177static void
16178bxe_reset_common(struct bxe_softc *sc)
16179{
16180    uint32_t val = 0x1400;
16181
16182    /* reset_common */
16183    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16184
16185    if (CHIP_IS_E3(sc)) {
16186        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16187        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16188    }
16189
16190    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16191}
16192
16193static void
16194bxe_common_init_phy(struct bxe_softc *sc)
16195{
16196    uint32_t shmem_base[2];
16197    uint32_t shmem2_base[2];
16198
16199    /* Avoid common init in case MFW supports LFA */
16200    if (SHMEM2_RD(sc, size) >
16201        (uint32_t)offsetof(struct shmem2_region,
16202                           lfa_host_addr[SC_PORT(sc)])) {
16203        return;
16204    }
16205
16206    shmem_base[0]  = sc->devinfo.shmem_base;
16207    shmem2_base[0] = sc->devinfo.shmem2_base;
16208
16209    if (!CHIP_IS_E1x(sc)) {
16210        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16211        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16212    }
16213
16214    bxe_acquire_phy_lock(sc);
16215    elink_common_init_phy(sc, shmem_base, shmem2_base,
16216                          sc->devinfo.chip_id, 0);
16217    bxe_release_phy_lock(sc);
16218}
16219
16220static void
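/*
 * Disable this PF in hardware: clear the function-enable bit in the IGU PF
 * configuration and zero the PGLUE_B master enable and CFC weak enable.
 * Used e.g. from the common-init pretend loop to shut down the other
 * functions on the path.
 */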
16221bxe_pf_disable(struct bxe_softc *sc)
16222{
16223    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16224
16225    val &= ~IGU_PF_CONF_FUNC_EN;
16226
16227    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16228    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16229    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16230}
16231
16232static void
16233bxe_init_pxp(struct bxe_softc *sc)
16234{
16235    uint16_t devctl;
16236    int r_order, w_order;
16237
16238    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
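    /*
     * The PCIe device control register encodes the max payload size in
     * bits 7:5 and the max read request size in bits 14:12 (each as an
     * order, i.e. 128 << n bytes); these orders are passed to
     * ecore_init_pxp_arb(), with sc->mrrs overriding the read order when set.
     */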
16239
16240    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16241
16242    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16243
16244    if (sc->mrrs == -1) {
16245        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16246    } else {
16247        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16248        r_order = sc->mrrs;
16249    }
16250
16251    ecore_init_pxp_arb(sc, r_order, w_order);
16252}
16253
16254static uint32_t
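/*
 * Return the address of this function's PGL "pretend" register, using the
 * F0 register as the base and the F0->F1 distance as the per-function stride.
 */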
16255bxe_get_pretend_reg(struct bxe_softc *sc)
16256{
16257    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16258    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16259    return (base + (SC_ABS_FUNC(sc)) * stride);
16260}
16261
16262/*
16263 * Called only on E1H or E2.
16264 * When pretending to be PF, the pretend value is the function number 0..7.
16265 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16266 * combination.
16267 */
16268static int
16269bxe_pretend_func(struct bxe_softc *sc,
16270                 uint16_t         pretend_func_val)
16271{
16272    uint32_t pretend_reg;
16273
16274    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16275        return (-1);
16276    }
16277
16278    /* get my own pretend register */
16279    pretend_reg = bxe_get_pretend_reg(sc);
16280    REG_WR(sc, pretend_reg, pretend_func_val);
16281    REG_RD(sc, pretend_reg);
16282    return (0);
16283}
16284
16285static void
16286bxe_iov_init_dmae(struct bxe_softc *sc)
16287{
16288    return;
16289}
16290
16291static void
16292bxe_iov_init_dq(struct bxe_softc *sc)
16293{
16294    return;
16295}
16296
16297/* send a NIG loopback debug packet */
16298static void
16299bxe_lb_pckt(struct bxe_softc *sc)
16300{
16301    uint32_t wb_write[3];
16302
16303    /* Ethernet source and destination addresses */
16304    wb_write[0] = 0x55555555;
16305    wb_write[1] = 0x55555555;
16306    wb_write[2] = 0x20;     /* SOP */
16307    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16308
16309    /* NON-IP protocol */
16310    wb_write[0] = 0x09000000;
16311    wb_write[1] = 0x55555555;
16312    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16313    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16314}
16315
16316/*
16317 * Some of the internal memories are not directly readable from the driver.
16318 * To test them we send debug packets.
16319 */
16320static int
16321bxe_int_mem_test(struct bxe_softc *sc)
16322{
16323    int factor;
16324    int count, i;
16325    uint32_t val = 0;
16326
16327    if (CHIP_REV_IS_FPGA(sc)) {
16328        factor = 120;
16329    } else if (CHIP_REV_IS_EMUL(sc)) {
16330        factor = 200;
16331    } else {
16332        factor = 1;
16333    }
16334
16335    /* disable inputs of parser neighbor blocks */
16336    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16337    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16338    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16339    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16340
16341    /*  write 0 to parser credits for CFC search request */
16342    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16343
16344    /* send Ethernet packet */
16345    bxe_lb_pckt(sc);
16346
16347    /* TODO: should the NIG statistic be reset here? */
16348    /* Wait until NIG register shows 1 packet of size 0x10 */
16349    count = 1000 * factor;
16350    while (count) {
16351        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16352        val = *BXE_SP(sc, wb_data[0]);
16353        if (val == 0x10) {
16354            break;
16355        }
16356
16357        DELAY(10000);
16358        count--;
16359    }
16360
16361    if (val != 0x10) {
16362        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16363        return (-1);
16364    }
16365
16366    /* wait until PRS register shows 1 packet */
16367    count = (1000 * factor);
16368    while (count) {
16369        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16370        if (val == 1) {
16371            break;
16372        }
16373
16374        DELAY(10000);
16375        count--;
16376    }
16377
16378    if (val != 0x1) {
16379        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16380        return (-2);
16381    }
16382
16383    /* Reset and init BRB, PRS */
16384    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16385    DELAY(50000);
16386    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16387    DELAY(50000);
16388    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16389    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16390
16391    /* Disable inputs of parser neighbor blocks */
16392    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16393    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16394    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16395    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16396
16397    /* Write 0 to parser credits for CFC search request */
16398    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16399
16400    /* send 10 Ethernet packets */
16401    for (i = 0; i < 10; i++) {
16402        bxe_lb_pckt(sc);
16403    }
16404
16405    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16406    count = (1000 * factor);
16407    while (count) {
16408        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16409        val = *BXE_SP(sc, wb_data[0]);
16410        if (val == 0xb0) {
16411            break;
16412        }
16413
16414        DELAY(10000);
16415        count--;
16416    }
16417
16418    if (val != 0xb0) {
16419        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16420        return (-3);
16421    }
16422
16423    /* Wait until PRS register shows 2 packets */
16424    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16425    if (val != 2) {
16426        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16427    }
16428
16429    /* Write 1 to parser credits for CFC search request */
16430    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16431
16432    /* Wait until PRS register shows 3 packets */
16433    DELAY(10000 * factor);
16434
16435    /* Verify that the PRS register now shows 3 packets */
16436    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16437    if (val != 3) {
16438        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16439    }
16440
16441    /* clear NIG EOP FIFO */
16442    for (i = 0; i < 11; i++) {
16443        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16444    }
16445
16446    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16447    if (val != 1) {
16448        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16449        return (-4);
16450    }
16451
16452    /* Reset and init BRB, PRS, NIG */
16453    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16454    DELAY(50000);
16455    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16456    DELAY(50000);
16457    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16458    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16459    if (!CNIC_SUPPORT(sc)) {
16460        /* set NIC mode */
16461        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16462    }
16463
16464    /* Enable inputs of parser neighbor blocks */
16465    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16466    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16467    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16468    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16469
16470    return (0);
16471}
16472
16473static void
16474bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16475{
16476    int is_required;
16477    uint32_t val;
16478    int port;
16479
16480    is_required = 0;
16481    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16482           SHARED_HW_CFG_FAN_FAILURE_MASK);
16483
16484    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16485        is_required = 1;
16486    }
16487    /*
16488     * The fan failure mechanism is usually related to the PHY type since
16489     * the power consumption of the board is affected by the PHY. Currently,
16490     * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16491     */
16492    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16493        for (port = PORT_0; port < PORT_MAX; port++) {
16494            is_required |= elink_fan_failure_det_req(sc,
16495                                                     sc->devinfo.shmem_base,
16496                                                     sc->devinfo.shmem2_base,
16497                                                     port);
16498        }
16499    }
16500
16501    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16502
16503    if (is_required == 0) {
16504        return;
16505    }
16506
16507    /* Fan failure is indicated by SPIO 5 */
16508    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16509
16510    /* set to active low mode */
16511    val = REG_RD(sc, MISC_REG_SPIO_INT);
16512    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16513    REG_WR(sc, MISC_REG_SPIO_INT, val);
16514
16515    /* enable interrupt to signal the IGU */
16516    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16517    val |= MISC_SPIO_SPIO5;
16518    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16519}
16520
16521static void
16522bxe_enable_blocks_attention(struct bxe_softc *sc)
16523{
16524    uint32_t val;
16525
16526    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16527    if (!CHIP_IS_E1x(sc)) {
16528        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16529    } else {
16530        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16531    }
16532    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16533    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16534    /*
16535     * mask read length error interrupts in brb for parser
16536     * (parsing unit and 'checksum and crc' unit)
16537     * these errors are legal (PU reads fixed length and CAC can cause
16538     * read length error on truncated packets)
16539     */
16540    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16541    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16542    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16543    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16544    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16545    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16546/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16547/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16548    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16549    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16550    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16551/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16552/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16553    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16554    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16555    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16556    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16557/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16558/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16559
16560    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16561           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16562           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16563    if (!CHIP_IS_E1x(sc)) {
16564        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16565                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16566    }
16567    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16568
16569    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16570    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16571    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16572/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16573
16574    if (!CHIP_IS_E1x(sc)) {
16575        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16576        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16577    }
16578
16579    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16580    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16581/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16582    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16583}
16584
16585/**
16586 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16587 *
16588 * @sc:     driver handle
16589 */
16590static int
16591bxe_init_hw_common(struct bxe_softc *sc)
16592{
16593    uint8_t abs_func_id;
16594    uint32_t val;
16595
16596    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16597          SC_ABS_FUNC(sc));
16598
16599    /*
16600     * take the RESET lock to protect undi_unload flow from accessing
16601     * registers while we are resetting the chip
16602     */
16603    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16604
16605    bxe_reset_common(sc);
16606
16607    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16608
16609    val = 0xfffc;
16610    if (CHIP_IS_E3(sc)) {
16611        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16612        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16613    }
16614
16615    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16616
16617    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16618
16619    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16620    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16621
16622    if (!CHIP_IS_E1x(sc)) {
16623        /*
16624         * 4-port mode or 2-port mode we need to turn off master-enable for
16625         * everyone. After that we turn it back on for self. So, we disregard
16626         * multi-function, and always disable all functions on the given path,
16627         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16628         */
16629        for (abs_func_id = SC_PATH(sc);
16630             abs_func_id < (E2_FUNC_MAX * 2);
16631             abs_func_id += 2) {
16632            if (abs_func_id == SC_ABS_FUNC(sc)) {
16633                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16634                continue;
16635            }
16636
16637            bxe_pretend_func(sc, abs_func_id);
16638
16639            /* clear pf enable */
16640            bxe_pf_disable(sc);
16641
16642            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16643        }
16644    }
16645
16646    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16647
16648    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16649
16650    if (CHIP_IS_E1(sc)) {
16651        /*
16652         * enable HW interrupt from PXP on USDM overflow
16653         * bit 16 on INT_MASK_0
16654         */
16655        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16656    }
16657
16658    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16659    bxe_init_pxp(sc);
16660
16661#ifdef __BIG_ENDIAN
16662    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16663    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16664    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16665    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16666    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16667    /* make sure this value is 0 */
16668    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16669
16670    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16671    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16672    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16673    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16674    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16675#endif
16676
16677    ecore_ilt_init_page_size(sc, INITOP_SET);
16678
16679    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16680        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16681    }
16682
16683    /* let the HW do its magic... */
16684    DELAY(100000);
16685
16686    /* finish PXP init */
16687    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16688    if (val != 1) {
16689        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16690            val);
16691        return (-1);
16692    }
16693    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16694    if (val != 1) {
16695        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16696        return (-1);
16697    }
16698
16699    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16700
16701    /*
16702     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16703     * entries with value "0" and valid bit on. This needs to be done by the
16704     * first PF that is loaded in a path (i.e. common phase)
16705     */
16706    if (!CHIP_IS_E1x(sc)) {
16707/*
16708 * In E2 there is a bug in the timers block that can cause function 6 / 7
16709 * (i.e. vnic3) to start even if it is marked as "scan-off".
16710 * This occurs when a different function (func2,3) is being marked
16711 * as "scan-off". Real-life scenario for example: if a driver is being
16712 * load-unloaded while func6,7 are down. This will cause the timer to access
16713 * the ilt, translate to a logical address and send a request to read/write.
16714 * Since the ilt for the function that is down is not valid, this will cause
16715 * a translation error which is unrecoverable.
16716 * The Workaround is intended to make sure that when this happens nothing
16717 * fatal will occur. The workaround:
16718 *  1.  First PF driver which loads on a path will:
16719 *      a.  After taking the chip out of reset, by using pretend,
16720 *          it will write "0" to the following registers of
16721 *          the other vnics.
16722 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16723 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16724 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16725 *          And for itself it will write '1' to
16726 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16727 *          dmae-operations (writing to pram for example.)
16728 *          note: this could be done for functions 6,7 only, but it is
16729 *            cleaner this way.
16730 *      b.  Write zero+valid to the entire ILT.
16731 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16732 *          VNIC3 (of that port). The range allocated will be the
16733 *          entire ILT. This is needed to prevent an ILT range error.
16734 *  2.  Any PF driver load flow:
16735 *      a.  ILT update with the physical addresses of the allocated
16736 *          logical pages.
16737 *      b.  Wait 20msec. - note that this timeout is needed to make
16738 *          sure there are no requests in one of the PXP internal
16739 *          queues with "old" ILT addresses.
16740 *      c.  PF enable in the PGLC.
16741 *      d.  Clear the was_error of the PF in the PGLC. (could have
16742 *          occurred while driver was down)
16743 *      e.  PF enable in the CFC (WEAK + STRONG)
16744 *      f.  Timers scan enable
16745 *  3.  PF driver unload flow:
16746 *      a.  Clear the Timers scan_en.
16747 *      b.  Polling for scan_on=0 for that PF.
16748 *      c.  Clear the PF enable bit in the PXP.
16749 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16750 *      e.  Write zero+valid to all ILT entries (The valid bit must
16751 *          stay set)
16752 *      f.  If this is VNIC 3 of a port then also init
16753 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16754 *          to the last entry in the ILT.
16755 *
16756 *      Notes:
16757 *      Currently the PF error in the PGLC is non-recoverable.
16758 *      In the future there will be a recovery routine for this error.
16759 *      Currently attention is masked.
16760 *      Having an MCP lock on the load/unload process does not guarantee that
16761 *      there is no Timer disable during Func6/7 enable. This is because the
16762 *      Timers scan is currently being cleared by the MCP on FLR.
16763 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16764 *      there is error before clearing it. But the flow above is simpler and
16765 *      more general.
16766 *      All ILT entries are written by zero+valid and not just PF6/7
16767 *      ILT entries since in the future the ILT entries allocation for
16768 *      PF-s might be dynamic.
16769 */
16770        struct ilt_client_info ilt_cli;
16771        struct ecore_ilt ilt;
16772
16773        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16774        memset(&ilt, 0, sizeof(struct ecore_ilt));
16775
16776        /* initialize dummy TM client */
16777        ilt_cli.start      = 0;
16778        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16779        ilt_cli.client_num = ILT_CLIENT_TM;
16780
16781        /*
16782         * Step 1: set zeroes to all ilt page entries with valid bit on
16783         * Step 2: set the timers first/last ilt entry to point
16784         * to the entire range to prevent ILT range error for 3rd/4th
16785         * vnic (this code assumes existence of the vnic)
16786         *
16787         * both steps performed by call to ecore_ilt_client_init_op()
16788         * with dummy TM client
16789         *
16790         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16791         * and its counterpart are split registers
16792         */
16793
16794        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16795        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16796        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16797
16798        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16799        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16800        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16801    }
16802
16803    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16804    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16805
16806    if (!CHIP_IS_E1x(sc)) {
16807        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16808                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16809
16810        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16811        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16812
16813        /* let the HW do its magic... */
16814        do {
16815            DELAY(200000);
16816            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16817        } while (factor-- && (val != 1));
16818
16819        if (val != 1) {
16820            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16821            return (-1);
16822        }
16823    }
16824
16825    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16826
16827    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16828
16829    bxe_iov_init_dmae(sc);
16830
16831    /* clean the DMAE memory */
16832    sc->dmae_ready = 1;
16833    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16834
16835    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16836
16837    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16838
16839    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16840
16841    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16842
16843    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16844    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16845    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16846    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16847
16848    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16849
16850    /* QM queues pointers table */
16851    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16852
16853    /* soft reset pulse */
16854    REG_WR(sc, QM_REG_SOFT_RESET, 1);
16855    REG_WR(sc, QM_REG_SOFT_RESET, 0);
16856
16857    if (CNIC_SUPPORT(sc))
16858        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
16859
16860    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
16861    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
16862    if (!CHIP_REV_IS_SLOW(sc)) {
16863        /* enable hw interrupt from doorbell Q */
16864        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16865    }
16866
16867    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16868
16869    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16870    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
16871
16872    if (!CHIP_IS_E1(sc)) {
16873        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
16874    }
16875
16876    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
16877        if (IS_MF_AFEX(sc)) {
16878            /*
16879             * configure that AFEX and VLAN headers must be
16880             * received in AFEX mode
16881             */
16882            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
16883            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
16884            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
16885            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
16886            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
16887        } else {
16888            /*
16889             * Bit-map indicating which L2 hdrs may appear
16890             * after the basic Ethernet header
16891             */
16892            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
16893                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16894        }
16895    }
16896
16897    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
16898    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
16899    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
16900    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
16901
16902    if (!CHIP_IS_E1x(sc)) {
16903        /* reset VFC memories */
16904        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16905               VFC_MEMORIES_RST_REG_CAM_RST |
16906               VFC_MEMORIES_RST_REG_RAM_RST);
16907        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16908               VFC_MEMORIES_RST_REG_CAM_RST |
16909               VFC_MEMORIES_RST_REG_RAM_RST);
16910
16911        DELAY(20000);
16912    }
16913
16914    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
16915    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
16916    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
16917    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
16918
16919    /* sync semi rtc */
16920    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
16921           0x80000000);
16922    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
16923           0x80000000);
16924
16925    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
16926    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
16927    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
16928
16929    if (!CHIP_IS_E1x(sc)) {
16930        if (IS_MF_AFEX(sc)) {
16931            /*
16932             * configure that AFEX and VLAN headers must be
16933             * sent in AFEX mode
16934             */
16935            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
16936            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
16937            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
16938            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
16939            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
16940        } else {
16941            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
16942                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16943        }
16944    }
16945
16946    REG_WR(sc, SRC_REG_SOFT_RST, 1);
16947
16948    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
16949
16950    if (CNIC_SUPPORT(sc)) {
16951        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
16952        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
16953        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
16954        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
16955        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
16956        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
16957        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
16958        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
16959        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
16960        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
16961    }
16962    REG_WR(sc, SRC_REG_SOFT_RST, 0);
16963
16964    if (sizeof(union cdu_context) != 1024) {
16965        /* we currently assume that a context is 1024 bytes */
16966        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
16967              (long)sizeof(union cdu_context));
16968    }
16969
16970    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
16971    val = (4 << 24) + (0 << 12) + 1024;
16972    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
16973
16974    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
16975
16976    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
16977    /* enable context validation interrupt from CFC */
16978    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16979
16980    /* set the thresholds to prevent CFC/CDU race */
16981    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
16982    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
16983
16984    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
16985        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
16986    }
16987
16988    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
16989    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
16990
16991    /* Reset PCIE errors for debug */
16992    REG_WR(sc, 0x2814, 0xffffffff);
16993    REG_WR(sc, 0x3820, 0xffffffff);
16994
16995    if (!CHIP_IS_E1x(sc)) {
16996        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
16997               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
16998                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
16999        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17000               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17001                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17002                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17003        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17004               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17005                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17006                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17007    }
17008
17009    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17010
17011    if (!CHIP_IS_E1(sc)) {
17012        /* in E3 this is done in the per-port section */
17013        if (!CHIP_IS_E3(sc))
17014            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17015    }
17016
17017    if (CHIP_IS_E1H(sc)) {
17018        /* not applicable for E2 (and above ...) */
17019        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17020    }
17021
17022    if (CHIP_REV_IS_SLOW(sc)) {
17023        DELAY(200000);
17024    }
17025
17026    /* finish CFC init */
17027    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17028    if (val != 1) {
17029        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17030        return (-1);
17031    }
17032    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17033    if (val != 1) {
17034        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17035        return (-1);
17036    }
17037    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17038    if (val != 1) {
17039        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17040        return (-1);
17041    }
17042    REG_WR(sc, CFC_REG_DEBUG0, 0);
17043
17044    if (CHIP_IS_E1(sc)) {
17045        /* read NIG statistic to see if this is our first up since powerup */
17046        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17047        val = *BXE_SP(sc, wb_data[0]);
17048
17049        /* do internal memory self test */
17050        if ((val == 0) && bxe_int_mem_test(sc)) {
17051            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17052            return (-1);
17053        }
17054    }
17055
17056    bxe_setup_fan_failure_detection(sc);
17057
17058    /* clear PXP2 attentions */
17059    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17060
17061    bxe_enable_blocks_attention(sc);
17062
17063    if (!CHIP_REV_IS_SLOW(sc)) {
17064        ecore_enable_blocks_parity(sc);
17065    }
17066
17067    if (!BXE_NOMCP(sc)) {
17068        if (CHIP_IS_E1x(sc)) {
17069            bxe_common_init_phy(sc);
17070        }
17071    }
17072
17073    return (0);
17074}
17075
17076/**
17077 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17078 *
17079 * @sc:     driver handle
17080 */
17081static int
17082bxe_init_hw_common_chip(struct bxe_softc *sc)
17083{
17084    int rc = bxe_init_hw_common(sc);
17085
17086    if (rc) {
17087        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17088        return (rc);
17089    }
17090
17091    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17092    if (!BXE_NOMCP(sc)) {
17093        bxe_common_init_phy(sc);
17094    }
17095
17096    return (0);
17097}
17098
17099static int
17100bxe_init_hw_port(struct bxe_softc *sc)
17101{
17102    int port = SC_PORT(sc);
17103    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17104    uint32_t low, high;
17105    uint32_t val;
17106
17107    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17108
17109    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17110
17111    ecore_init_block(sc, BLOCK_MISC, init_phase);
17112    ecore_init_block(sc, BLOCK_PXP, init_phase);
17113    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17114
17115    /*
17116     * Timers-bug workaround: the common phase disables the pf_master bit
17117     * in pglue, so we need to re-enable it here before any dmae accesses
17118     * are attempted. Therefore we manually add the enable-master to the
17119     * port phase (it also happens in the function phase).
17120     */
17121    if (!CHIP_IS_E1x(sc)) {
17122        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17123    }
17124
17125    ecore_init_block(sc, BLOCK_ATC, init_phase);
17126    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17127    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17128    ecore_init_block(sc, BLOCK_QM, init_phase);
17129
17130    ecore_init_block(sc, BLOCK_TCM, init_phase);
17131    ecore_init_block(sc, BLOCK_UCM, init_phase);
17132    ecore_init_block(sc, BLOCK_CCM, init_phase);
17133    ecore_init_block(sc, BLOCK_XCM, init_phase);
17134
17135    /* QM cid (connection) count */
17136    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17137
17138    if (CNIC_SUPPORT(sc)) {
17139        ecore_init_block(sc, BLOCK_TM, init_phase);
17140        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17141        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17142    }
17143
17144    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17145
17146    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17147
17148    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17149        if (IS_MF(sc)) {
17150            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17151        } else if (sc->mtu > 4096) {
17152            if (BXE_ONE_PORT(sc)) {
17153                low = 160;
17154            } else {
17155                val = sc->mtu;
17156                /* (24*1024 + val*4)/256 */
17157                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17158            }
17159        } else {
17160            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17161        }
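        /*
         * e.g. mtu 9000, multi-port, non-MF: low = 96 + 140 + 1 = 237
         * (~ (24*1024 + 9000*4)/256) and high = 237 + 56 = 293.
         */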
17162        high = (low + 56); /* 14*1024/256 */
17163        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17164        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17165    }
17166
17167    if (CHIP_IS_MODE_4_PORT(sc)) {
17168        REG_WR(sc, SC_PORT(sc) ?
17169               BRB1_REG_MAC_GUARANTIED_1 :
17170               BRB1_REG_MAC_GUARANTIED_0, 40);
17171    }
17172
17173    ecore_init_block(sc, BLOCK_PRS, init_phase);
17174    if (CHIP_IS_E3B0(sc)) {
17175        if (IS_MF_AFEX(sc)) {
17176            /* configure headers for AFEX mode */
17177            REG_WR(sc, SC_PORT(sc) ?
17178                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17179                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17180            REG_WR(sc, SC_PORT(sc) ?
17181                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17182                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17183            REG_WR(sc, SC_PORT(sc) ?
17184                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17185                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17186        } else {
17187            /* Ovlan headers exist only in multi-function +
17188             * switch-dependent mode; in switch-independent mode
17189             * there are no ovlan headers
17190             */
17191            REG_WR(sc, SC_PORT(sc) ?
17192                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17193                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17194                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17195        }
17196    }
17197
17198    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17199    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17200    ecore_init_block(sc, BLOCK_USDM, init_phase);
17201    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17202
17203    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17204    ecore_init_block(sc, BLOCK_USEM, init_phase);
17205    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17206    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17207
17208    ecore_init_block(sc, BLOCK_UPB, init_phase);
17209    ecore_init_block(sc, BLOCK_XPB, init_phase);
17210
17211    ecore_init_block(sc, BLOCK_PBF, init_phase);
17212
17213    if (CHIP_IS_E1x(sc)) {
17214        /* configure PBF to work without PAUSE mtu 9000 */
17215        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17216
17217        /* update threshold */
17218        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17219        /* update init credit */
17220        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17221
17222        /* probe changes */
17223        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17224        DELAY(50);
17225        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17226    }
17227
17228    if (CNIC_SUPPORT(sc)) {
17229        ecore_init_block(sc, BLOCK_SRC, init_phase);
17230    }
17231
17232    ecore_init_block(sc, BLOCK_CDU, init_phase);
17233    ecore_init_block(sc, BLOCK_CFC, init_phase);
17234
17235    if (CHIP_IS_E1(sc)) {
17236        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17237        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17238    }
17239    ecore_init_block(sc, BLOCK_HC, init_phase);
17240
17241    ecore_init_block(sc, BLOCK_IGU, init_phase);
17242
17243    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17244    /* init aeu_mask_attn_func_0/1:
17245     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17246     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17247     *             bits 4-7 are used for "per vn group attention" */
17248    val = IS_MF(sc) ? 0xF7 : 0x7;
17249    /* Enable DCBX attention for all but E1 */
17250    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17251    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17252
17253    ecore_init_block(sc, BLOCK_NIG, init_phase);
17254
17255    if (!CHIP_IS_E1x(sc)) {
17256        /* Bit-map indicating which L2 hdrs may appear after the
17257         * basic Ethernet header
17258         */
17259        if (IS_MF_AFEX(sc)) {
17260            REG_WR(sc, SC_PORT(sc) ?
17261                   NIG_REG_P1_HDRS_AFTER_BASIC :
17262                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17263        } else {
17264            REG_WR(sc, SC_PORT(sc) ?
17265                   NIG_REG_P1_HDRS_AFTER_BASIC :
17266                   NIG_REG_P0_HDRS_AFTER_BASIC,
17267                   IS_MF_SD(sc) ? 7 : 6);
17268        }
17269
17270        if (CHIP_IS_E3(sc)) {
17271            REG_WR(sc, SC_PORT(sc) ?
17272                   NIG_REG_LLH1_MF_MODE :
17273                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17274        }
17275    }
17276    if (!CHIP_IS_E3(sc)) {
17277        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17278    }
17279
17280    if (!CHIP_IS_E1(sc)) {
17281        /* 0x2 disable mf_ov, 0x1 enable */
17282        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17283               (IS_MF_SD(sc) ? 0x1 : 0x2));
17284
17285        if (!CHIP_IS_E1x(sc)) {
17286            val = 0;
17287            switch (sc->devinfo.mf_info.mf_mode) {
17288            case MULTI_FUNCTION_SD:
17289                val = 1;
17290                break;
17291            case MULTI_FUNCTION_SI:
17292            case MULTI_FUNCTION_AFEX:
17293                val = 2;
17294                break;
17295            }
17296
17297            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17298                        NIG_REG_LLH0_CLS_TYPE), val);
17299        }
17300        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17301        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17302        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17303    }
17304
17305    /* If SPIO5 is set to generate interrupts, enable it for this port */
17306    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17307    if (val & MISC_SPIO_SPIO5) {
17308        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17309                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17310        val = REG_RD(sc, reg_addr);
17311        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17312        REG_WR(sc, reg_addr, val);
17313    }
17314
17315    return (0);
17316}
17317
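/*
 * Poll 'reg' until it reads 'expected' or 'poll_count' attempts (spaced
 * FLR_WAIT_INTERVAL microseconds apart) are exhausted; returns the last
 * value read.
 */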
17318static uint32_t
17319bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17320                       uint32_t         reg,
17321                       uint32_t         expected,
17322                       uint32_t         poll_count)
17323{
17324    uint32_t cur_cnt = poll_count;
17325    uint32_t val;
17326
17327    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17328        DELAY(FLR_WAIT_INTERVAL);
17329    }
17330
17331    return (val);
17332}
17333
17334static int
17335bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17336                              uint32_t         reg,
17337                              char             *msg,
17338                              uint32_t         poll_cnt)
17339{
17340    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17341
17342    if (val != 0) {
17343        BLOGE(sc, "%s usage count=%d\n", msg, val);
17344        return (1);
17345    }
17346
17347    return (0);
17348}
17349
17350/* Common routines with VF FLR cleanup */
17351static uint32_t
17352bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17353{
17354    /* adjust polling timeout */
17355    if (CHIP_REV_IS_EMUL(sc)) {
17356        return (FLR_POLL_CNT * 2000);
17357    }
17358
17359    if (CHIP_REV_IS_FPGA(sc)) {
17360        return (FLR_POLL_CNT * 120);
17361    }
17362
17363    return (FLR_POLL_CNT);
17364}
17365
17366static int
17367bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17368                           uint32_t         poll_cnt)
17369{
17370    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17371    if (bxe_flr_clnup_poll_hw_counter(sc,
17372                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17373                                      "CFC PF usage counter timed out",
17374                                      poll_cnt)) {
17375        return (1);
17376    }
17377
17378    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17379    if (bxe_flr_clnup_poll_hw_counter(sc,
17380                                      DORQ_REG_PF_USAGE_CNT,
17381                                      "DQ PF usage counter timed out",
17382                                      poll_cnt)) {
17383        return (1);
17384    }
17385
17386    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17387    if (bxe_flr_clnup_poll_hw_counter(sc,
17388                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17389                                      "QM PF usage counter timed out",
17390                                      poll_cnt)) {
17391        return (1);
17392    }
17393
17394    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17395    if (bxe_flr_clnup_poll_hw_counter(sc,
17396                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17397                                      "Timers VNIC usage counter timed out",
17398                                      poll_cnt)) {
17399        return (1);
17400    }
17401
17402    if (bxe_flr_clnup_poll_hw_counter(sc,
17403                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17404                                      "Timers NUM_SCANS usage counter timed out",
17405                                      poll_cnt)) {
17406        return (1);
17407    }
17408
17409    /* Wait DMAE PF usage counter to zero */
17410    if (bxe_flr_clnup_poll_hw_counter(sc,
17411                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17412                                      "DMAE command register timed out",
17413                                      poll_cnt)) {
17414        return (1);
17415    }
17416
17417    return (0);
17418}
17419
17420#define OP_GEN_PARAM(param)                                            \
17421    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17422#define OP_GEN_TYPE(type)                                           \
17423    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17424#define OP_GEN_AGG_VECT(index)                                             \
17425    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17426
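/*
 * Send the FW "final cleanup" command for the given function. The command
 * word is assembled from the OP_GEN_* fields above and written to
 * XSDM_REG_OPERATION_GEN; completion is detected by polling the per-function
 * CSTORM completion word, which must be zero on entry and is zeroed again
 * afterwards for the next FLR.
 */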
17427static int
17428bxe_send_final_clnup(struct bxe_softc *sc,
17429                     uint8_t          clnup_func,
17430                     uint32_t         poll_cnt)
17431{
17432    uint32_t op_gen_command = 0;
17433    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17434                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17435    int ret = 0;
17436
17437    if (REG_RD(sc, comp_addr)) {
17438        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17439        return (1);
17440    }
17441
17442    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17443    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17444    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17445    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17446
17447    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17448    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17449
17450    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17451        BLOGE(sc, "FW final cleanup did not succeed\n");
17452        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17453              (REG_RD(sc, comp_addr)));
17454        bxe_panic(sc, ("FLR cleanup failed\n"));
17455        return (1);
17456    }
17457
17458    /* Zero completion for next FLR */
17459    REG_WR(sc, comp_addr, 0);
17460
17461    return (ret);
17462}
17463
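/*
 * Wait for a PBF tx buffer to drain: poll until the credit count returns to
 * its initial value, or until the "credit freed" counter has advanced by the
 * number of credits that were outstanding when polling started.
 */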
17464static void
17465bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17466                       struct pbf_pN_buf_regs *regs,
17467                       uint32_t               poll_count)
17468{
17469    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17470    uint32_t cur_cnt = poll_count;
17471
17472    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17473    crd = crd_start = REG_RD(sc, regs->crd);
17474    init_crd = REG_RD(sc, regs->init_crd);
17475
17476    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17477    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17478    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17479
17480    while ((crd != init_crd) &&
17481           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17482            (init_crd - crd_start))) {
17483        if (cur_cnt--) {
17484            DELAY(FLR_WAIT_INTERVAL);
17485            crd = REG_RD(sc, regs->crd);
17486            crd_freed = REG_RD(sc, regs->crd_freed);
17487        } else {
17488            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17489            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17490            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17491            break;
17492        }
17493    }
17494
17495    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17496          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17497}
17498
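/*
 * Wait for a PBF command queue to drain: poll until the occupancy drops to
 * zero, or until the "lines freed" counter has advanced by the number of
 * lines that were occupied when polling started.
 */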
17499static void
17500bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17501                       struct pbf_pN_cmd_regs *regs,
17502                       uint32_t               poll_count)
17503{
17504    uint32_t occup, to_free, freed, freed_start;
17505    uint32_t cur_cnt = poll_count;
17506
17507    occup = to_free = REG_RD(sc, regs->lines_occup);
17508    freed = freed_start = REG_RD(sc, regs->lines_freed);
17509
17510    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17511    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17512
17513    while (occup &&
17514           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17515        if (cur_cnt--) {
17516            DELAY(FLR_WAIT_INTERVAL);
17517            occup = REG_RD(sc, regs->lines_occup);
17518            freed = REG_RD(sc, regs->lines_freed);
17519        } else {
17520            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17521            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : c:%x\n", regs->pN, occup);
17522            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
17523            break;
17524        }
17525    }
17526
17527    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17528          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17529}
17530
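/*
 * Verify that the PBF command queues and tx buffers for ports 0, 1 and the
 * loopback queue (4) have been flushed. E3B0 chips use the per-queue TQ/LB
 * registers; older chips use the per-port P0/P1/P4 registers.
 */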
17531static void
17532bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17533{
17534    struct pbf_pN_cmd_regs cmd_regs[] = {
17535        {0, (CHIP_IS_E3B0(sc)) ?
17536            PBF_REG_TQ_OCCUPANCY_Q0 :
17537            PBF_REG_P0_TQ_OCCUPANCY,
17538            (CHIP_IS_E3B0(sc)) ?
17539            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17540            PBF_REG_P0_TQ_LINES_FREED_CNT},
17541        {1, (CHIP_IS_E3B0(sc)) ?
17542            PBF_REG_TQ_OCCUPANCY_Q1 :
17543            PBF_REG_P1_TQ_OCCUPANCY,
17544            (CHIP_IS_E3B0(sc)) ?
17545            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17546            PBF_REG_P1_TQ_LINES_FREED_CNT},
17547        {4, (CHIP_IS_E3B0(sc)) ?
17548            PBF_REG_TQ_OCCUPANCY_LB_Q :
17549            PBF_REG_P4_TQ_OCCUPANCY,
17550            (CHIP_IS_E3B0(sc)) ?
17551            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17552            PBF_REG_P4_TQ_LINES_FREED_CNT}
17553    };
17554
17555    struct pbf_pN_buf_regs buf_regs[] = {
17556        {0, (CHIP_IS_E3B0(sc)) ?
17557            PBF_REG_INIT_CRD_Q0 :
17558            PBF_REG_P0_INIT_CRD,
17559            (CHIP_IS_E3B0(sc)) ?
17560            PBF_REG_CREDIT_Q0 :
17561            PBF_REG_P0_CREDIT,
17562            (CHIP_IS_E3B0(sc)) ?
17563            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17564            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17565        {1, (CHIP_IS_E3B0(sc)) ?
17566            PBF_REG_INIT_CRD_Q1 :
17567            PBF_REG_P1_INIT_CRD,
17568            (CHIP_IS_E3B0(sc)) ?
17569            PBF_REG_CREDIT_Q1 :
17570            PBF_REG_P1_CREDIT,
17571            (CHIP_IS_E3B0(sc)) ?
17572            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17573            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17574        {4, (CHIP_IS_E3B0(sc)) ?
17575            PBF_REG_INIT_CRD_LB_Q :
17576            PBF_REG_P4_INIT_CRD,
17577            (CHIP_IS_E3B0(sc)) ?
17578            PBF_REG_CREDIT_LB_Q :
17579            PBF_REG_P4_CREDIT,
17580            (CHIP_IS_E3B0(sc)) ?
17581            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17582            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17583    };
17584
17585    int i;
17586
17587    /* Verify the command queues are flushed P0, P1, P4 */
17588    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17589        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17590    }
17591
17592    /* Verify the transmission buffers are flushed P0, P1, P4 */
17593    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17594        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17595    }
17596}
17597
17598static void
17599bxe_hw_enable_status(struct bxe_softc *sc)
17600{
17601    uint32_t val;
17602
17603    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17604    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17605
17606    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17607    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17608
17609    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17610    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17611
17612    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17613    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17614
17615    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17616    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17617
17618    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17619    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17620
17621    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17622    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17623
17624    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17625    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17626}
17627
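/*
 * PF FLR cleanup sequence: re-enable target reads, wait for the HW usage
 * counters (CFC, DQ, QM, Timers, DMAE) to drain, send the FW final cleanup
 * command, verify the PBF tx path is flushed, and finally check for pending
 * PCIe transactions before re-enabling master access.
 */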
17628static int
17629bxe_pf_flr_clnup(struct bxe_softc *sc)
17630{
17631    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17632
17633    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17634
17635    /* Re-enable PF target read access */
17636    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17637
17638    /* Poll HW usage counters */
17639    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17640    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17641        return (-1);
17642    }
17643
17644    /* Zero the igu 'trailing edge' and 'leading edge' */
17645
17646    /* Send the FW cleanup command */
17647    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17648        return (-1);
17649    }
17650
17651    /* ATC cleanup */
17652
17653    /* Verify TX hw is flushed */
17654    bxe_tx_hw_flushed(sc, poll_cnt);
17655
17656    /* Wait 100ms (not adjusted according to platform) */
17657    DELAY(100000);
17658
17659    /* Verify no pending pci transactions */
17660    if (bxe_is_pcie_pending(sc)) {
17661        BLOGE(sc, "PCIE Transactions still pending\n");
17662    }
17663
17664    /* Debug */
17665    bxe_hw_enable_status(sc);
17666
17667    /*
17668     * Master enable - Due to WB DMAE writes performed before this
17669     * register is re-initialized as part of the regular function init
17670     */
17671    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17672
17673    return (0);
17674}
17675
17676static int
17677bxe_init_hw_func(struct bxe_softc *sc)
17678{
17679    int port = SC_PORT(sc);
17680    int func = SC_FUNC(sc);
17681    int init_phase = PHASE_PF0 + func;
17682    struct ecore_ilt *ilt = sc->ilt;
17683    uint16_t cdu_ilt_start;
17684    uint32_t addr, val;
17685    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17686    int i, main_mem_width, rc;
17687
17688    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17689
17690    /* FLR cleanup */
17691    if (!CHIP_IS_E1x(sc)) {
17692        rc = bxe_pf_flr_clnup(sc);
17693        if (rc) {
17694            BLOGE(sc, "FLR cleanup failed!\n");
17695            // XXX bxe_fw_dump(sc);
17696            // XXX bxe_idle_chk(sc);
17697            return (rc);
17698        }
17699    }
17700
17701    /* set MSI reconfigure capability */
17702    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17703        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17704        val = REG_RD(sc, addr);
17705        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17706        REG_WR(sc, addr, val);
17707    }
17708
17709    ecore_init_block(sc, BLOCK_PXP, init_phase);
17710    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17711
17712    ilt = sc->ilt;
17713    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17714
17715    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17716        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17717        ilt->lines[cdu_ilt_start + i].page_mapping =
17718            sc->context[i].vcxt_dma.paddr;
17719        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17720    }
17721    ecore_ilt_init_op(sc, INITOP_SET);
17722
17723    /* Set NIC mode */
17724    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17725    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17726
17727    if (!CHIP_IS_E1x(sc)) {
17728        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17729
17730        /* Turn on a single ISR mode in IGU if driver is going to use
17731         * INT#x or MSI
17732         */
17733        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17734            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17735        }
17736
17737        /*
17738         * Timers workaround bug: function init part.
17739         * Need to wait 20msec after initializing ILT,
17740         * needed to make sure there are no requests in
17741         * one of the PXP internal queues with "old" ILT addresses
17742         */
17743        DELAY(20000);
17744
17745        /*
17746         * Master enable - Due to WB DMAE writes performed before this
17747         * register is re-initialized as part of the regular function
17748         * init
17749         */
17750        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17751        /* Enable the function in IGU */
17752        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17753    }
17754
17755    sc->dmae_ready = 1;
17756
17757    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17758
17759    if (!CHIP_IS_E1x(sc))
17760        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17761
17762    ecore_init_block(sc, BLOCK_ATC, init_phase);
17763    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17764    ecore_init_block(sc, BLOCK_NIG, init_phase);
17765    ecore_init_block(sc, BLOCK_SRC, init_phase);
17766    ecore_init_block(sc, BLOCK_MISC, init_phase);
17767    ecore_init_block(sc, BLOCK_TCM, init_phase);
17768    ecore_init_block(sc, BLOCK_UCM, init_phase);
17769    ecore_init_block(sc, BLOCK_CCM, init_phase);
17770    ecore_init_block(sc, BLOCK_XCM, init_phase);
17771    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17772    ecore_init_block(sc, BLOCK_USEM, init_phase);
17773    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17774    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17775
17776    if (!CHIP_IS_E1x(sc))
17777        REG_WR(sc, QM_REG_PF_EN, 1);
17778
17779    if (!CHIP_IS_E1x(sc)) {
17780        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17781        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17782        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17783        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17784    }
17785    ecore_init_block(sc, BLOCK_QM, init_phase);
17786
17787    ecore_init_block(sc, BLOCK_TM, init_phase);
17788    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17789
17790    bxe_iov_init_dq(sc);
17791
17792    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17793    ecore_init_block(sc, BLOCK_PRS, init_phase);
17794    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17795    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17796    ecore_init_block(sc, BLOCK_USDM, init_phase);
17797    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17798    ecore_init_block(sc, BLOCK_UPB, init_phase);
17799    ecore_init_block(sc, BLOCK_XPB, init_phase);
17800    ecore_init_block(sc, BLOCK_PBF, init_phase);
17801    if (!CHIP_IS_E1x(sc))
17802        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17803
17804    ecore_init_block(sc, BLOCK_CDU, init_phase);
17805
17806    ecore_init_block(sc, BLOCK_CFC, init_phase);
17807
17808    if (!CHIP_IS_E1x(sc))
17809        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17810
17811    if (IS_MF(sc)) {
17812        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17813        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17814    }
17815
17816    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17817
17818    /* HC init per function */
17819    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17820        if (CHIP_IS_E1H(sc)) {
17821            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17822
17823            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17824            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17825        }
17826        ecore_init_block(sc, BLOCK_HC, init_phase);
17827
17828    } else {
17829        int num_segs, sb_idx, prod_offset;
17830
17831        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17832
17833        if (!CHIP_IS_E1x(sc)) {
17834            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17835            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17836        }
17837
17838        ecore_init_block(sc, BLOCK_IGU, init_phase);
17839
17840        if (!CHIP_IS_E1x(sc)) {
17841            int dsb_idx = 0;
17842            /**
17843             * Producer memory:
17844             * E2 mode: address 0-135 match to the mapping memory;
17845             * 136 - PF0 default prod; 137 - PF1 default prod;
17846             * 138 - PF2 default prod; 139 - PF3 default prod;
17847             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17848             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17849             * 144-147 reserved.
17850             *
17851             * E1.5 mode - In backward-compatible mode,
17852             * for non-default SBs, each even line in the memory
17853             * holds the U producer and each odd line holds
17854             * the C producer. The first 128 producers are for
17855             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
17856             * producers are for the DSB for each PF.
17857             * Each PF has five segments: (the order inside each
17858             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
17859             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
17860             * 144-147 attn prods;
17861             */
17862            /* non-default-status-blocks */
17863            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17864                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
17865            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
17866                prod_offset = (sc->igu_base_sb + sb_idx) *
17867                    num_segs;
17868
17869                for (i = 0; i < num_segs; i++) {
17870                    addr = IGU_REG_PROD_CONS_MEMORY +
17871                            (prod_offset + i) * 4;
17872                    REG_WR(sc, addr, 0);
17873                }
17874                /* send consumer update with value 0 */
17875                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
17876                           USTORM_ID, 0, IGU_INT_NOP, 1);
17877                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
17878            }
17879
17880            /* default-status-blocks */
17881            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17882                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
17883
17884            if (CHIP_IS_MODE_4_PORT(sc))
17885                dsb_idx = SC_FUNC(sc);
17886            else
17887                dsb_idx = SC_VN(sc);
17888
17889            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
17890                       IGU_BC_BASE_DSB_PROD + dsb_idx :
17891                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
17892
17893            /*
17894             * IGU prods come in chunks of E1HVN_MAX (4),
17895             * regardless of the current chip mode.
17896             */
17897            for (i = 0; i < (num_segs * E1HVN_MAX);
17898                 i += E1HVN_MAX) {
17899                addr = IGU_REG_PROD_CONS_MEMORY +
17900                            (prod_offset + i)*4;
17901                REG_WR(sc, addr, 0);
17902            }
17903            /* send consumer update with 0 */
17904            if (CHIP_INT_MODE_IS_BC(sc)) {
17905                bxe_ack_sb(sc, sc->igu_dsb_id,
17906                           USTORM_ID, 0, IGU_INT_NOP, 1);
17907                bxe_ack_sb(sc, sc->igu_dsb_id,
17908                           CSTORM_ID, 0, IGU_INT_NOP, 1);
17909                bxe_ack_sb(sc, sc->igu_dsb_id,
17910                           XSTORM_ID, 0, IGU_INT_NOP, 1);
17911                bxe_ack_sb(sc, sc->igu_dsb_id,
17912                           TSTORM_ID, 0, IGU_INT_NOP, 1);
17913                bxe_ack_sb(sc, sc->igu_dsb_id,
17914                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17915            } else {
17916                bxe_ack_sb(sc, sc->igu_dsb_id,
17917                           USTORM_ID, 0, IGU_INT_NOP, 1);
17918                bxe_ack_sb(sc, sc->igu_dsb_id,
17919                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17920            }
17921            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
17922
17923            /* !!! these should become driver const once
17924               rf-tool supports split-68 const */
17925            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
17926            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
17927            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
17928            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
17929            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
17930            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
17931        }
17932    }
17933
17934    /* Reset PCIE errors for debug */
17935    REG_WR(sc, 0x2114, 0xffffffff);
17936    REG_WR(sc, 0x2120, 0xffffffff);
17937
17938    if (CHIP_IS_E1x(sc)) {
17939        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
17940        main_mem_base = HC_REG_MAIN_MEMORY +
17941                SC_PORT(sc) * (main_mem_size * 4);
17942        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
17943        main_mem_width = 8;
17944
17945        val = REG_RD(sc, main_mem_prty_clr);
17946        if (val) {
17947            BLOGD(sc, DBG_LOAD,
17948                  "Parity errors in HC block during function init (0x%x)!\n",
17949                  val);
17950        }
17951
17952        /* Clear "false" parity errors in MSI-X table */
17953        for (i = main_mem_base;
17954             i < main_mem_base + main_mem_size * 4;
17955             i += main_mem_width) {
17956            bxe_read_dmae(sc, i, main_mem_width / 4);
17957            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
17958                           i, main_mem_width / 4);
17959        }
17960        /* Clear HC parity attention */
17961        REG_RD(sc, main_mem_prty_clr);
17962    }
17963
17964#if 1
17965    /* Enable STORMs SP logging */
17966    REG_WR8(sc, BAR_USTRORM_INTMEM +
17967           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17968    REG_WR8(sc, BAR_TSTRORM_INTMEM +
17969           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17970    REG_WR8(sc, BAR_CSTRORM_INTMEM +
17971           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17972    REG_WR8(sc, BAR_XSTRORM_INTMEM +
17973           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17974#endif
17975
17976    elink_phy_probe(&sc->link_params);
17977
17978    return (0);
17979}
17980
17981static void
17982bxe_link_reset(struct bxe_softc *sc)
17983{
17984    if (!BXE_NOMCP(sc)) {
17985        bxe_acquire_phy_lock(sc);
17986        elink_lfa_reset(&sc->link_params, &sc->link_vars);
17987        bxe_release_phy_lock(sc);
17988    } else {
17989        if (!CHIP_REV_IS_SLOW(sc)) {
17990            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
17991        }
17992    }
17993}
17994
17995static void
17996bxe_reset_port(struct bxe_softc *sc)
17997{
17998    int port = SC_PORT(sc);
17999    uint32_t val;
18000
18001    /* reset physical Link */
18002    bxe_link_reset(sc);
18003
18004    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18005
18006    /* Do not rcv packets to BRB */
18007    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18008    /* Do not direct rcv packets that are not for MCP to the BRB */
18009    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18010               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18011
18012    /* Configure AEU */
18013    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18014
18015    DELAY(100000);
18016
18017    /* Check for BRB port occupancy */
18018    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18019    if (val) {
18020        BLOGD(sc, DBG_LOAD,
18021              "BRB1 is not empty, %d blocks are occupied\n", val);
18022    }
18023
18024    /* TODO: Close Doorbell port? */
18025}
18026
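/*
 * Write a single ILT entry: the 64-bit host address is split into the two
 * ONCHIP_ADDR words and written to the per-index PXP2 on-chip address
 * register pair via DMAE.
 */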
18027static void
18028bxe_ilt_wr(struct bxe_softc *sc,
18029           uint32_t         index,
18030           bus_addr_t       addr)
18031{
18032    int reg;
18033    uint32_t wb_write[2];
18034
18035    if (CHIP_IS_E1(sc)) {
18036        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18037    } else {
18038        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18039    }
18040
18041    wb_write[0] = ONCHIP_ADDR1(addr);
18042    wb_write[1] = ONCHIP_ADDR2(addr);
18043    REG_WR_DMAE(sc, reg, wb_write, 2);
18044}
18045
18046static void
18047bxe_clear_func_ilt(struct bxe_softc *sc,
18048                   uint32_t         func)
18049{
18050    uint32_t i, base = FUNC_ILT_BASE(func);
18051    for (i = base; i < base + ILT_PER_FUNC; i++) {
18052        bxe_ilt_wr(sc, i, 0);
18053    }
18054}
18055
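/*
 * Tear down the per-function state: disable the function in each STORM,
 * mark the fastpath and slowpath status blocks disabled, clear the IGU/HC
 * edge registers, stop the timer scan (if CNIC was loaded), and clear the
 * function's ILT range before disabling the PF.
 */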
18056static void
18057bxe_reset_func(struct bxe_softc *sc)
18058{
18059    struct bxe_fastpath *fp;
18060    int port = SC_PORT(sc);
18061    int func = SC_FUNC(sc);
18062    int i;
18063
18064    /* Disable the function in the FW */
18065    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18066    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18067    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18068    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18069
18070    /* FP SBs */
18071    FOR_EACH_ETH_QUEUE(sc, i) {
18072        fp = &sc->fp[i];
18073        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18074                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18075                SB_DISABLED);
18076    }
18077
18078    /* SP SB */
18079    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18080            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18081            SB_DISABLED);
18082
18083    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18084        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18085    }
18086
18087    /* Configure IGU */
18088    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18089        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18090        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18091    } else {
18092        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18093        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18094    }
18095
18096    if (CNIC_LOADED(sc)) {
18097        /* Disable Timer scan */
18098        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18099        /*
18100         * Wait for at least 10ms and up to 2 second for the timers
18101         * scan to complete
18102         */
18103        for (i = 0; i < 200; i++) {
18104            DELAY(10000);
18105            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18106                break;
18107        }
18108    }
18109
18110    /* Clear ILT */
18111    bxe_clear_func_ilt(sc, func);
18112
18113    /*
18114     * Timers workaround bug for E2: if this is vnic-3,
18115     * we need to set the entire ILT range for these timers.
18116     */
18117    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18118        struct ilt_client_info ilt_cli;
18119        /* use dummy TM client */
18120        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18121        ilt_cli.start = 0;
18122        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18123        ilt_cli.client_num = ILT_CLIENT_TM;
18124
18125        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18126    }
18127
18128    /* this assumes that reset_port() was called before reset_func() */
18129    if (!CHIP_IS_E1x(sc)) {
18130        bxe_pf_disable(sc);
18131    }
18132
18133    sc->dmae_ready = 0;
18134}
18135
18136static int
18137bxe_gunzip_init(struct bxe_softc *sc)
18138{
18139    return (0);
18140}
18141
18142static void
18143bxe_gunzip_end(struct bxe_softc *sc)
18144{
18145    return;
18146}
18147
18148static int
18149bxe_init_firmware(struct bxe_softc *sc)
18150{
18151    if (CHIP_IS_E1(sc)) {
18152        ecore_init_e1_firmware(sc);
18153        sc->iro_array = e1_iro_arr;
18154    } else if (CHIP_IS_E1H(sc)) {
18155        ecore_init_e1h_firmware(sc);
18156        sc->iro_array = e1h_iro_arr;
18157    } else if (!CHIP_IS_E1x(sc)) {
18158        ecore_init_e2_firmware(sc);
18159        sc->iro_array = e2_iro_arr;
18160    } else {
18161        BLOGE(sc, "Unsupported chip revision\n");
18162        return (-1);
18163    }
18164
18165    return (0);
18166}
18167
18168static void
18169bxe_release_firmware(struct bxe_softc *sc)
18170{
18171    /* Do nothing */
18172    return;
18173}
18174
18175static int
18176ecore_gunzip(struct bxe_softc *sc,
18177             const uint8_t    *zbuf,
18178             int              len)
18179{
18180    /* XXX : Implement... */
18181    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18182    return (FALSE);
18183}
18184
18185static void
18186ecore_reg_wr_ind(struct bxe_softc *sc,
18187                 uint32_t         addr,
18188                 uint32_t         val)
18189{
18190    bxe_reg_wr_ind(sc, addr, val);
18191}
18192
18193static void
18194ecore_write_dmae_phys_len(struct bxe_softc *sc,
18195                          bus_addr_t       phys_addr,
18196                          uint32_t         addr,
18197                          uint32_t         len)
18198{
18199    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18200}
18201
18202void
18203ecore_storm_memset_struct(struct bxe_softc *sc,
18204                          uint32_t         addr,
18205                          size_t           size,
18206                          uint32_t         *data)
18207{
18208    uint8_t i;
18209    for (i = 0; i < size/4; i++) {
18210        REG_WR(sc, addr + (i * 4), data[i]);
18211    }
18212}
18213
18214
18215/*
18216 * character device - ioctl interface definitions
18217 */
18218
18219
18220#include "bxe_dump.h"
18221#include "bxe_ioctl.h"
18222#include <sys/conf.h>
18223
18224static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18225                struct thread *td);
18226
18227static struct cdevsw bxe_cdevsw = {
18228    .d_version = D_VERSION,
18229    .d_ioctl = bxe_eioctl,
18230    .d_name = "bxecnic",
18231};
18232
18233#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18234
18235
18236#define DUMP_ALL_PRESETS        0x1FFF
18237#define DUMP_MAX_PRESETS        13
18238#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18239#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18240#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18241#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18242#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18243
18244#define IS_REG_IN_PRESET(presets, idx)  \
18245                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18246
18247
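/*
 * Return the number of 32-bit registers in the given preset for this chip.
 * Presets are numbered from 1, so dump_num_registers is indexed by preset-1.
 */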
18248static int
18249bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18250{
18251    if (CHIP_IS_E1(sc))
18252        return dump_num_registers[0][preset-1];
18253    else if (CHIP_IS_E1H(sc))
18254        return dump_num_registers[1][preset-1];
18255    else if (CHIP_IS_E2(sc))
18256        return dump_num_registers[2][preset-1];
18257    else if (CHIP_IS_E3A0(sc))
18258        return dump_num_registers[3][preset-1];
18259    else if (CHIP_IS_E3B0(sc))
18260        return dump_num_registers[4][preset-1];
18261    else
18262        return 0;
18263}
18264
18265static int
18266bxe_get_total_regs_len32(struct bxe_softc *sc)
18267{
18268    uint32_t preset_idx;
18269    int regdump_len32 = 0;
18270
18271
18272    /* Calculate the total preset regs length */
18273    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18274        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18275    }
18276
18277    return regdump_len32;
18278}
18279
18280static const uint32_t *
18281__bxe_get_page_addr_ar(struct bxe_softc *sc)
18282{
18283    if (CHIP_IS_E2(sc))
18284        return page_vals_e2;
18285    else if (CHIP_IS_E3(sc))
18286        return page_vals_e3;
18287    else
18288        return NULL;
18289}
18290
18291static uint32_t
18292__bxe_get_page_reg_num(struct bxe_softc *sc)
18293{
18294    if (CHIP_IS_E2(sc))
18295        return PAGE_MODE_VALUES_E2;
18296    else if (CHIP_IS_E3(sc))
18297        return PAGE_MODE_VALUES_E3;
18298    else
18299        return 0;
18300}
18301
18302static const uint32_t *
18303__bxe_get_page_write_ar(struct bxe_softc *sc)
18304{
18305    if (CHIP_IS_E2(sc))
18306        return page_write_regs_e2;
18307    else if (CHIP_IS_E3(sc))
18308        return page_write_regs_e3;
18309    else
18310        return NULL;
18311}
18312
18313static uint32_t
18314__bxe_get_page_write_num(struct bxe_softc *sc)
18315{
18316    if (CHIP_IS_E2(sc))
18317        return PAGE_WRITE_REGS_E2;
18318    else if (CHIP_IS_E3(sc))
18319        return PAGE_WRITE_REGS_E3;
18320    else
18321        return 0;
18322}
18323
18324static const struct reg_addr *
18325__bxe_get_page_read_ar(struct bxe_softc *sc)
18326{
18327    if (CHIP_IS_E2(sc))
18328        return page_read_regs_e2;
18329    else if (CHIP_IS_E3(sc))
18330        return page_read_regs_e3;
18331    else
18332        return NULL;
18333}
18334
18335static uint32_t
18336__bxe_get_page_read_num(struct bxe_softc *sc)
18337{
18338    if (CHIP_IS_E2(sc))
18339        return PAGE_READ_REGS_E2;
18340    else if (CHIP_IS_E3(sc))
18341        return PAGE_READ_REGS_E3;
18342    else
18343        return 0;
18344}
18345
18346static bool
18347bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18348{
18349    if (CHIP_IS_E1(sc))
18350        return IS_E1_REG(reg_info->chips);
18351    else if (CHIP_IS_E1H(sc))
18352        return IS_E1H_REG(reg_info->chips);
18353    else if (CHIP_IS_E2(sc))
18354        return IS_E2_REG(reg_info->chips);
18355    else if (CHIP_IS_E3A0(sc))
18356        return IS_E3A0_REG(reg_info->chips);
18357    else if (CHIP_IS_E3B0(sc))
18358        return IS_E3B0_REG(reg_info->chips);
18359    else
18360        return 0;
18361}
18362
18363static bool
18364bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18365{
18366    if (CHIP_IS_E1(sc))
18367        return IS_E1_REG(wreg_info->chips);
18368    else if (CHIP_IS_E1H(sc))
18369        return IS_E1H_REG(wreg_info->chips);
18370    else if (CHIP_IS_E2(sc))
18371        return IS_E2_REG(wreg_info->chips);
18372    else if (CHIP_IS_E3A0(sc))
18373        return IS_E3A0_REG(wreg_info->chips);
18374    else if (CHIP_IS_E3B0(sc))
18375        return IS_E3B0_REG(wreg_info->chips);
18376    else
18377        return 0;
18378}
18379
18380/**
18381 * bxe_read_pages_regs - read "paged" registers
18382 *
18383 * @sc          device handle
18384 * @p           output buffer
18385 *
18386 * Reads "paged" memories: memories that may only be read by first writing to a
18387 * specific address ("write address") and then reading from a specific address
18388 * ("read address"). There may be more than one write address per "page" and
18389 * more than one read address per write address.
18390 */
18391static void
18392bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18393{
18394    uint32_t i, j, k, n;
18395
18396    /* addresses of the paged registers */
18397    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18398    /* number of paged registers */
18399    int num_pages = __bxe_get_page_reg_num(sc);
18400    /* write addresses */
18401    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18402    /* number of write addresses */
18403    int write_num = __bxe_get_page_write_num(sc);
18404    /* read addresses info */
18405    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18406    /* number of read addresses */
18407    int read_num = __bxe_get_page_read_num(sc);
18408    uint32_t addr, size;
18409
18410    for (i = 0; i < num_pages; i++) {
18411        for (j = 0; j < write_num; j++) {
18412            REG_WR(sc, write_addr[j], page_addr[i]);
18413
18414            for (k = 0; k < read_num; k++) {
18415                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18416                    size = read_addr[k].size;
18417                    for (n = 0; n < size; n++) {
18418                        addr = read_addr[k].addr + n*4;
18419                        *p++ = REG_RD(sc, addr);
18420                    }
18421                }
18422            }
18423        }
18424    }
18425    return;
18426}
18427
18428
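/*
 * Collect every register that belongs to 'preset' for this chip into the
 * buffer at 'p': first the idle_chk registers, then the regular registers,
 * then the CAM (wreg) registers with their companion read_regs, and finally
 * the paged registers on E2/E3.
 */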
18429static int
18430bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18431{
18432    uint32_t i, j, addr;
18433    const struct wreg_addr *wreg_addr_p = NULL;
18434
18435    if (CHIP_IS_E1(sc))
18436        wreg_addr_p = &wreg_addr_e1;
18437    else if (CHIP_IS_E1H(sc))
18438        wreg_addr_p = &wreg_addr_e1h;
18439    else if (CHIP_IS_E2(sc))
18440        wreg_addr_p = &wreg_addr_e2;
18441    else if (CHIP_IS_E3A0(sc))
18442        wreg_addr_p = &wreg_addr_e3;
18443    else if (CHIP_IS_E3B0(sc))
18444        wreg_addr_p = &wreg_addr_e3b0;
18445    else
18446        return (-1);
18447
18448    /* Read the idle_chk registers */
18449    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18450        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18451            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18452            for (j = 0; j < idle_reg_addrs[i].size; j++)
18453                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18454        }
18455    }
18456
18457    /* Read the regular registers */
18458    for (i = 0; i < REGS_COUNT; i++) {
18459        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18460            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18461            for (j = 0; j < reg_addrs[i].size; j++)
18462                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18463        }
18464    }
18465
18466    /* Read the CAM registers */
18467    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18468        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18469        for (i = 0; i < wreg_addr_p->size; i++) {
18470            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18471
18472            /* In case of wreg_addr register, read additional
18473               registers from read_regs array
18474             */
18475            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18476                addr = *(wreg_addr_p->read_regs);
18477                *p++ = REG_RD(sc, addr + j*4);
18478            }
18479        }
18480    }
18481
18482    /* Paged registers are supported in E2 & E3 only */
18483    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18484        /* Read "paged" registers */
18485        bxe_read_pages_regs(sc, p, preset);
18486    }
18487
18488    return 0;
18489}
18490
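/*
 * Collect a GRC dump into sc->grc_dump: a dump_header followed by the
 * registers of each preset (presets 2, 5, 8 and 11 contain IORs and are
 * skipped). Parity attentions are disabled on both paths while reading and
 * re-enabled afterwards. The buffer is copied out and freed by the ioctl
 * handlers.
 */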
18491int
18492bxe_grc_dump(struct bxe_softc *sc)
18493{
18494    int rval = 0;
18495    uint32_t preset_idx;
18496    uint8_t *buf;
18497    uint32_t size;
18498    struct  dump_header *d_hdr;
18499    uint32_t i;
18500    uint32_t reg_val;
18501    uint32_t reg_addr;
18502    uint32_t cmd_offset;
18503    int context_size;
18504    int allocated;
18505    struct ecore_ilt *ilt = SC_ILT(sc);
18506    struct bxe_fastpath *fp;
18507    struct ilt_client_info *ilt_cli;
18508    int grc_dump_size;
18509
18510
18511    if (sc->grcdump_done || sc->grcdump_started)
18512	return (rval);
18513
18514    sc->grcdump_started = 1;
18515    BLOGI(sc, "Started collecting grcdump\n");
18516
18517    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18518                sizeof(struct  dump_header);
18519
18520    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18521
18522    if (sc->grc_dump == NULL) {
18523        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18524        return(ENOMEM);
18525    }
18526
18527
18528
18529    /* Disable parity attentions because the following dump may
18530     * cause false alarms by reading registers that were never written. We
18531     * will re-enable parity attentions right after the dump.
18532     */
18533
18534    /* Disable parity on path 0 */
18535    bxe_pretend_func(sc, 0);
18536
18537    ecore_disable_blocks_parity(sc);
18538
18539    /* Disable parity on path 1 */
18540    bxe_pretend_func(sc, 1);
18541    ecore_disable_blocks_parity(sc);
18542
18543    /* Return to current function */
18544    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18545
18546    buf = sc->grc_dump;
18547    d_hdr = sc->grc_dump;
18548
18549    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18550    d_hdr->version = BNX2X_DUMP_VERSION;
18551    d_hdr->preset = DUMP_ALL_PRESETS;
18552
18553    if (CHIP_IS_E1(sc)) {
18554        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18555    } else if (CHIP_IS_E1H(sc)) {
18556        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18557    } else if (CHIP_IS_E2(sc)) {
18558        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18559                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18560    } else if (CHIP_IS_E3A0(sc)) {
18561        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18562                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18563    } else if (CHIP_IS_E3B0(sc)) {
18564        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18565                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18566    }
18567
18568    buf += sizeof(struct  dump_header);
18569
18570    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18571
18572        /* Skip presets with IOR */
18573        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18574            (preset_idx == 11))
18575            continue;
18576
18577        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18578
18579	if (rval)
18580            break;
18581
18582        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18583
18584        buf += size;
18585    }
18586
18587    bxe_pretend_func(sc, 0);
18588    ecore_clear_blocks_parity(sc);
18589    ecore_enable_blocks_parity(sc);
18590
18591    bxe_pretend_func(sc, 1);
18592    ecore_clear_blocks_parity(sc);
18593    ecore_enable_blocks_parity(sc);
18594
18595    /* Return to current function */
18596    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18597
18598
18599    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
18600    for (i = 0, allocated = 0; allocated < context_size; i++) {
18601
18602        BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i,
18603            (uintmax_t)sc->context[i].vcxt_dma.paddr,
18604            sc->context[i].vcxt_dma.vaddr,
18605            sc->context[i].size);
18606        allocated += sc->context[i].size;
18607    }
18608    BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18609        (uintmax_t)sc->fw_stats_req_mapping,
18610        (uintmax_t)sc->fw_stats_data_mapping,
18611        sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18612    BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18613        (void *)sc->def_sb_dma.paddr, sc->def_sb,
18614        sizeof(struct host_sp_status_block));
18615    BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18616        (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18617    BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18618        (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18619        sizeof(struct bxe_slowpath));
18620    BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18621        (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18622    BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18623        (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18624        FW_BUF_SIZE);
18625    for (i = 0; i < sc->num_queues; i++) {
18626        fp = &sc->fp[i];
18627        BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18628            (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18629            sizeof(union bxe_host_hc_status_block));
18630        BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18631            (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18632            (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18633        BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18634            (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18635            (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18636        BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18637            (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18638            (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18639        BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18640            (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18641            (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18642    }
18643
18644    ilt_cli = &ilt->clients[1];
18645    for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18646        BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18647            (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18648            ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18649    }
18650
18651
18652    cmd_offset = DMAE_REG_CMD_MEM;
18653    for (i = 0; i < 224; i++) {
18654        reg_addr = (cmd_offset +(i * 4));
18655        reg_val = REG_RD(sc, reg_addr);
18656        BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18657            reg_addr, reg_val);
18658    }
18659
18660
18661    BLOGI(sc, "Collection of grcdump done\n");
18662    sc->grcdump_done = 1;
18663    return(rval);
18664}
18665
18666static int
18667bxe_add_cdev(struct bxe_softc *sc)
18668{
18669    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18670
18671    if (sc->eeprom == NULL) {
18672        BLOGW(sc, "Unable to allocate eeprom data buffer\n");
18673        return (-1);
18674    }
18675
18676    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18677                            sc->ifp->if_dunit,
18678                            UID_ROOT,
18679                            GID_WHEEL,
18680                            0600,
18681                            "%s",
18682                            if_name(sc->ifp));
18683
18684    if (sc->ioctl_dev == NULL) {
18685        free(sc->eeprom, M_DEVBUF);
18686        sc->eeprom = NULL;
18687        return (-1);
18688    }
18689
18690    sc->ioctl_dev->si_drv1 = sc;
18691
18692    return (0);
18693}
18694
18695static void
18696bxe_del_cdev(struct bxe_softc *sc)
18697{
18698    if (sc->ioctl_dev != NULL)
18699        destroy_dev(sc->ioctl_dev);
18700
18701    if (sc->eeprom != NULL) {
18702        free(sc->eeprom, M_DEVBUF);
18703        sc->eeprom = NULL;
18704    }
18705    sc->ioctl_dev = NULL;
18706
18707    return;
18708}
18709
18710static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18711{
18712
18713    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18714        return FALSE;
18715
18716    return TRUE;
18717}
18718
18719
18720static int
18721bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18722{
18723    int rval = 0;
18724
18725    if(!bxe_is_nvram_accessible(sc)) {
18726        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18727        return (-EAGAIN);
18728    }
18729    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18730
18731
18732    return (rval);
18733}
18734
18735static int
18736bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18737{
18738    int rval = 0;
18739
18740    if(!bxe_is_nvram_accessible(sc)) {
18741        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18742        return (-EAGAIN);
18743    }
18744    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18745
18746    return (rval);
18747}
18748
18749static int
18750bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18751{
18752    int rval = 0;
18753
18754    switch (eeprom->eeprom_cmd) {
18755
18756    case BXE_EEPROM_CMD_SET_EEPROM:
18757
18758        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18759                       eeprom->eeprom_data_len);
18760
18761        if (rval)
18762            break;
18763
18764        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18765                       eeprom->eeprom_data_len);
18766        break;
18767
18768    case BXE_EEPROM_CMD_GET_EEPROM:
18769
18770        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18771                       eeprom->eeprom_data_len);
18772
18773        if (rval) {
18774            break;
18775        }
18776
18777        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18778                       eeprom->eeprom_data_len);
18779        break;
18780
18781    default:
18782            rval = EINVAL;
18783            break;
18784    }
18785
18786    if (rval) {
18787        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18788    }
18789
18790    return (rval);
18791}
18792
18793static int
18794bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18795{
18796    uint32_t ext_phy_config;
18797    int port = SC_PORT(sc);
18798    int cfg_idx = bxe_get_link_cfg_idx(sc);
18799
18800    dev_p->supported = sc->port.supported[cfg_idx] |
18801            (sc->port.supported[cfg_idx ^ 1] &
18802            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18803    dev_p->advertising = sc->port.advertising[cfg_idx];
18804    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18805        ELINK_ETH_PHY_SFP_1G_FIBER) {
18806        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
18807        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18808    }
18809    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18810        !(sc->flags & BXE_MF_FUNC_DIS)) {
18811        dev_p->duplex = sc->link_vars.duplex;
18812        if (IS_MF(sc) && !BXE_NOMCP(sc))
18813            dev_p->speed = bxe_get_mf_speed(sc);
18814        else
18815            dev_p->speed = sc->link_vars.line_speed;
18816    } else {
18817        dev_p->duplex = DUPLEX_UNKNOWN;
18818        dev_p->speed = SPEED_UNKNOWN;
18819    }
18820
18821    dev_p->port = bxe_media_detect(sc);
18822
18823    ext_phy_config = SHMEM_RD(sc,
18824                         dev_info.port_hw_config[port].external_phy_config);
18825    if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
18826        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
18827        dev_p->phy_address =  sc->port.phy_addr;
18828    else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18829            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
18830        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18831            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
18832        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
18833    else
18834        dev_p->phy_address = 0;
18835
18836    if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
18837        dev_p->autoneg = AUTONEG_ENABLE;
18838    else
18839       dev_p->autoneg = AUTONEG_DISABLE;
18840
18841
18842    return 0;
18843}
18844
18845static int
18846bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18847        struct thread *td)
18848{
18849    struct bxe_softc    *sc;
18850    int                 rval = 0;
18851    device_t            pci_dev;
18852    bxe_grcdump_t       *dump = NULL;
18853    int grc_dump_size;
18854    bxe_drvinfo_t   *drv_infop = NULL;
18855    bxe_dev_setting_t  *dev_p;
18856    bxe_dev_setting_t  dev_set;
18857    bxe_get_regs_t  *reg_p;
18858    bxe_reg_rdw_t *reg_rdw_p;
18859    bxe_pcicfg_rdw_t *cfg_rdw_p;
18860    bxe_perm_mac_addr_t *mac_addr_p;
18861
18862
18863    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
18864        return ENXIO;
18865
18866    pci_dev= sc->dev;
18867
18868    dump = (bxe_grcdump_t *)data;
18869
18870    switch(cmd) {
18871
18872        case BXE_GRC_DUMP_SIZE:
18873            dump->pci_func = sc->pcie_func;
18874            dump->grcdump_size =
18875                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18876                     sizeof(struct  dump_header);
18877            break;
18878
18879        case BXE_GRC_DUMP:
18880
18881            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18882                                sizeof(struct  dump_header);
18883            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
18884                (dump->grcdump_size < grc_dump_size)) {
18885                rval = EINVAL;
18886                break;
18887            }
18888
18889            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
18890                (!sc->grcdump_started)) {
18891                rval =  bxe_grc_dump(sc);
18892            }
18893
18894            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
18895                (sc->grc_dump != NULL))  {
18896                dump->grcdump_dwords = grc_dump_size >> 2;
18897                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
18898                free(sc->grc_dump, M_DEVBUF);
18899                sc->grc_dump = NULL;
18900                sc->grcdump_started = 0;
18901                sc->grcdump_done = 0;
18902            }
18903
18904            break;
18905
18906        case BXE_DRV_INFO:
18907            drv_infop = (bxe_drvinfo_t *)data;
18908            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
18909            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
18910                BXE_DRIVER_VERSION);
18911            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
18912                sc->devinfo.bc_ver_str);
18913            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
18914                "%s", sc->fw_ver_str);
18915            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
18916            drv_infop->reg_dump_len =
18917                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
18918                    + sizeof(struct  dump_header);
18919            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
18920                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
18921            break;
18922
18923        case BXE_DEV_SETTING:
18924            dev_p = (bxe_dev_setting_t *)data;
18925            bxe_get_settings(sc, &dev_set);
18926            dev_p->supported = dev_set.supported;
18927            dev_p->advertising = dev_set.advertising;
18928            dev_p->speed = dev_set.speed;
18929            dev_p->duplex = dev_set.duplex;
18930            dev_p->port = dev_set.port;
18931            dev_p->phy_address = dev_set.phy_address;
18932            dev_p->autoneg = dev_set.autoneg;
18933
18934            break;
18935
18936        case BXE_GET_REGS:
18937
18938            reg_p = (bxe_get_regs_t *)data;
18939            grc_dump_size = reg_p->reg_buf_len;
18940
18941            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
18942                bxe_grc_dump(sc);
18943            }
18944            if((sc->grcdump_done) && (sc->grcdump_started) &&
18945                (sc->grc_dump != NULL))  {
18946                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
18947                free(sc->grc_dump, M_DEVBUF);
18948                sc->grc_dump = NULL;
18949                sc->grcdump_started = 0;
18950                sc->grcdump_done = 0;
18951            }
18952
18953            break;
18954
18955        case BXE_RDW_REG:
18956            reg_rdw_p = (bxe_reg_rdw_t *)data;
18957            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
18958                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
18959                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
18960
18961            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
18962                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
18963                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
18964
18965            break;
18966
18967        case BXE_RDW_PCICFG:
18968            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
18969            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
18970
18971                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
18972                                         cfg_rdw_p->cfg_width);
18973
18974            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
18975                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
18976                            cfg_rdw_p->cfg_width);
18977            } else {
18978                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
18979            }
18980            break;
18981
18982        case BXE_MAC_ADDR:
18983            mac_addr_p = (bxe_perm_mac_addr_t *)data;
18984            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
18985                sc->mac_addr_str);
18986            break;
18987
18988        case BXE_EEPROM:
18989            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
18990            break;
18991
18992
18993        default:
18994            break;
18995    }
18996
18997    return (rval);
18998}
18999