bxe.c revision 297793
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/bxe/bxe.c 297793 2016-04-10 23:07:00Z pfg $");
29
30#define BXE_DRIVER_VERSION "1.78.81"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero (0) for older kernels that don't include this changeset,
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define it as pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the
73 * per-function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        BRCM_VENDORID,
171        CHIP_NUM_57840_MF,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 MF 10GbE"
174    },
175    {
176        0, 0, 0, 0, NULL
177    }
178};
179
180MALLOC_DECLARE(M_BXE_ILT);
181MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
182
183/*
184 * FreeBSD device entry points.
185 */
186static int bxe_probe(device_t);
187static int bxe_attach(device_t);
188static int bxe_detach(device_t);
189static int bxe_shutdown(device_t);
190
191/*
192 * FreeBSD KLD module/device interface event handler methods.
193 */
194static device_method_t bxe_methods[] = {
195    /* Device interface (device_if.h) */
196    DEVMETHOD(device_probe,     bxe_probe),
197    DEVMETHOD(device_attach,    bxe_attach),
198    DEVMETHOD(device_detach,    bxe_detach),
199    DEVMETHOD(device_shutdown,  bxe_shutdown),
200    /* Bus interface (bus_if.h) */
201    DEVMETHOD(bus_print_child,  bus_generic_print_child),
202    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
203    KOBJMETHOD_END
204};
205
206/*
207 * FreeBSD KLD Module data declaration
208 */
209static driver_t bxe_driver = {
210    "bxe",                   /* module name */
211    bxe_methods,             /* event handler */
212    sizeof(struct bxe_softc) /* extra data */
213};
214
215/*
216 * FreeBSD dev class is needed to manage dev instances and
217 * to associate with a bus type
218 */
219static devclass_t bxe_devclass;
220
221MODULE_DEPEND(bxe, pci, 1, 1, 1);
222MODULE_DEPEND(bxe, ether, 1, 1, 1);
223DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
224
225/* resources needed for unloading a previously loaded device */
226
227#define BXE_PREV_WAIT_NEEDED 1
228struct mtx bxe_prev_mtx;
229MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
230struct bxe_prev_list_node {
231    LIST_ENTRY(bxe_prev_list_node) node;
232    uint8_t bus;
233    uint8_t slot;
234    uint8_t path;
235    uint8_t aer; /* XXX automatic error recovery */
236    uint8_t undi;
237};
238static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
239
240static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
241
242/* Tunable device values... */
243
244SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
245
246/* Debug */
247unsigned long bxe_debug = 0;
248SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
249             &bxe_debug, 0, "Debug logging mode");
250
251/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
252static int bxe_interrupt_mode = INTR_MODE_MSIX;
253SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
254           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
255
256/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
257static int bxe_queue_count = 4;
258SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
259           &bxe_queue_count, 0, "Multi-Queue queue count");
260
261/* max number of buffers per queue (default RX_BD_USABLE) */
262static int bxe_max_rx_bufs = 0;
263SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
264           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
265
266/* Host interrupt coalescing RX tick timer (usecs) */
267static int bxe_hc_rx_ticks = 25;
268SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
269           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
270
271/* Host interrupt coalescing TX tick timer (usecs) */
272static int bxe_hc_tx_ticks = 50;
273SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
274           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
275
276/* Maximum number of Rx packets to process at a time */
277static int bxe_rx_budget = 0xffffffff;
278SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
279           &bxe_rx_budget, 0, "Rx processing budget");
280
281/* Maximum LRO aggregation size */
282static int bxe_max_aggregation_size = 0;
283SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
284           &bxe_max_aggregation_size, 0, "max aggregation size");
285
286/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
287static int bxe_mrrs = -1;
288SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
289           &bxe_mrrs, 0, "PCIe maximum read request size");
290
291/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
292static int bxe_autogreeen = 0;
293SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
294           &bxe_autogreeen, 0, "AutoGrEEEn support");
295
296/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
297static int bxe_udp_rss = 0;
298SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
299           &bxe_udp_rss, 0, "UDP RSS support");
300
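/*
 * Illustrative only: the CTLFLAG_RDTUN/CTLFLAG_TUN parameters above are also
 * read as loader tunables, so they can be set from /boot/loader.conf before
 * the driver attaches, e.g.:
 *
 *   hw.bxe.interrupt_mode="2"   # prefer MSI-X
 *   hw.bxe.queue_count="4"
 *   hw.bxe.debug="0"
 */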
301
302#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
303
304#define STATS_OFFSET32(stat_name)                   \
305    (offsetof(struct bxe_eth_stats, stat_name) / 4)
306
307#define Q_STATS_OFFSET32(stat_name)                   \
308    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
309
310static const struct {
311    uint32_t offset;
312    uint32_t size;
313    uint32_t flags;
314#define STATS_FLAGS_PORT  1
315#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
316#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
317    char string[STAT_NAME_LEN];
318} bxe_eth_stats_arr[] = {
319    { STATS_OFFSET32(total_bytes_received_hi),
320                8, STATS_FLAGS_BOTH, "rx_bytes" },
321    { STATS_OFFSET32(error_bytes_received_hi),
322                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
323    { STATS_OFFSET32(total_unicast_packets_received_hi),
324                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
325    { STATS_OFFSET32(total_multicast_packets_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
327    { STATS_OFFSET32(total_broadcast_packets_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
329    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
330                8, STATS_FLAGS_PORT, "rx_crc_errors" },
331    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
332                8, STATS_FLAGS_PORT, "rx_align_errors" },
333    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
334                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
335    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
336                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
337    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
338                8, STATS_FLAGS_PORT, "rx_fragments" },
339    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
340                8, STATS_FLAGS_PORT, "rx_jabbers" },
341    { STATS_OFFSET32(no_buff_discard_hi),
342                8, STATS_FLAGS_BOTH, "rx_discards" },
343    { STATS_OFFSET32(mac_filter_discard),
344                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
345    { STATS_OFFSET32(mf_tag_discard),
346                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
347    { STATS_OFFSET32(pfc_frames_received_hi),
348                8, STATS_FLAGS_PORT, "pfc_frames_received" },
349    { STATS_OFFSET32(pfc_frames_sent_hi),
350                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
351    { STATS_OFFSET32(brb_drop_hi),
352                8, STATS_FLAGS_PORT, "rx_brb_discard" },
353    { STATS_OFFSET32(brb_truncate_hi),
354                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
355    { STATS_OFFSET32(pause_frames_received_hi),
356                8, STATS_FLAGS_PORT, "rx_pause_frames" },
357    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
358                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
359    { STATS_OFFSET32(nig_timer_max),
360                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
361    { STATS_OFFSET32(total_bytes_transmitted_hi),
362                8, STATS_FLAGS_BOTH, "tx_bytes" },
363    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
364                8, STATS_FLAGS_PORT, "tx_error_bytes" },
365    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
366                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
367    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
369    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
370                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
371    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
372                8, STATS_FLAGS_PORT, "tx_mac_errors" },
373    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
374                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
375    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
376                8, STATS_FLAGS_PORT, "tx_single_collisions" },
377    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
378                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
379    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
380                8, STATS_FLAGS_PORT, "tx_deferred" },
381    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
382                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
383    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
384                8, STATS_FLAGS_PORT, "tx_late_collisions" },
385    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
386                8, STATS_FLAGS_PORT, "tx_total_collisions" },
387    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
388                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
389    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
390                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
391    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
392                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
394                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
396                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
397    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
398                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
399    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
400                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
401    { STATS_OFFSET32(pause_frames_sent_hi),
402                8, STATS_FLAGS_PORT, "tx_pause_frames" },
403    { STATS_OFFSET32(total_tpa_aggregations_hi),
404                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
405    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
406                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
407    { STATS_OFFSET32(total_tpa_bytes_hi),
408                8, STATS_FLAGS_FUNC, "tpa_bytes"},
409    { STATS_OFFSET32(eee_tx_lpi),
410                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
411    { STATS_OFFSET32(rx_calls),
412                4, STATS_FLAGS_FUNC, "rx_calls"},
413    { STATS_OFFSET32(rx_pkts),
414                4, STATS_FLAGS_FUNC, "rx_pkts"},
415    { STATS_OFFSET32(rx_tpa_pkts),
416                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
417    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
418                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
419    { STATS_OFFSET32(rx_bxe_service_rxsgl),
420                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
421    { STATS_OFFSET32(rx_jumbo_sge_pkts),
422                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
423    { STATS_OFFSET32(rx_soft_errors),
424                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
425    { STATS_OFFSET32(rx_hw_csum_errors),
426                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
427    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
428                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
429    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
430                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
431    { STATS_OFFSET32(rx_budget_reached),
432                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
433    { STATS_OFFSET32(tx_pkts),
434                4, STATS_FLAGS_FUNC, "tx_pkts"},
435    { STATS_OFFSET32(tx_soft_errors),
436                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
437    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
438                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
439    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
440                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
441    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
442                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
443    { STATS_OFFSET32(tx_ofld_frames_lso),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
445    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
447    { STATS_OFFSET32(tx_encap_failures),
448                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
449    { STATS_OFFSET32(tx_hw_queue_full),
450                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
451    { STATS_OFFSET32(tx_hw_max_queue_depth),
452                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
453    { STATS_OFFSET32(tx_dma_mapping_failure),
454                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
455    { STATS_OFFSET32(tx_max_drbr_queue_depth),
456                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
457    { STATS_OFFSET32(tx_window_violation_std),
458                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
459    { STATS_OFFSET32(tx_window_violation_tso),
460                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
461    { STATS_OFFSET32(tx_chain_lost_mbuf),
462                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
463    { STATS_OFFSET32(tx_frames_deferred),
464                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
465    { STATS_OFFSET32(tx_queue_xoff),
466                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
467    { STATS_OFFSET32(mbuf_defrag_attempts),
468                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
469    { STATS_OFFSET32(mbuf_defrag_failures),
470                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
471    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
472                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
473    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
474                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
475    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
476                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
477    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
479    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
481    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
483    { STATS_OFFSET32(mbuf_alloc_tx),
484                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
485    { STATS_OFFSET32(mbuf_alloc_rx),
486                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
487    { STATS_OFFSET32(mbuf_alloc_sge),
488                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
489    { STATS_OFFSET32(mbuf_alloc_tpa),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
491};
492
493static const struct {
494    uint32_t offset;
495    uint32_t size;
496    char string[STAT_NAME_LEN];
497} bxe_eth_q_stats_arr[] = {
498    { Q_STATS_OFFSET32(total_bytes_received_hi),
499                8, "rx_bytes" },
500    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
501                8, "rx_ucast_packets" },
502    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
503                8, "rx_mcast_packets" },
504    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
505                8, "rx_bcast_packets" },
506    { Q_STATS_OFFSET32(no_buff_discard_hi),
507                8, "rx_discards" },
508    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
509                8, "tx_bytes" },
510    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
511                8, "tx_ucast_packets" },
512    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
513                8, "tx_mcast_packets" },
514    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
515                8, "tx_bcast_packets" },
516    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
517                8, "tpa_aggregations" },
518    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
519                8, "tpa_aggregated_frames"},
520    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
521                8, "tpa_bytes"},
522    { Q_STATS_OFFSET32(rx_calls),
523                4, "rx_calls"},
524    { Q_STATS_OFFSET32(rx_pkts),
525                4, "rx_pkts"},
526    { Q_STATS_OFFSET32(rx_tpa_pkts),
527                4, "rx_tpa_pkts"},
528    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
529                4, "rx_erroneous_jumbo_sge_pkts"},
530    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
531                4, "rx_bxe_service_rxsgl"},
532    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
533                4, "rx_jumbo_sge_pkts"},
534    { Q_STATS_OFFSET32(rx_soft_errors),
535                4, "rx_soft_errors"},
536    { Q_STATS_OFFSET32(rx_hw_csum_errors),
537                4, "rx_hw_csum_errors"},
538    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
539                4, "rx_ofld_frames_csum_ip"},
540    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
541                4, "rx_ofld_frames_csum_tcp_udp"},
542    { Q_STATS_OFFSET32(rx_budget_reached),
543                4, "rx_budget_reached"},
544    { Q_STATS_OFFSET32(tx_pkts),
545                4, "tx_pkts"},
546    { Q_STATS_OFFSET32(tx_soft_errors),
547                4, "tx_soft_errors"},
548    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
549                4, "tx_ofld_frames_csum_ip"},
550    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
551                4, "tx_ofld_frames_csum_tcp"},
552    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
553                4, "tx_ofld_frames_csum_udp"},
554    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
555                4, "tx_ofld_frames_lso"},
556    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
557                4, "tx_ofld_frames_lso_hdr_splits"},
558    { Q_STATS_OFFSET32(tx_encap_failures),
559                4, "tx_encap_failures"},
560    { Q_STATS_OFFSET32(tx_hw_queue_full),
561                4, "tx_hw_queue_full"},
562    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
563                4, "tx_hw_max_queue_depth"},
564    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
565                4, "tx_dma_mapping_failure"},
566    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
567                4, "tx_max_drbr_queue_depth"},
568    { Q_STATS_OFFSET32(tx_window_violation_std),
569                4, "tx_window_violation_std"},
570    { Q_STATS_OFFSET32(tx_window_violation_tso),
571                4, "tx_window_violation_tso"},
572    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
573                4, "tx_chain_lost_mbuf"},
574    { Q_STATS_OFFSET32(tx_frames_deferred),
575                4, "tx_frames_deferred"},
576    { Q_STATS_OFFSET32(tx_queue_xoff),
577                4, "tx_queue_xoff"},
578    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
579                4, "mbuf_defrag_attempts"},
580    { Q_STATS_OFFSET32(mbuf_defrag_failures),
581                4, "mbuf_defrag_failures"},
582    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
583                4, "mbuf_rx_bd_alloc_failed"},
584    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
585                4, "mbuf_rx_bd_mapping_failed"},
586    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
587                4, "mbuf_rx_tpa_alloc_failed"},
588    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
589                4, "mbuf_rx_tpa_mapping_failed"},
590    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
591                4, "mbuf_rx_sge_alloc_failed"},
592    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
593                4, "mbuf_rx_sge_mapping_failed"},
594    { Q_STATS_OFFSET32(mbuf_alloc_tx),
595                4, "mbuf_alloc_tx"},
596    { Q_STATS_OFFSET32(mbuf_alloc_rx),
597                4, "mbuf_alloc_rx"},
598    { Q_STATS_OFFSET32(mbuf_alloc_sge),
599                4, "mbuf_alloc_sge"},
600    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
601                4, "mbuf_alloc_tpa"}
602};
603
604#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
605#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
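/*
 * Each entry above records the dword offset of a statistic within
 * struct bxe_eth_stats / bxe_eth_q_stats, its width in bytes (8 for the
 * hi/lo 64-bit counters, 4 for 32-bit ones) and the name under which it is
 * exposed; the tables are walked when the per-driver statistics sysctls are
 * created.
 */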
606
607
608static void    bxe_cmng_fns_init(struct bxe_softc *sc,
609                                 uint8_t          read_cfg,
610                                 uint8_t          cmng_type);
611static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
612static void    storm_memset_cmng(struct bxe_softc *sc,
613                                 struct cmng_init *cmng,
614                                 uint8_t          port);
615static void    bxe_set_reset_global(struct bxe_softc *sc);
616static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
617static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
618                                 int              engine);
619static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
620static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
621                                   uint8_t          *global,
622                                   uint8_t          print);
623static void    bxe_int_disable(struct bxe_softc *sc);
624static int     bxe_release_leader_lock(struct bxe_softc *sc);
625static void    bxe_pf_disable(struct bxe_softc *sc);
626static void    bxe_free_fp_buffers(struct bxe_softc *sc);
627static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
628                                      struct bxe_fastpath *fp,
629                                      uint16_t            rx_bd_prod,
630                                      uint16_t            rx_cq_prod,
631                                      uint16_t            rx_sge_prod);
632static void    bxe_link_report_locked(struct bxe_softc *sc);
633static void    bxe_link_report(struct bxe_softc *sc);
634static void    bxe_link_status_update(struct bxe_softc *sc);
635static void    bxe_periodic_callout_func(void *xsc);
636static void    bxe_periodic_start(struct bxe_softc *sc);
637static void    bxe_periodic_stop(struct bxe_softc *sc);
638static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
639                                    uint16_t prev_index,
640                                    uint16_t index);
641static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
642                                     int                 queue);
643static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
644                                     uint16_t            index);
645static uint8_t bxe_txeof(struct bxe_softc *sc,
646                         struct bxe_fastpath *fp);
647static void    bxe_task_fp(struct bxe_fastpath *fp);
648static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
649                                     struct mbuf      *m,
650                                     uint8_t          contents);
651static int     bxe_alloc_mem(struct bxe_softc *sc);
652static void    bxe_free_mem(struct bxe_softc *sc);
653static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
654static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
655static int     bxe_interrupt_attach(struct bxe_softc *sc);
656static void    bxe_interrupt_detach(struct bxe_softc *sc);
657static void    bxe_set_rx_mode(struct bxe_softc *sc);
658static int     bxe_init_locked(struct bxe_softc *sc);
659static int     bxe_stop_locked(struct bxe_softc *sc);
660static __noinline int bxe_nic_load(struct bxe_softc *sc,
661                                   int              load_mode);
662static __noinline int bxe_nic_unload(struct bxe_softc *sc,
663                                     uint32_t         unload_mode,
664                                     uint8_t          keep_link);
665
666static void bxe_handle_sp_tq(void *context, int pending);
667static void bxe_handle_fp_tq(void *context, int pending);
668
669static int bxe_add_cdev(struct bxe_softc *sc);
670static void bxe_del_cdev(struct bxe_softc *sc);
671static int bxe_grc_dump(struct bxe_softc *sc);
672static int bxe_alloc_buf_rings(struct bxe_softc *sc);
673static void bxe_free_buf_rings(struct bxe_softc *sc);
674
675/* calculate crc32 on a buffer (NOTE: crc32_length MUST be a multiple of 8) */
676uint32_t
677calc_crc32(uint8_t  *crc32_packet,
678           uint32_t crc32_length,
679           uint32_t crc32_seed,
680           uint8_t  complement)
681{
682   uint32_t byte         = 0;
683   uint32_t bit          = 0;
684   uint8_t  msb          = 0;
685   uint32_t temp         = 0;
686   uint32_t shft         = 0;
687   uint8_t  current_byte = 0;
688   uint32_t crc32_result = crc32_seed;
689   const uint32_t CRC32_POLY = 0x1edc6f41;
690
691   if ((crc32_packet == NULL) ||
692       (crc32_length == 0) ||
693       ((crc32_length % 8) != 0))
694    {
695        return (crc32_result);
696    }
697
698    for (byte = 0; byte < crc32_length; byte = byte + 1)
699    {
700        current_byte = crc32_packet[byte];
701        for (bit = 0; bit < 8; bit = bit + 1)
702        {
703            /* msb = crc32_result[31]; */
704            msb = (uint8_t)(crc32_result >> 31);
705
706            crc32_result = crc32_result << 1;
707
708            /* if (msb != current_byte[bit]) */
709            if (msb != (0x1 & (current_byte >> bit)))
710            {
711                crc32_result = crc32_result ^ CRC32_POLY;
712                /* crc32_result[0] = 1 */
713                crc32_result |= 1;
714            }
715        }
716    }
717
718    /* Last step is to:
719     * 1. "mirror" every bit
720     * 2. swap the 4 bytes
721     * 3. complement each bit
722     */
723
724    /* Mirror */
725    temp = crc32_result;
726    shft = sizeof(crc32_result) * 8 - 1;
727
728    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
729    {
730        temp <<= 1;
731        temp |= crc32_result & 1;
732        shft--;
733    }
734
735    /* temp[31-bit] = crc32_result[bit] */
736    temp <<= shft;
737
738    /* Swap */
739    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
740    {
741        uint32_t t0, t1, t2, t3;
742        t0 = (0x000000ff & (temp >> 24));
743        t1 = (0x0000ff00 & (temp >> 8));
744        t2 = (0x00ff0000 & (temp << 8));
745        t3 = (0xff000000 & (temp << 24));
746        crc32_result = t0 | t1 | t2 | t3;
747    }
748
749    /* Complement */
750    if (complement)
751    {
752        crc32_result = ~crc32_result;
753    }
754
755    return (crc32_result);
756}
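/*
 * Illustrative usage only: compute a CRC over a multiple-of-8-byte buffer
 * with an all-ones seed and final complement, the usual CRC-32C convention
 * for the 0x1edc6f41 polynomial:
 *
 *   uint8_t buf[8];
 *   uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */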
757
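/*
 * Linux-style atomic bit and compare-exchange helpers implemented with the
 * FreeBSD atomic(9) primitives. The test-and-set/test-and-clear variants
 * retry the compare-and-set until it succeeds and return the previous state
 * of the requested bit.
 */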
758int
759bxe_test_bit(int                    nr,
760             volatile unsigned long *addr)
761{
762    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
763}
764
765void
766bxe_set_bit(unsigned int           nr,
767            volatile unsigned long *addr)
768{
769    atomic_set_acq_long(addr, (1 << nr));
770}
771
772void
773bxe_clear_bit(int                    nr,
774              volatile unsigned long *addr)
775{
776    atomic_clear_acq_long(addr, (1 << nr));
777}
778
779int
780bxe_test_and_set_bit(int                    nr,
781                       volatile unsigned long *addr)
782{
783    unsigned long x;
784    nr = (1 << nr);
785    do {
786        x = *addr;
787    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
788    // if (x & nr) bit_was_set; else bit_was_not_set;
789    return (x & nr);
790}
791
792int
793bxe_test_and_clear_bit(int                    nr,
794                       volatile unsigned long *addr)
795{
796    unsigned long x;
797    nr = (1 << nr);
798    do {
799        x = *addr;
800    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
801    // if (x & nr) bit_was_set; else bit_was_not_set;
802    return (x & nr);
803}
804
805int
806bxe_cmpxchg(volatile int *addr,
807            int          old,
808            int          new)
809{
810    int x;
811    do {
812        x = *addr;
813    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
814    return (x);
815}
816
817/*
818 * Get DMA memory from the OS.
819 *
820 * Validates that the OS has provided DMA buffers in response to a
821 * bus_dmamap_load call and saves the physical address of those buffers.
822 * When the callback is used the OS will return 0 for the mapping function
823 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
824 * failures back to the caller.
825 *
826 * Returns:
827 *   Nothing.
828 */
829static void
830bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
831{
832    struct bxe_dma *dma = arg;
833
834    if (error) {
835        dma->paddr = 0;
836        dma->nseg  = 0;
837        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
838    } else {
839        dma->paddr = segs->ds_addr;
840        dma->nseg  = nseg;
841    }
842}
843
844/*
845 * Allocate a block of memory and map it for DMA. No partial completions are
846 * allowed; any resources already acquired are released if we cannot acquire
847 * them all.
848 *
849 * Returns:
850 *   0 = Success, !0 = Failure
851 */
852int
853bxe_dma_alloc(struct bxe_softc *sc,
854              bus_size_t       size,
855              struct bxe_dma   *dma,
856              const char       *msg)
857{
858    int rc;
859
860    if (dma->size > 0) {
861        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
862              (unsigned long)dma->size);
863        return (1);
864    }
865
866    memset(dma, 0, sizeof(*dma)); /* sanity */
867    dma->sc   = sc;
868    dma->size = size;
869    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
870
871    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
872                            BCM_PAGE_SIZE,      /* alignment */
873                            0,                  /* boundary limit */
874                            BUS_SPACE_MAXADDR,  /* restricted low */
875                            BUS_SPACE_MAXADDR,  /* restricted hi */
876                            NULL,               /* addr filter() */
877                            NULL,               /* addr filter() arg */
878                            size,               /* max map size */
879                            1,                  /* num discontinuous */
880                            size,               /* max seg size */
881                            BUS_DMA_ALLOCNOW,   /* flags */
882                            NULL,               /* lock() */
883                            NULL,               /* lock() arg */
884                            &dma->tag);         /* returned dma tag */
885    if (rc != 0) {
886        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
887        memset(dma, 0, sizeof(*dma));
888        return (1);
889    }
890
891    rc = bus_dmamem_alloc(dma->tag,
892                          (void **)&dma->vaddr,
893                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
894                          &dma->map);
895    if (rc != 0) {
896        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
897        bus_dma_tag_destroy(dma->tag);
898        memset(dma, 0, sizeof(*dma));
899        return (1);
900    }
901
902    rc = bus_dmamap_load(dma->tag,
903                         dma->map,
904                         dma->vaddr,
905                         size,
906                         bxe_dma_map_addr, /* BLOGD in here */
907                         dma,
908                         BUS_DMA_NOWAIT);
909    if (rc != 0) {
910        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
911        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
912        bus_dma_tag_destroy(dma->tag);
913        memset(dma, 0, sizeof(*dma));
914        return (1);
915    }
916
917    return (0);
918}
919
920void
921bxe_dma_free(struct bxe_softc *sc,
922             struct bxe_dma   *dma)
923{
924    if (dma->size > 0) {
925        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
926
927        bus_dmamap_sync(dma->tag, dma->map,
928                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
929        bus_dmamap_unload(dma->tag, dma->map);
930        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
931        bus_dma_tag_destroy(dma->tag);
932    }
933
934    memset(dma, 0, sizeof(*dma));
935}
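/*
 * Illustrative usage only (the dma field name below is hypothetical):
 *
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->example_dma, "example") != 0)
 *       return (ENOMEM);
 *   ...
 *   bxe_dma_free(sc, &sc->example_dma);
 */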
936
937/*
938 * These indirect read and write routines are only used during init.
939 * The locking is handled by the MCP.
940 */
941
942void
943bxe_reg_wr_ind(struct bxe_softc *sc,
944               uint32_t         addr,
945               uint32_t         val)
946{
947    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
948    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
949    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
950}
951
952uint32_t
953bxe_reg_rd_ind(struct bxe_softc *sc,
954               uint32_t         addr)
955{
956    uint32_t val;
957
958    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
959    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
960    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
961
962    return (val);
963}
964
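/*
 * Acquire one of the shared driver/MCP hardware resource locks by setting
 * its bit through the per-function MISC_REG_DRIVER_CONTROL register,
 * retrying every 5 ms for up to 5 seconds. Returns 0 on success or -1 on
 * failure.
 */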
965static int
966bxe_acquire_hw_lock(struct bxe_softc *sc,
967                    uint32_t         resource)
968{
969    uint32_t lock_status;
970    uint32_t resource_bit = (1 << resource);
971    int func = SC_FUNC(sc);
972    uint32_t hw_lock_control_reg;
973    int cnt;
974
975    /* validate the resource is within range */
976    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
977        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
978            " resource_bit 0x%x\n", resource, resource_bit);
979        return (-1);
980    }
981
982    if (func <= 5) {
983        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
984    } else {
985        hw_lock_control_reg =
986                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
987    }
988
989    /* validate the resource is not already taken */
990    lock_status = REG_RD(sc, hw_lock_control_reg);
991    if (lock_status & resource_bit) {
992        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
993              resource, lock_status, resource_bit);
994        return (-1);
995    }
996
997    /* try every 5ms for 5 seconds */
998    for (cnt = 0; cnt < 1000; cnt++) {
999        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1000        lock_status = REG_RD(sc, hw_lock_control_reg);
1001        if (lock_status & resource_bit) {
1002            return (0);
1003        }
1004        DELAY(5000);
1005    }
1006
1007    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1008        resource, resource_bit);
1009    return (-1);
1010}
1011
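/*
 * Release a previously acquired hardware resource lock by writing its bit
 * back to the per-function MISC_REG_DRIVER_CONTROL register. Returns 0 on
 * success or -1 if the lock was not held.
 */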
1012static int
1013bxe_release_hw_lock(struct bxe_softc *sc,
1014                    uint32_t         resource)
1015{
1016    uint32_t lock_status;
1017    uint32_t resource_bit = (1 << resource);
1018    int func = SC_FUNC(sc);
1019    uint32_t hw_lock_control_reg;
1020
1021    /* validate the resource is within range */
1022    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1023        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1024            " resource_bit 0x%x\n", resource, resource_bit);
1025        return (-1);
1026    }
1027
1028    if (func <= 5) {
1029        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1030    } else {
1031        hw_lock_control_reg =
1032                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1033    }
1034
1035    /* validate the resource is currently taken */
1036    lock_status = REG_RD(sc, hw_lock_control_reg);
1037    if (!(lock_status & resource_bit)) {
1038        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1039              resource, lock_status, resource_bit);
1040        return (-1);
1041    }
1042
1043    REG_WR(sc, hw_lock_control_reg, resource_bit);
1044    return (0);
1045}
1046static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1047{
1048	BXE_PHY_LOCK(sc);
1049	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1050}
1051
1052static void bxe_release_phy_lock(struct bxe_softc *sc)
1053{
1054	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1055	BXE_PHY_UNLOCK(sc);
1056}
1057/*
1058 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1059 * had we done things the other way around, if two pfs from the same port
1060 * were to attempt to access nvram at the same time, we could run into a
1061 * scenario such as:
1062 * pf A takes the port lock.
1063 * pf B succeeds in taking the same lock since they are from the same port.
1064 * pf A takes the per pf misc lock. Performs eeprom access.
1065 * pf A finishes. Unlocks the per pf misc lock.
1066 * pf B takes the lock and proceeds to perform its own access.
1067 * pf A unlocks the per port lock, while pf B is still working (!).
1068 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1069 * access corrupted by pf B).
1070 */
1071static int
1072bxe_acquire_nvram_lock(struct bxe_softc *sc)
1073{
1074    int port = SC_PORT(sc);
1075    int count, i;
1076    uint32_t val = 0;
1077
1078    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1079    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1080
1081    /* adjust timeout for emulation/FPGA */
1082    count = NVRAM_TIMEOUT_COUNT;
1083    if (CHIP_REV_IS_SLOW(sc)) {
1084        count *= 100;
1085    }
1086
1087    /* request access to nvram interface */
1088    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1089           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1090
1091    for (i = 0; i < count*10; i++) {
1092        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1093        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1094            break;
1095        }
1096
1097        DELAY(5);
1098    }
1099
1100    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1101        BLOGE(sc, "Cannot get access to nvram interface "
1102            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1103            port, val);
1104        return (-1);
1105    }
1106
1107    return (0);
1108}
1109
1110static int
1111bxe_release_nvram_lock(struct bxe_softc *sc)
1112{
1113    int port = SC_PORT(sc);
1114    int count, i;
1115    uint32_t val = 0;
1116
1117    /* adjust timeout for emulation/FPGA */
1118    count = NVRAM_TIMEOUT_COUNT;
1119    if (CHIP_REV_IS_SLOW(sc)) {
1120        count *= 100;
1121    }
1122
1123    /* relinquish nvram interface */
1124    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1125           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1126
1127    for (i = 0; i < count*10; i++) {
1128        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1129        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1130            break;
1131        }
1132
1133        DELAY(5);
1134    }
1135
1136    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1137        BLOGE(sc, "Cannot free access to nvram interface "
1138            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1139            port, val);
1140        return (-1);
1141    }
1142
1143    /* release HW lock: protect against other PFs in PF Direct Assignment */
1144    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1145
1146    return (0);
1147}
1148
1149static void
1150bxe_enable_nvram_access(struct bxe_softc *sc)
1151{
1152    uint32_t val;
1153
1154    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1155
1156    /* enable both bits, even on read */
1157    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1158           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1159}
1160
1161static void
1162bxe_disable_nvram_access(struct bxe_softc *sc)
1163{
1164    uint32_t val;
1165
1166    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1167
1168    /* disable both bits, even after read */
1169    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1170           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1171                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1172}
1173
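/*
 * Read a single dword from NVRAM: program the address, issue the read
 * command and poll for MCPR_NVM_COMMAND_DONE. The result is returned
 * byte-swapped to big-endian so the caller sees NVRAM as a byte stream.
 */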
1174static int
1175bxe_nvram_read_dword(struct bxe_softc *sc,
1176                     uint32_t         offset,
1177                     uint32_t         *ret_val,
1178                     uint32_t         cmd_flags)
1179{
1180    int count, i, rc;
1181    uint32_t val;
1182
1183    /* build the command word */
1184    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1185
1186    /* need to clear DONE bit separately */
1187    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1188
1189    /* address of the NVRAM to read from */
1190    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1191           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1192
1193    /* issue a read command */
1194    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1195
1196    /* adjust timeout for emulation/FPGA */
1197    count = NVRAM_TIMEOUT_COUNT;
1198    if (CHIP_REV_IS_SLOW(sc)) {
1199        count *= 100;
1200    }
1201
1202    /* wait for completion */
1203    *ret_val = 0;
1204    rc = -1;
1205    for (i = 0; i < count; i++) {
1206        DELAY(5);
1207        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1208
1209        if (val & MCPR_NVM_COMMAND_DONE) {
1210            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1211            /* we read nvram data in cpu order
1212             * but the caller sees it as an array of bytes;
1213             * converting to big-endian will do the work
1214             */
1215            *ret_val = htobe32(val);
1216            rc = 0;
1217            break;
1218        }
1219    }
1220
1221    if (rc == -1) {
1222        BLOGE(sc, "nvram read timeout expired "
1223            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1224            offset, cmd_flags, val);
1225    }
1226
1227    return (rc);
1228}
1229
1230static int
1231bxe_nvram_read(struct bxe_softc *sc,
1232               uint32_t         offset,
1233               uint8_t          *ret_buf,
1234               int              buf_size)
1235{
1236    uint32_t cmd_flags;
1237    uint32_t val;
1238    int rc;
1239
1240    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1241        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1242              offset, buf_size);
1243        return (-1);
1244    }
1245
1246    if ((offset + buf_size) > sc->devinfo.flash_size) {
1247        BLOGE(sc, "Invalid parameter, "
1248                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1249              offset, buf_size, sc->devinfo.flash_size);
1250        return (-1);
1251    }
1252
1253    /* request access to nvram interface */
1254    rc = bxe_acquire_nvram_lock(sc);
1255    if (rc) {
1256        return (rc);
1257    }
1258
1259    /* enable access to nvram interface */
1260    bxe_enable_nvram_access(sc);
1261
1262    /* read the first word(s) */
1263    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1264    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1265        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1266        memcpy(ret_buf, &val, 4);
1267
1268        /* advance to the next dword */
1269        offset += sizeof(uint32_t);
1270        ret_buf += sizeof(uint32_t);
1271        buf_size -= sizeof(uint32_t);
1272        cmd_flags = 0;
1273    }
1274
1275    if (rc == 0) {
1276        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1277        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1278        memcpy(ret_buf, &val, 4);
1279    }
1280
1281    /* disable access to nvram interface */
1282    bxe_disable_nvram_access(sc);
1283    bxe_release_nvram_lock(sc);
1284
1285    return (rc);
1286}
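/*
 * Illustrative usage only (offset and length must be dword aligned and
 * within the flash): read the first 64 bytes of NVRAM:
 *
 *   uint8_t buf[64];
 *   if (bxe_nvram_read(sc, 0, buf, sizeof(buf)) != 0)
 *       BLOGE(sc, "nvram read failed\n");
 */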
1287
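/*
 * Write a single dword to NVRAM: program the data and address, issue the
 * write command and poll for MCPR_NVM_COMMAND_DONE.
 */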
1288static int
1289bxe_nvram_write_dword(struct bxe_softc *sc,
1290                      uint32_t         offset,
1291                      uint32_t         val,
1292                      uint32_t         cmd_flags)
1293{
1294    int count, i, rc;
1295
1296    /* build the command word */
1297    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1298
1299    /* need to clear DONE bit separately */
1300    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1301
1302    /* write the data */
1303    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1304
1305    /* address of the NVRAM to write to */
1306    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1307           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1308
1309    /* issue the write command */
1310    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1311
1312    /* adjust timeout for emulation/FPGA */
1313    count = NVRAM_TIMEOUT_COUNT;
1314    if (CHIP_REV_IS_SLOW(sc)) {
1315        count *= 100;
1316    }
1317
1318    /* wait for completion */
1319    rc = -1;
1320    for (i = 0; i < count; i++) {
1321        DELAY(5);
1322        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1323        if (val & MCPR_NVM_COMMAND_DONE) {
1324            rc = 0;
1325            break;
1326        }
1327    }
1328
1329    if (rc == -1) {
1330        BLOGE(sc, "nvram write timeout expired "
1331            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1332            offset, cmd_flags, val);
1333    }
1334
1335    return (rc);
1336}
1337
1338#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1339
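/*
 * Write a single byte to NVRAM by reading the containing aligned dword,
 * merging in the new byte at the proper offset and writing the dword back.
 */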
1340static int
1341bxe_nvram_write1(struct bxe_softc *sc,
1342                 uint32_t         offset,
1343                 uint8_t          *data_buf,
1344                 int              buf_size)
1345{
1346    uint32_t cmd_flags;
1347    uint32_t align_offset;
1348    uint32_t val;
1349    int rc;
1350
1351    if ((offset + buf_size) > sc->devinfo.flash_size) {
1352        BLOGE(sc, "Invalid parameter, "
1353                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1354              offset, buf_size, sc->devinfo.flash_size);
1355        return (-1);
1356    }
1357
1358    /* request access to nvram interface */
1359    rc = bxe_acquire_nvram_lock(sc);
1360    if (rc) {
1361        return (rc);
1362    }
1363
1364    /* enable access to nvram interface */
1365    bxe_enable_nvram_access(sc);
1366
1367    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1368    align_offset = (offset & ~0x03);
1369    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1370
1371    if (rc == 0) {
1372        val &= ~(0xff << BYTE_OFFSET(offset));
1373        val |= (*data_buf << BYTE_OFFSET(offset));
1374
1375        /* nvram data is returned as an array of bytes
1376         * convert it back to cpu order
1377         */
1378        val = be32toh(val);
1379
1380        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1381    }
1382
1383    /* disable access to nvram interface */
1384    bxe_disable_nvram_access(sc);
1385    bxe_release_nvram_lock(sc);
1386
1387    return (rc);
1388}
1389
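/*
 * Write buf_size bytes to NVRAM starting at offset. Except for the
 * single-byte case (handed off to bxe_nvram_write1), offset and buf_size
 * must be dword aligned.
 */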
1390static int
1391bxe_nvram_write(struct bxe_softc *sc,
1392                uint32_t         offset,
1393                uint8_t          *data_buf,
1394                int              buf_size)
1395{
1396    uint32_t cmd_flags;
1397    uint32_t val;
1398    uint32_t written_so_far;
1399    int rc;
1400
1401    if (buf_size == 1) {
1402        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1403    }
1404
1405    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1406        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1407              offset, buf_size);
1408        return (-1);
1409    }
1410
1411    if (buf_size == 0) {
1412        return (0); /* nothing to do */
1413    }
1414
1415    if ((offset + buf_size) > sc->devinfo.flash_size) {
1416        BLOGE(sc, "Invalid parameter, "
1417                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1418              offset, buf_size, sc->devinfo.flash_size);
1419        return (-1);
1420    }
1421
1422    /* request access to nvram interface */
1423    rc = bxe_acquire_nvram_lock(sc);
1424    if (rc) {
1425        return (rc);
1426    }
1427
1428    /* enable access to nvram interface */
1429    bxe_enable_nvram_access(sc);
1430
1431    written_so_far = 0;
1432    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1433    while ((written_so_far < buf_size) && (rc == 0)) {
1434        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1435            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1436        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1437            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1438        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1439            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1440        }
1441
1442        memcpy(&val, data_buf, 4);
1443
1444        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1445
1446        /* advance to the next dword */
1447        offset += sizeof(uint32_t);
1448        data_buf += sizeof(uint32_t);
1449        written_so_far += sizeof(uint32_t);
1450        cmd_flags = 0;
1451    }
1452
1453    /* disable access to nvram interface */
1454    bxe_disable_nvram_access(sc);
1455    bxe_release_nvram_lock(sc);
1456
1457    return (rc);
1458}
1459
1460/* copy command into DMAE command memory and set DMAE command Go */
1461void
1462bxe_post_dmae(struct bxe_softc    *sc,
1463              struct dmae_cmd *dmae,
1464              int                 idx)
1465{
1466    uint32_t cmd_offset;
1467    int i;
1468
1469    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1470    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1471        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1472    }
1473
1474    REG_WR(sc, dmae_reg_go_c[idx], 1);
1475}
1476
1477uint32_t
1478bxe_dmae_opcode_add_comp(uint32_t opcode,
1479                         uint8_t  comp_type)
1480{
1481    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1482                      DMAE_CMD_C_TYPE_ENABLE));
1483}
1484
1485uint32_t
1486bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1487{
1488    return (opcode & ~DMAE_CMD_SRC_RESET);
1489}
1490
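/*
 * Build a DMAE command opcode for the given source and destination types
 * (GRC or PCI), embedding the port, VN and endianness settings and,
 * optionally, the completion type.
 */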
1491uint32_t
1492bxe_dmae_opcode(struct bxe_softc *sc,
1493                uint8_t          src_type,
1494                uint8_t          dst_type,
1495                uint8_t          with_comp,
1496                uint8_t          comp_type)
1497{
1498    uint32_t opcode = 0;
1499
1500    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1501               (dst_type << DMAE_CMD_DST_SHIFT));
1502
1503    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1504
1505    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1506
1507    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1508               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1509
1510    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1511
1512#ifdef __BIG_ENDIAN
1513    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1514#else
1515    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1516#endif
1517
1518    if (with_comp) {
1519        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1520    }
1521
1522    return (opcode);
1523}
1524
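/*
 * Prepare a DMAE command whose completion is signalled by writing
 * DMAE_COMP_VAL into the slowpath wb_comp word.
 */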
1525static void
1526bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1527                        struct dmae_cmd *dmae,
1528                        uint8_t             src_type,
1529                        uint8_t             dst_type)
1530{
1531    memset(dmae, 0, sizeof(struct dmae_cmd));
1532
1533    /* set the opcode */
1534    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1535                                   TRUE, DMAE_COMP_PCI);
1536
1537    /* fill in the completion parameters */
1538    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1539    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1540    dmae->comp_val     = DMAE_COMP_VAL;
1541}
1542
1543/* issue a DMAE command over the init channel and wait for completion */
1544static int
1545bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1546                         struct dmae_cmd *dmae)
1547{
1548    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1549    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1550
1551    BXE_DMAE_LOCK(sc);
1552
1553    /* reset completion */
1554    *wb_comp = 0;
1555
1556    /* post the command on the channel used for initializations */
1557    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1558
1559    /* wait for completion */
1560    DELAY(5);
1561
1562    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1563        if (!timeout ||
1564            (sc->recovery_state != BXE_RECOVERY_DONE &&
1565             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1566            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1567                *wb_comp, sc->recovery_state);
1568            BXE_DMAE_UNLOCK(sc);
1569            return (DMAE_TIMEOUT);
1570        }
1571
1572        timeout--;
1573        DELAY(50);
1574    }
1575
1576    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1577        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1578                *wb_comp, sc->recovery_state);
1579        BXE_DMAE_UNLOCK(sc);
1580        return (DMAE_PCI_ERROR);
1581    }
1582
1583    BXE_DMAE_UNLOCK(sc);
1584    return (0);
1585}
1586
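/*
 * Read up to 4 dwords from a GRC address into the slowpath wb_data buffer,
 * falling back to plain register reads if DMAE is not ready yet.
 */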
1587void
1588bxe_read_dmae(struct bxe_softc *sc,
1589              uint32_t         src_addr,
1590              uint32_t         len32)
1591{
1592    struct dmae_cmd dmae;
1593    uint32_t *data;
1594    int i, rc;
1595
1596    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1597
1598    if (!sc->dmae_ready) {
1599        data = BXE_SP(sc, wb_data[0]);
1600
1601        for (i = 0; i < len32; i++) {
1602            data[i] = (CHIP_IS_E1(sc)) ?
1603                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1604                          REG_RD(sc, (src_addr + (i * 4)));
1605        }
1606
1607        return;
1608    }
1609
1610    /* set opcode and fixed command fields */
1611    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1612
1613    /* fill in addresses and len */
1614    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1615    dmae.src_addr_hi = 0;
1616    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1617    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1618    dmae.len         = len32;
1619
1620    /* issue the command and wait for completion */
1621    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1622        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1623    }
1624}
1625
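/*
 * Write len32 dwords from a host DMA buffer to a GRC address via DMAE,
 * falling back to indirect/string register writes if DMAE is not ready yet.
 */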
1626void
1627bxe_write_dmae(struct bxe_softc *sc,
1628               bus_addr_t       dma_addr,
1629               uint32_t         dst_addr,
1630               uint32_t         len32)
1631{
1632    struct dmae_cmd dmae;
1633    int rc;
1634
1635    if (!sc->dmae_ready) {
1636        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1637
1638        if (CHIP_IS_E1(sc)) {
1639            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1640        } else {
1641            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1642        }
1643
1644        return;
1645    }
1646
1647    /* set opcode and fixed command fields */
1648    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1649
1650    /* fill in addresses and len */
1651    dmae.src_addr_lo = U64_LO(dma_addr);
1652    dmae.src_addr_hi = U64_HI(dma_addr);
1653    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1654    dmae.dst_addr_hi = 0;
1655    dmae.len         = len32;
1656
1657    /* issue the command and wait for completion */
1658    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1659        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1660    }
1661}
1662
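/*
 * DMAE write of arbitrary length, issued in chunks of at most
 * DMAE_LEN32_WR_MAX dwords per command.
 */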
1663void
1664bxe_write_dmae_phys_len(struct bxe_softc *sc,
1665                        bus_addr_t       phys_addr,
1666                        uint32_t         addr,
1667                        uint32_t         len)
1668{
1669    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1670    int offset = 0;
1671
1672    while (len > dmae_wr_max) {
1673        bxe_write_dmae(sc,
1674                       (phys_addr + offset), /* src DMA address */
1675                       (addr + offset),      /* dst GRC address */
1676                       dmae_wr_max);
1677        offset += (dmae_wr_max * 4);
1678        len -= dmae_wr_max;
1679    }
1680
1681    bxe_write_dmae(sc,
1682                   (phys_addr + offset), /* src DMA address */
1683                   (addr + offset),      /* dst GRC address */
1684                   len);
1685}
1686
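/* program the CDU validation values into a connection's eth context */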
1687void
1688bxe_set_ctx_validation(struct bxe_softc   *sc,
1689                       struct eth_context *cxt,
1690                       uint32_t           cid)
1691{
1692    /* ustorm cxt validation */
1693    cxt->ustorm_ag_context.cdu_usage =
1694        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1695            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1696    /* xcontext validation */
1697    cxt->xstorm_ag_context.cdu_reserved =
1698        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1699            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1700}
1701
1702static void
1703bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1704                            uint8_t          port,
1705                            uint8_t          fw_sb_id,
1706                            uint8_t          sb_index,
1707                            uint8_t          ticks)
1708{
1709    uint32_t addr =
1710        (BAR_CSTRORM_INTMEM +
1711         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1712
1713    REG_WR8(sc, addr, ticks);
1714
1715    BLOGD(sc, DBG_LOAD,
1716          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1717          port, fw_sb_id, sb_index, ticks);
1718}
1719
1720static void
1721bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1722                            uint8_t          port,
1723                            uint16_t         fw_sb_id,
1724                            uint8_t          sb_index,
1725                            uint8_t          disable)
1726{
1727    uint32_t enable_flag =
1728        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1729    uint32_t addr =
1730        (BAR_CSTRORM_INTMEM +
1731         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1732    uint8_t flags;
1733
1734    /* clear and set */
1735    flags = REG_RD8(sc, addr);
1736    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1737    flags |= enable_flag;
1738    REG_WR8(sc, addr, flags);
1739
1740    BLOGD(sc, DBG_LOAD,
1741          "port %d fw_sb_id %d sb_index %d disable %d\n",
1742          port, fw_sb_id, sb_index, disable);
1743}
1744
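/*
 * Update the host coalescing timeout and the enable/disable state for a
 * single status block index.
 */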
1745void
1746bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1747                             uint8_t          fw_sb_id,
1748                             uint8_t          sb_index,
1749                             uint8_t          disable,
1750                             uint16_t         usec)
1751{
1752    int port = SC_PORT(sc);
1753    uint8_t ticks = (usec / 4); /* XXX ??? */
1754
1755    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1756
1757    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1758    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1759}
1760
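/* callbacks invoked by the common elink (link management) code */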
1761void
1762elink_cb_udelay(struct bxe_softc *sc,
1763                uint32_t         usecs)
1764{
1765    DELAY(usecs);
1766}
1767
1768uint32_t
1769elink_cb_reg_read(struct bxe_softc *sc,
1770                  uint32_t         reg_addr)
1771{
1772    return (REG_RD(sc, reg_addr));
1773}
1774
1775void
1776elink_cb_reg_write(struct bxe_softc *sc,
1777                   uint32_t         reg_addr,
1778                   uint32_t         val)
1779{
1780    REG_WR(sc, reg_addr, val);
1781}
1782
1783void
1784elink_cb_reg_wb_write(struct bxe_softc *sc,
1785                      uint32_t         offset,
1786                      uint32_t         *wb_write,
1787                      uint16_t         len)
1788{
1789    REG_WR_DMAE(sc, offset, wb_write, len);
1790}
1791
1792void
1793elink_cb_reg_wb_read(struct bxe_softc *sc,
1794                     uint32_t         offset,
1795                     uint32_t         *wb_write,
1796                     uint16_t         len)
1797{
1798    REG_RD_DMAE(sc, offset, wb_write, len);
1799}
1800
1801uint8_t
1802elink_cb_path_id(struct bxe_softc *sc)
1803{
1804    return (SC_PATH(sc));
1805}
1806
1807void
1808elink_cb_event_log(struct bxe_softc     *sc,
1809                   const elink_log_id_t elink_log_id,
1810                   ...)
1811{
1812    /* XXX */
1813    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1814}
1815
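/* drive one of the two configurable SPIO pins: output low/high or float */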
1816static int
1817bxe_set_spio(struct bxe_softc *sc,
1818             int              spio,
1819             uint32_t         mode)
1820{
1821    uint32_t spio_reg;
1822
1823    /* Only 2 SPIOs are configurable */
1824    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1825        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1826        return (-1);
1827    }
1828
1829    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1830
1831    /* read SPIO and mask except the float bits */
1832    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1833
1834    switch (mode) {
1835    case MISC_SPIO_OUTPUT_LOW:
1836        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1837        /* clear FLOAT and set CLR */
1838        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1839        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1840        break;
1841
1842    case MISC_SPIO_OUTPUT_HIGH:
1843        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1844        /* clear FLOAT and set SET */
1845        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1846        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1847        break;
1848
1849    case MISC_SPIO_INPUT_HI_Z:
1850        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1851        /* set FLOAT */
1852        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1853        break;
1854
1855    default:
1856        break;
1857    }
1858
1859    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1860    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1861
1862    return (0);
1863}
1864
1865static int
1866bxe_gpio_read(struct bxe_softc *sc,
1867              int              gpio_num,
1868              uint8_t          port)
1869{
1870    /* The GPIO should be swapped if swap register is set and active */
1871    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1872                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1873    int gpio_shift = (gpio_num +
1874                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1875    uint32_t gpio_mask = (1 << gpio_shift);
1876    uint32_t gpio_reg;
1877
1878    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1879        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1880            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1881            gpio_mask);
1882        return (-1);
1883    }
1884
1885    /* read GPIO value */
1886    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1887
1888    /* get the requested pin value */
1889    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1890}
1891
1892static int
1893bxe_gpio_write(struct bxe_softc *sc,
1894               int              gpio_num,
1895               uint32_t         mode,
1896               uint8_t          port)
1897{
1898    /* The GPIO should be swapped if swap register is set and active */
1899    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1900                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1901    int gpio_shift = (gpio_num +
1902                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1903    uint32_t gpio_mask = (1 << gpio_shift);
1904    uint32_t gpio_reg;
1905
1906    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1907        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1908            " gpio_shift %d gpio_mask 0x%x\n",
1909            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1910        return (-1);
1911    }
1912
1913    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1914
1915    /* read GPIO and mask except the float bits */
1916    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1917
1918    switch (mode) {
1919    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1920        BLOGD(sc, DBG_PHY,
1921              "Set GPIO %d (shift %d) -> output low\n",
1922              gpio_num, gpio_shift);
1923        /* clear FLOAT and set CLR */
1924        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1925        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1926        break;
1927
1928    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1929        BLOGD(sc, DBG_PHY,
1930              "Set GPIO %d (shift %d) -> output high\n",
1931              gpio_num, gpio_shift);
1932        /* clear FLOAT and set SET */
1933        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1934        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1935        break;
1936
1937    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1938        BLOGD(sc, DBG_PHY,
1939              "Set GPIO %d (shift %d) -> input\n",
1940              gpio_num, gpio_shift);
1941        /* set FLOAT */
1942        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1943        break;
1944
1945    default:
1946        break;
1947    }
1948
1949    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1950    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1951
1952    return (0);
1953}
1954
1955static int
1956bxe_gpio_mult_write(struct bxe_softc *sc,
1957                    uint8_t          pins,
1958                    uint32_t         mode)
1959{
1960    uint32_t gpio_reg;
1961
1962    /* any port swapping should be handled by caller */
1963
1964    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1965
1966    /* read GPIO and mask except the float bits */
1967    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1968    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1969    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1970    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1971
1972    switch (mode) {
1973    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1974        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
1975        /* set CLR */
1976        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
1977        break;
1978
1979    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1980        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
1981        /* set SET */
1982        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
1983        break;
1984
1985    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1986        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
1987        /* set FLOAT */
1988        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1989        break;
1990
1991    default:
1992        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
1993            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
1994        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1995        return (-1);
1996    }
1997
1998    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1999    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2000
2001    return (0);
2002}
2003
2004static int
2005bxe_gpio_int_write(struct bxe_softc *sc,
2006                   int              gpio_num,
2007                   uint32_t         mode,
2008                   uint8_t          port)
2009{
2010    /* The GPIO should be swapped if swap register is set and active */
2011    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2012                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2013    int gpio_shift = (gpio_num +
2014                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2015    uint32_t gpio_mask = (1 << gpio_shift);
2016    uint32_t gpio_reg;
2017
2018    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2019        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2020            " gpio_shift %d gpio_mask 0x%x\n",
2021            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2022        return (-1);
2023    }
2024
2025    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2026
2027    /* read GPIO int */
2028    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2029
2030    switch (mode) {
2031    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2032        BLOGD(sc, DBG_PHY,
2033              "Clear GPIO INT %d (shift %d) -> output low\n",
2034              gpio_num, gpio_shift);
2035        /* clear SET and set CLR */
2036        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2037        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2038        break;
2039
2040    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2041        BLOGD(sc, DBG_PHY,
2042              "Set GPIO INT %d (shift %d) -> output high\n",
2043              gpio_num, gpio_shift);
2044        /* clear CLR and set SET */
2045        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2046        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2047        break;
2048
2049    default:
2050        break;
2051    }
2052
2053    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2054    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2055
2056    return (0);
2057}
2058
2059uint32_t
2060elink_cb_gpio_read(struct bxe_softc *sc,
2061                   uint16_t         gpio_num,
2062                   uint8_t          port)
2063{
2064    return (bxe_gpio_read(sc, gpio_num, port));
2065}
2066
2067uint8_t
2068elink_cb_gpio_write(struct bxe_softc *sc,
2069                    uint16_t         gpio_num,
2070                    uint8_t          mode, /* 0=low 1=high */
2071                    uint8_t          port)
2072{
2073    return (bxe_gpio_write(sc, gpio_num, mode, port));
2074}
2075
2076uint8_t
2077elink_cb_gpio_mult_write(struct bxe_softc *sc,
2078                         uint8_t          pins,
2079                         uint8_t          mode) /* 0=low 1=high */
2080{
2081    return (bxe_gpio_mult_write(sc, pins, mode));
2082}
2083
2084uint8_t
2085elink_cb_gpio_int_write(struct bxe_softc *sc,
2086                        uint16_t         gpio_num,
2087                        uint8_t          mode, /* 0=low 1=high */
2088                        uint8_t          port)
2089{
2090    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2091}
2092
2093void
2094elink_cb_notify_link_changed(struct bxe_softc *sc)
2095{
2096    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2097                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2098}
2099
2100/* send the MCP a request, block until there is a reply */
2101uint32_t
2102elink_cb_fw_command(struct bxe_softc *sc,
2103                    uint32_t         command,
2104                    uint32_t         param)
2105{
2106    int mb_idx = SC_FW_MB_IDX(sc);
2107    uint32_t seq;
2108    uint32_t rc = 0;
2109    uint32_t cnt = 1;
2110    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2111
2112    BXE_FWMB_LOCK(sc);
2113
2114    seq = ++sc->fw_seq;
2115    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2116    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2117
2118    BLOGD(sc, DBG_PHY,
2119          "wrote command 0x%08x to FW MB param 0x%08x\n",
2120          (command | seq), param);
2121
2122    /* Let the FW do its magic. Give it up to 5 seconds... */
2123    do {
2124        DELAY(delay * 1000);
2125        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2126    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2127
2128    BLOGD(sc, DBG_PHY,
2129          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2130          cnt*delay, rc, seq);
2131
2132    /* is this a reply to our command? */
2133    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2134        rc &= FW_MSG_CODE_MASK;
2135    } else {
2136        /* Ruh-roh! */
2137        BLOGE(sc, "FW failed to respond!\n");
2138        // XXX bxe_fw_dump(sc);
2139        rc = 0;
2140    }
2141
2142    BXE_FWMB_UNLOCK(sc);
2143    return (rc);
2144}
2145
2146static uint32_t
2147bxe_fw_command(struct bxe_softc *sc,
2148               uint32_t         command,
2149               uint32_t         param)
2150{
2151    return (elink_cb_fw_command(sc, command, param));
2152}
2153
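/* helpers that write driver state into the STORM processors' internal memory */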
2154static void
2155__storm_memset_dma_mapping(struct bxe_softc *sc,
2156                           uint32_t         addr,
2157                           bus_addr_t       mapping)
2158{
2159    REG_WR(sc, addr, U64_LO(mapping));
2160    REG_WR(sc, (addr + 4), U64_HI(mapping));
2161}
2162
2163static void
2164storm_memset_spq_addr(struct bxe_softc *sc,
2165                      bus_addr_t       mapping,
2166                      uint16_t         abs_fid)
2167{
2168    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2169                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2170    __storm_memset_dma_mapping(sc, addr, mapping);
2171}
2172
2173static void
2174storm_memset_vf_to_pf(struct bxe_softc *sc,
2175                      uint16_t         abs_fid,
2176                      uint16_t         pf_id)
2177{
2178    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2179    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2180    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2181    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2182}
2183
2184static void
2185storm_memset_func_en(struct bxe_softc *sc,
2186                     uint16_t         abs_fid,
2187                     uint8_t          enable)
2188{
2189    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2190    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2191    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2192    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2193}
2194
2195static void
2196storm_memset_eq_data(struct bxe_softc       *sc,
2197                     struct event_ring_data *eq_data,
2198                     uint16_t               pfid)
2199{
2200    uint32_t addr;
2201    size_t size;
2202
2203    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2204    size = sizeof(struct event_ring_data);
2205    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2206}
2207
2208static void
2209storm_memset_eq_prod(struct bxe_softc *sc,
2210                     uint16_t         eq_prod,
2211                     uint16_t         pfid)
2212{
2213    uint32_t addr = (BAR_CSTRORM_INTMEM +
2214                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2215    REG_WR16(sc, addr, eq_prod);
2216}
2217
2218/*
2219 * Post a slowpath command.
2220 *
2221 * A slowpath command is used to propagate a configuration change through
2222 * the controller in a controlled manner, allowing each STORM processor and
2223 * other H/W blocks to phase in the change.  The commands sent on the
2224 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2225 * completion of the ramrod will occur in different ways.  Here's a
2226 * breakdown of ramrods and how they complete:
2227 *
2228 * RAMROD_CMD_ID_ETH_PORT_SETUP
2229 *   Used to setup the leading connection on a port.  Completes on the
2230 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2231 *
2232 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2233 *   Used to setup an additional connection on a port.  Completes on the
2234 *   RCQ of the multi-queue/RSS connection being initialized.
2235 *
2236 * RAMROD_CMD_ID_ETH_STAT_QUERY
2237 *   Used to force the storm processors to update the statistics database
2238 *   in host memory.  This ramrod is sent on the leading connection CID and
2239 *   completes as an index increment of the CSTORM on the default status
2240 *   block.
2241 *
2242 * RAMROD_CMD_ID_ETH_UPDATE
2243 *   Used to update the state of the leading connection, usually to update
2244 *   the RSS indirection table.  Completes on the RCQ of the leading
2245 *   connection. (Not currently used under FreeBSD until OS support becomes
2246 *   available.)
2247 *
2248 * RAMROD_CMD_ID_ETH_HALT
2249 *   Used when tearing down a connection prior to driver unload.  Completes
2250 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2251 *   use this on the leading connection.
2252 *
2253 * RAMROD_CMD_ID_ETH_SET_MAC
2254 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2255 *   the RCQ of the leading connection.
2256 *
2257 * RAMROD_CMD_ID_ETH_CFC_DEL
2258 *   Used when tearing down a connection prior to driver unload.  Completes
2259 *   on the RCQ of the leading connection (since the current connection
2260 *   has been completely removed from controller memory).
2261 *
2262 * RAMROD_CMD_ID_ETH_PORT_DEL
2263 *   Used to tear down the leading connection prior to driver unload,
2264 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2265 *   default status block.
2266 *
2267 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2268 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2269 *   RSS connection that is being offloaded.  (Not currently used under
2270 *   FreeBSD.)
2271 *
2272 * There can only be one command pending per function.
2273 *
2274 * Returns:
2275 *   0 = Success, !0 = Failure.
2276 */
2277
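/*
 * Illustrative sketch only (not taken verbatim from this driver): a ramrod
 * is posted through bxe_sp_post() below, roughly as
 *
 *     bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
 *                 U64_HI(data_mapping), U64_LO(data_mapping),
 *                 ETH_CONNECTION_TYPE);
 *
 * where data_mapping is assumed to be the bus address of the command's
 * private data.
 */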
2278/* must be called under the spq lock */
2279static inline
2280struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2281{
2282    struct eth_spe *next_spe = sc->spq_prod_bd;
2283
2284    if (sc->spq_prod_bd == sc->spq_last_bd) {
2285        /* wrap back to the first eth_spq */
2286        sc->spq_prod_bd = sc->spq;
2287        sc->spq_prod_idx = 0;
2288    } else {
2289        sc->spq_prod_bd++;
2290        sc->spq_prod_idx++;
2291    }
2292
2293    return (next_spe);
2294}
2295
2296/* must be called under the spq lock */
2297static inline
2298void bxe_sp_prod_update(struct bxe_softc *sc)
2299{
2300    int func = SC_FUNC(sc);
2301
2302    /*
2303     * Make sure that BD data is updated before writing the producer.
2304     * BD data is written to the memory, the producer is read from the
2305     * memory, thus we need a full memory barrier to ensure the ordering.
2306     */
2307    mb();
2308
2309    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2310             sc->spq_prod_idx);
2311
2312    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2313                      BUS_SPACE_BARRIER_WRITE);
2314}
2315
2316/**
2317 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2318 *
2319 * @cmd:      command to check
2320 * @cmd_type: command type
2321 */
2322static inline
2323int bxe_is_contextless_ramrod(int cmd,
2324                              int cmd_type)
2325{
2326    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2327        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2328        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2329        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2330        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2331        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2332        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2333        return (TRUE);
2334    } else {
2335        return (FALSE);
2336    }
2337}
2338
2339/**
2340 * bxe_sp_post - place a single command on an SP ring
2341 *
2342 * @sc:         driver handle
2343 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2344 * @cid:        SW CID the command is related to
2345 * @data_hi:    command private data address (high 32 bits)
2346 * @data_lo:    command private data address (low 32 bits)
2347 * @cmd_type:   command type (e.g. NONE, ETH)
2348 *
2349 * SP data is handled as if it's always an address pair, thus data fields are
2350 * not swapped to little endian in upper functions. Instead this function swaps
2351 * data as if it's two uint32 fields.
2352 */
2353int
2354bxe_sp_post(struct bxe_softc *sc,
2355            int              command,
2356            int              cid,
2357            uint32_t         data_hi,
2358            uint32_t         data_lo,
2359            int              cmd_type)
2360{
2361    struct eth_spe *spe;
2362    uint16_t type;
2363    int common;
2364
2365    common = bxe_is_contextless_ramrod(command, cmd_type);
2366
2367    BXE_SP_LOCK(sc);
2368
2369    if (common) {
2370        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2371            BLOGE(sc, "EQ ring is full!\n");
2372            BXE_SP_UNLOCK(sc);
2373            return (-1);
2374        }
2375    } else {
2376        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2377            BLOGE(sc, "SPQ ring is full!\n");
2378            BXE_SP_UNLOCK(sc);
2379            return (-1);
2380        }
2381    }
2382
2383    spe = bxe_sp_get_next(sc);
2384
2385    /* CID needs the port number to be encoded in it */
2386    spe->hdr.conn_and_cmd_data =
2387        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2388
2389    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2390
2391    /* TBD: Check if it works for VFs */
2392    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2393             SPE_HDR_T_FUNCTION_ID);
2394
2395    spe->hdr.type = htole16(type);
2396
2397    spe->data.update_data_addr.hi = htole32(data_hi);
2398    spe->data.update_data_addr.lo = htole32(data_lo);
2399
2400    /*
2401     * It's ok if the actual decrement is issued towards the memory
2402     * somewhere between the lock and unlock. Thus no more explicit
2403     * memory barrier is needed.
2404     */
2405    if (common) {
2406        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2407    } else {
2408        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2409    }
2410
2411    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2412    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2413          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2414    BLOGD(sc, DBG_SP,
2415          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2416          sc->spq_prod_idx,
2417          (uint32_t)U64_HI(sc->spq_dma.paddr),
2418          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2419          command,
2420          common,
2421          HW_CID(sc, cid),
2422          data_hi,
2423          data_lo,
2424          type,
2425          atomic_load_acq_long(&sc->cq_spq_left),
2426          atomic_load_acq_long(&sc->eq_spq_left));
2427
2428    bxe_sp_prod_update(sc);
2429
2430    BXE_SP_UNLOCK(sc);
2431    return (0);
2432}
2433
2434/**
2435 * bxe_debug_print_ind_table - prints the indirection table configuration.
2436 *
2437 * @sc: driver handle
2438 * @p:  pointer to rss configuration
2439 */
2440
2441/*
2442 * FreeBSD Device probe function.
2443 *
2444 * Compares the device found to the driver's list of supported devices and
2445 * reports back to the BSD loader whether this is the right driver for the device.
2446 * This is the driver entry function called from the "kldload" command.
2447 *
2448 * Returns:
2449 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2450 */
2451static int
2452bxe_probe(device_t dev)
2453{
2454    struct bxe_softc *sc;
2455    struct bxe_device_type *t;
2456    char *descbuf;
2457    uint16_t did, sdid, svid, vid;
2458
2459    /* Find our device structure */
2460    sc = device_get_softc(dev);
2461    sc->dev = dev;
2462    t = bxe_devs;
2463
2464    /* Get the data for the device to be probed. */
2465    vid  = pci_get_vendor(dev);
2466    did  = pci_get_device(dev);
2467    svid = pci_get_subvendor(dev);
2468    sdid = pci_get_subdevice(dev);
2469
2470    BLOGD(sc, DBG_LOAD,
2471          "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2472          "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2473
2474    /* Look through the list of known devices for a match. */
2475    while (t->bxe_name != NULL) {
2476        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2477            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2478            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2479            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2480            if (descbuf == NULL)
2481                return (ENOMEM);
2482
2483            /* Print out the device identity. */
2484            snprintf(descbuf, BXE_DEVDESC_MAX,
2485                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2486                     (((pci_read_config(dev, PCIR_REVID, 4) &
2487                        0xf0) >> 4) + 'A'),
2488                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2489                     BXE_DRIVER_VERSION);
2490
2491            device_set_desc_copy(dev, descbuf);
2492            free(descbuf, M_TEMP);
2493            return (BUS_PROBE_DEFAULT);
2494        }
2495        t++;
2496    }
2497
2498    return (ENXIO);
2499}
2500
2501static void
2502bxe_init_mutexes(struct bxe_softc *sc)
2503{
2504#ifdef BXE_CORE_LOCK_SX
2505    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2506             "bxe%d_core_lock", sc->unit);
2507    sx_init(&sc->core_sx, sc->core_sx_name);
2508#else
2509    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2510             "bxe%d_core_lock", sc->unit);
2511    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2512#endif
2513
2514    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2515             "bxe%d_sp_lock", sc->unit);
2516    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2517
2518    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2519             "bxe%d_dmae_lock", sc->unit);
2520    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2521
2522    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2523             "bxe%d_phy_lock", sc->unit);
2524    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2525
2526    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2527             "bxe%d_fwmb_lock", sc->unit);
2528    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2529
2530    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2531             "bxe%d_print_lock", sc->unit);
2532    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2533
2534    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2535             "bxe%d_stats_lock", sc->unit);
2536    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2537
2538    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2539             "bxe%d_mcast_lock", sc->unit);
2540    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2541}
2542
2543static void
2544bxe_release_mutexes(struct bxe_softc *sc)
2545{
2546#ifdef BXE_CORE_LOCK_SX
2547    sx_destroy(&sc->core_sx);
2548#else
2549    if (mtx_initialized(&sc->core_mtx)) {
2550        mtx_destroy(&sc->core_mtx);
2551    }
2552#endif
2553
2554    if (mtx_initialized(&sc->sp_mtx)) {
2555        mtx_destroy(&sc->sp_mtx);
2556    }
2557
2558    if (mtx_initialized(&sc->dmae_mtx)) {
2559        mtx_destroy(&sc->dmae_mtx);
2560    }
2561
2562    if (mtx_initialized(&sc->port.phy_mtx)) {
2563        mtx_destroy(&sc->port.phy_mtx);
2564    }
2565
2566    if (mtx_initialized(&sc->fwmb_mtx)) {
2567        mtx_destroy(&sc->fwmb_mtx);
2568    }
2569
2570    if (mtx_initialized(&sc->print_mtx)) {
2571        mtx_destroy(&sc->print_mtx);
2572    }
2573
2574    if (mtx_initialized(&sc->stats_mtx)) {
2575        mtx_destroy(&sc->stats_mtx);
2576    }
2577
2578    if (mtx_initialized(&sc->mcast_mtx)) {
2579        mtx_destroy(&sc->mcast_mtx);
2580    }
2581}
2582
2583static void
2584bxe_tx_disable(struct bxe_softc* sc)
2585{
2586    if_t ifp = sc->ifp;
2587
2588    /* tell the stack the driver is stopped and TX queue is full */
2589    if (ifp != NULL) {
2590        if_setdrvflags(ifp, 0);
2591    }
2592}
2593
2594static void
2595bxe_drv_pulse(struct bxe_softc *sc)
2596{
2597    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2598             sc->fw_drv_pulse_wr_seq);
2599}
2600
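/* number of TX buffer descriptors still available on the ring */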
2601static inline uint16_t
2602bxe_tx_avail(struct bxe_softc *sc,
2603             struct bxe_fastpath *fp)
2604{
2605    int16_t  used;
2606    uint16_t prod;
2607    uint16_t cons;
2608
2609    prod = fp->tx_bd_prod;
2610    cons = fp->tx_bd_cons;
2611
2612    used = SUB_S16(prod, cons);
2613
2614    return (int16_t)(sc->tx_ring_size) - used;
2615}
2616
2617static inline int
2618bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2619{
2620    uint16_t hw_cons;
2621
2622    mb(); /* status block fields can change */
2623    hw_cons = le16toh(*fp->tx_cons_sb);
2624    return (hw_cons != fp->tx_pkt_cons);
2625}
2626
2627static inline uint8_t
2628bxe_has_tx_work(struct bxe_fastpath *fp)
2629{
2630    /* expand this for multi-cos if ever supported */
2631    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2632}
2633
2634static inline int
2635bxe_has_rx_work(struct bxe_fastpath *fp)
2636{
2637    uint16_t rx_cq_cons_sb;
2638
2639    mb(); /* status block fields can change */
2640    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2641    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2642        rx_cq_cons_sb++;
2643    return (fp->rx_cq_cons != rx_cq_cons_sb);
2644}
2645
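/* handle a slowpath (ramrod) completion that arrived on a fastpath RCQ */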
2646static void
2647bxe_sp_event(struct bxe_softc    *sc,
2648             struct bxe_fastpath *fp,
2649             union eth_rx_cqe    *rr_cqe)
2650{
2651    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2652    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2653    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2654    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2655
2656    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2657          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2658
2659    switch (command) {
2660    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2661        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2662        drv_cmd = ECORE_Q_CMD_UPDATE;
2663        break;
2664
2665    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2666        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2667        drv_cmd = ECORE_Q_CMD_SETUP;
2668        break;
2669
2670    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2671        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2672        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2673        break;
2674
2675    case (RAMROD_CMD_ID_ETH_HALT):
2676        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2677        drv_cmd = ECORE_Q_CMD_HALT;
2678        break;
2679
2680    case (RAMROD_CMD_ID_ETH_TERMINATE):
2681        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2682        drv_cmd = ECORE_Q_CMD_TERMINATE;
2683        break;
2684
2685    case (RAMROD_CMD_ID_ETH_EMPTY):
2686        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2687        drv_cmd = ECORE_Q_CMD_EMPTY;
2688        break;
2689
2690    default:
2691        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2692              command, fp->index);
2693        return;
2694    }
2695
2696    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2697        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2698        /*
2699         * q_obj->complete_cmd() failure means that this was
2700         * an unexpected completion.
2701         *
2702         * In this case we don't want to increase the sc->spq_left
2703         * because apparently we haven't sent this command in the first
2704         * place.
2705         */
2706        // bxe_panic(sc, ("Unexpected SP completion\n"));
2707        return;
2708    }
2709
2710    atomic_add_acq_long(&sc->cq_spq_left, 1);
2711
2712    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2713          atomic_load_acq_long(&sc->cq_spq_left));
2714}
2715
2716/*
2717 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2718 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2719 * the current aggregation queue as in-progress.
2720 */
2721static void
2722bxe_tpa_start(struct bxe_softc            *sc,
2723              struct bxe_fastpath         *fp,
2724              uint16_t                    queue,
2725              uint16_t                    cons,
2726              uint16_t                    prod,
2727              struct eth_fast_path_rx_cqe *cqe)
2728{
2729    struct bxe_sw_rx_bd tmp_bd;
2730    struct bxe_sw_rx_bd *rx_buf;
2731    struct eth_rx_bd *rx_bd;
2732    int max_agg_queues;
2733    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2734    uint16_t index;
2735
2736    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2737                       "cons=%d prod=%d\n",
2738          fp->index, queue, cons, prod);
2739
2740    max_agg_queues = MAX_AGG_QS(sc);
2741
2742    KASSERT((queue < max_agg_queues),
2743            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2744             fp->index, queue, max_agg_queues));
2745
2746    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2747            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2748             fp->index, queue));
2749
2750    /* copy the existing mbuf and mapping from the TPA pool */
2751    tmp_bd = tpa_info->bd;
2752
2753    if (tmp_bd.m == NULL) {
2754        uint32_t *tmp;
2755
2756        tmp = (uint32_t *)cqe;
2757
2758        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2759              fp->index, queue, cons, prod);
2760        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2761            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2762
2763        /* XXX Error handling? */
2764        return;
2765    }
2766
2767    /* change the TPA queue to the start state */
2768    tpa_info->state            = BXE_TPA_STATE_START;
2769    tpa_info->placement_offset = cqe->placement_offset;
2770    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2771    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2772    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2773
2774    fp->rx_tpa_queue_used |= (1 << queue);
2775
2776    /*
2777     * If all the buffer descriptors are filled with mbufs then fill in
2778     * the current consumer index with a new BD. Else if a maximum Rx
2779     * buffer limit is imposed then fill in the next producer index.
2780     */
2781    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2782                prod : cons;
2783
2784    /* move the received mbuf and mapping to TPA pool */
2785    tpa_info->bd = fp->rx_mbuf_chain[cons];
2786
2787    /* release any existing RX BD mbuf mappings */
2788    if (cons != index) {
2789        rx_buf = &fp->rx_mbuf_chain[cons];
2790
2791        if (rx_buf->m_map != NULL) {
2792            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2793                            BUS_DMASYNC_POSTREAD);
2794            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2795        }
2796
2797        /*
2798         * We get here when the maximum number of rx buffers is less than
2799         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2800         * it out here without concern of a memory leak.
2801         */
2802        fp->rx_mbuf_chain[cons].m = NULL;
2803    }
2804
2805    /* update the Rx SW BD with the mbuf info from the TPA pool */
2806    fp->rx_mbuf_chain[index] = tmp_bd;
2807
2808    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2809    rx_bd = &fp->rx_chain[index];
2810    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2811    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2812}
2813
2814/*
2815 * When a TPA aggregation is completed, loop through the individual mbufs
2816 * of the aggregation, combining them into a single mbuf which will be sent
2817 * up the stack. Refill all freed SGEs with mbufs as we go along.
2818 */
2819static int
2820bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2821                   struct bxe_fastpath       *fp,
2822                   struct bxe_sw_tpa_info    *tpa_info,
2823                   uint16_t                  queue,
2824                   uint16_t                  pages,
2825                   struct mbuf               *m,
2826                   struct eth_end_agg_rx_cqe *cqe,
2827                   uint16_t                  cqe_idx)
2828{
2829    struct mbuf *m_frag;
2830    uint32_t frag_len, frag_size, i;
2831    uint16_t sge_idx;
2832    int rc = 0;
2833    int j;
2834
2835    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2836
2837    BLOGD(sc, DBG_LRO,
2838          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2839          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2840
2841    /* make sure the aggregated frame is not too big to handle */
2842    if (pages > 8 * PAGES_PER_SGE) {
2843
2844        uint32_t *tmp = (uint32_t *)cqe;
2845
2846        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2847                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2848              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2849              tpa_info->len_on_bd, frag_size);
2850
2851        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2852            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2853
2854        bxe_panic(sc, ("sge page count error\n"));
2855        return (EINVAL);
2856    }
2857
2858    /*
2859     * Scan through the scatter gather list pulling individual mbufs into a
2860     * single mbuf for the host stack.
2861     */
2862    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2863        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2864
2865        /*
2866         * Firmware gives the indices of the SGE as if the ring is an array
2867         * (meaning that the "next" element will consume 2 indices).
2868         */
2869        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2870
2871        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2872                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2873              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2874
2875        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2876
2877        /* allocate a new mbuf for the SGE */
2878        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2879        if (rc) {
2880            /* Leave all remaining SGEs in the ring! */
2881            return (rc);
2882        }
2883
2884        /* update the fragment length */
2885        m_frag->m_len = frag_len;
2886
2887        /* concatenate the fragment to the head mbuf */
2888        m_cat(m, m_frag);
2889        fp->eth_q_stats.mbuf_alloc_sge--;
2890
2891        /* update the TPA mbuf size and remaining fragment size */
2892        m->m_pkthdr.len += frag_len;
2893        frag_size -= frag_len;
2894    }
2895
2896    BLOGD(sc, DBG_LRO,
2897          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2898          fp->index, queue, frag_size);
2899
2900    return (rc);
2901}
2902
2903static inline void
2904bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2905{
2906    int i, j;
2907
2908    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2909        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2910
2911        for (j = 0; j < 2; j++) {
2912            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2913            idx--;
2914        }
2915    }
2916}
2917
2918static inline void
2919bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2920{
2921    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2922    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2923
2924    /*
2925     * Clear the last two indices in each page. These are the indices that
2926     * correspond to the "next" element, hence will never be indicated and
2927     * should be removed from the calculations.
2928     */
2929    bxe_clear_sge_mask_next_elems(fp);
2930}
2931
2932static inline void
2933bxe_update_last_max_sge(struct bxe_fastpath *fp,
2934                        uint16_t            idx)
2935{
2936    uint16_t last_max = fp->last_max_sge;
2937
2938    if (SUB_S16(idx, last_max) > 0) {
2939        fp->last_max_sge = idx;
2940    }
2941}
2942
2943static inline void
2944bxe_update_sge_prod(struct bxe_softc          *sc,
2945                    struct bxe_fastpath       *fp,
2946                    uint16_t                  sge_len,
2947                    union eth_sgl_or_raw_data *cqe)
2948{
2949    uint16_t last_max, last_elem, first_elem;
2950    uint16_t delta = 0;
2951    uint16_t i;
2952
2953    if (!sge_len) {
2954        return;
2955    }
2956
2957    /* first mark all used pages */
2958    for (i = 0; i < sge_len; i++) {
2959        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2960                            RX_SGE(le16toh(cqe->sgl[i])));
2961    }
2962
2963    BLOGD(sc, DBG_LRO,
2964          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2965          fp->index, sge_len - 1,
2966          le16toh(cqe->sgl[sge_len - 1]));
2967
2968    /* assume that the last SGE index is the biggest */
2969    bxe_update_last_max_sge(fp,
2970                            le16toh(cqe->sgl[sge_len - 1]));
2971
2972    last_max = RX_SGE(fp->last_max_sge);
2973    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2974    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2975
2976    /* if ring is not full */
2977    if (last_elem + 1 != first_elem) {
2978        last_elem++;
2979    }
2980
2981    /* now update the prod */
2982    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2983        if (__predict_true(fp->sge_mask[i])) {
2984            break;
2985        }
2986
2987        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
2988        delta += BIT_VEC64_ELEM_SZ;
2989    }
2990
2991    if (delta > 0) {
2992        fp->rx_sge_prod += delta;
2993        /* clear page-end entries */
2994        bxe_clear_sge_mask_next_elems(fp);
2995    }
2996
2997    BLOGD(sc, DBG_LRO,
2998          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
2999          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3000}
3001
3002/*
3003 * The aggregation on the current TPA queue has completed. Pull the individual
3004 * mbuf fragments together into a single mbuf, perform all necessary checksum
3005 * calculations, and send the resulting mbuf to the stack.
3006 */
3007static void
3008bxe_tpa_stop(struct bxe_softc          *sc,
3009             struct bxe_fastpath       *fp,
3010             struct bxe_sw_tpa_info    *tpa_info,
3011             uint16_t                  queue,
3012             uint16_t                  pages,
3013             struct eth_end_agg_rx_cqe *cqe,
3014             uint16_t                  cqe_idx)
3015{
3016    if_t ifp = sc->ifp;
3017    struct mbuf *m;
3018    int rc = 0;
3019
3020    BLOGD(sc, DBG_LRO,
3021          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3022          fp->index, queue, tpa_info->placement_offset,
3023          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3024
3025    m = tpa_info->bd.m;
3026
3027    /* allocate a replacement before modifying existing mbuf */
3028    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3029    if (rc) {
3030        /* drop the frame and log an error */
3031        fp->eth_q_stats.rx_soft_errors++;
3032        goto bxe_tpa_stop_exit;
3033    }
3034
3035    /* we have a replacement, fixup the current mbuf */
3036    m_adj(m, tpa_info->placement_offset);
3037    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3038
3039    /* mark the checksums valid (taken care of by the firmware) */
3040    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3041    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3042    m->m_pkthdr.csum_data = 0xffff;
3043    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3044                               CSUM_IP_VALID   |
3045                               CSUM_DATA_VALID |
3046                               CSUM_PSEUDO_HDR);
3047
3048    /* aggregate all of the SGEs into a single mbuf */
3049    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3050    if (rc) {
3051        /* drop the packet and log an error */
3052        fp->eth_q_stats.rx_soft_errors++;
3053        m_freem(m);
3054    } else {
3055        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3056            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3057            m->m_flags |= M_VLANTAG;
3058        }
3059
3060        /* assign the packet to this interface */
3061        if_setrcvif(m, ifp);
3062
3063#if __FreeBSD_version >= 800000
3064        /* specify what RSS queue was used for this flow */
3065        m->m_pkthdr.flowid = fp->index;
3066        BXE_SET_FLOWID(m);
3067#endif
3068
3069        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3070        fp->eth_q_stats.rx_tpa_pkts++;
3071
3072        /* pass the frame to the stack */
3073        if_input(ifp, m);
3074    }
3075
3076    /* we passed an mbuf up the stack or dropped the frame */
3077    fp->eth_q_stats.mbuf_alloc_tpa--;
3078
3079bxe_tpa_stop_exit:
3080
3081    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3082    fp->rx_tpa_queue_used &= ~(1 << queue);
3083}
3084
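/*
 * Gather the SGE fragments of a non-TPA frame that extends past the first
 * buffer descriptor into the head mbuf, refilling each SGE as we go.
 */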
3085static uint8_t
3086bxe_service_rxsgl(
3087                 struct bxe_fastpath *fp,
3088                 uint16_t len,
3089                 uint16_t lenonbd,
3090                 struct mbuf *m,
3091                 struct eth_fast_path_rx_cqe *cqe_fp)
3092{
3093    struct mbuf *m_frag;
3094    uint16_t frags, frag_len;
3095    uint16_t sge_idx = 0;
3096    uint16_t j;
3097    uint8_t i, rc = 0;
3098    uint32_t frag_size;
3099
3100    /* adjust the mbuf */
3101    m->m_len = lenonbd;
3102
3103    frag_size = len - lenonbd;
3104    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3105
3106    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3107        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3108
3109        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3110        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3111        m_frag->m_len = frag_len;
3112
3113       /* allocate a new mbuf for the SGE */
3114        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3115        if (rc) {
3116            /* Leave all remaining SGEs in the ring! */
3117            return (rc);
3118        }
3119        fp->eth_q_stats.mbuf_alloc_sge--;
3120
3121        /* concatenate the fragment to the head mbuf */
3122        m_cat(m, m_frag);
3123
3124        frag_size -= frag_len;
3125    }
3126
3127    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3128
3129    return rc;
3130}
3131
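/*
 * Receive path: walk the RX completion queue, handing completed frames
 * (including TPA aggregations) to the stack and replenishing RX buffers.
 */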
3132static uint8_t
3133bxe_rxeof(struct bxe_softc    *sc,
3134          struct bxe_fastpath *fp)
3135{
3136    if_t ifp = sc->ifp;
3137    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3138    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3139    int rx_pkts = 0;
3140    int rc = 0;
3141
3142    BXE_FP_RX_LOCK(fp);
3143
3144    /* CQ "next element" is of the size of the regular element */
3145    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3146    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3147        hw_cq_cons++;
3148    }
3149
3150    bd_cons = fp->rx_bd_cons;
3151    bd_prod = fp->rx_bd_prod;
3152    bd_prod_fw = bd_prod;
3153    sw_cq_cons = fp->rx_cq_cons;
3154    sw_cq_prod = fp->rx_cq_prod;
3155
3156    /*
3157     * Memory barrier necessary as speculative reads of the rx
3158     * buffer can be ahead of the index in the status block
3159     */
3160    rmb();
3161
3162    BLOGD(sc, DBG_RX,
3163          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3164          fp->index, hw_cq_cons, sw_cq_cons);
3165
3166    while (sw_cq_cons != hw_cq_cons) {
3167        struct bxe_sw_rx_bd *rx_buf = NULL;
3168        union eth_rx_cqe *cqe;
3169        struct eth_fast_path_rx_cqe *cqe_fp;
3170        uint8_t cqe_fp_flags;
3171        enum eth_rx_cqe_type cqe_fp_type;
3172        uint16_t len, lenonbd,  pad;
3173        struct mbuf *m = NULL;
3174
3175        comp_ring_cons = RCQ(sw_cq_cons);
3176        bd_prod = RX_BD(bd_prod);
3177        bd_cons = RX_BD(bd_cons);
3178
3179        cqe          = &fp->rcq_chain[comp_ring_cons];
3180        cqe_fp       = &cqe->fast_path_cqe;
3181        cqe_fp_flags = cqe_fp->type_error_flags;
3182        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3183
3184        BLOGD(sc, DBG_RX,
3185              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3186              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3187              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3188              fp->index,
3189              hw_cq_cons,
3190              sw_cq_cons,
3191              bd_prod,
3192              bd_cons,
3193              CQE_TYPE(cqe_fp_flags),
3194              cqe_fp_flags,
3195              cqe_fp->status_flags,
3196              le32toh(cqe_fp->rss_hash_result),
3197              le16toh(cqe_fp->vlan_tag),
3198              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3199              le16toh(cqe_fp->len_on_bd));
3200
3201        /* is this a slowpath msg? */
3202        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3203            bxe_sp_event(sc, fp, cqe);
3204            goto next_cqe;
3205        }
3206
3207        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3208
3209        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3210            struct bxe_sw_tpa_info *tpa_info;
3211            uint16_t frag_size, pages;
3212            uint8_t queue;
3213
3214            if (CQE_TYPE_START(cqe_fp_type)) {
3215                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3216                              bd_cons, bd_prod, cqe_fp);
3217                m = NULL; /* packet not ready yet */
3218                goto next_rx;
3219            }
3220
3221            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3222                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3223
3224            queue = cqe->end_agg_cqe.queue_index;
3225            tpa_info = &fp->rx_tpa_info[queue];
3226
3227            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3228                  fp->index, queue);
3229
3230            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3231                         tpa_info->len_on_bd);
3232            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3233
3234            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3235                         &cqe->end_agg_cqe, comp_ring_cons);
3236
3237            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3238
3239            goto next_cqe;
3240        }
3241
3242        /* non TPA */
3243
3244        /* is this an error packet? */
3245        if (__predict_false(cqe_fp_flags &
3246                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3247            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3248            fp->eth_q_stats.rx_soft_errors++;
3249            goto next_rx;
3250        }
3251
3252        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3253        lenonbd = le16toh(cqe_fp->len_on_bd);
3254        pad = cqe_fp->placement_offset;
3255
3256        m = rx_buf->m;
3257
3258        if (__predict_false(m == NULL)) {
3259            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3260                  bd_cons, fp->index);
3261            goto next_rx;
3262        }
3263
3264        /* XXX double copy if packet length under a threshold */
3265
3266        /*
3267         * If all the buffer descriptors are filled with mbufs then fill in
3268         * the current consumer index with a new BD. Else if a maximum Rx
3269         * buffer limit is imposed then fill in the next producer index.
3270         */
3271        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3272                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3273                                      bd_prod : bd_cons);
3274        if (rc != 0) {
3275
3276            /* we simply reuse the received mbuf and don't post it to the stack */
3277            m = NULL;
3278
3279            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3280                  fp->index, rc);
3281            fp->eth_q_stats.rx_soft_errors++;
3282
3283            if (sc->max_rx_bufs != RX_BD_USABLE) {
3284                /* copy this consumer index to the producer index */
3285                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3286                       sizeof(struct bxe_sw_rx_bd));
3287                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3288            }
3289
3290            goto next_rx;
3291        }
3292
3293        /* current mbuf was detached from the bd */
3294        fp->eth_q_stats.mbuf_alloc_rx--;
3295
3296        /* we allocated a replacement mbuf, fixup the current one */
3297        m_adj(m, pad);
3298        m->m_pkthdr.len = m->m_len = len;
3299
3300        if ((len > 60) && (len > lenonbd)) {
3301            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3302            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3303            if (rc)
3304                break;
3305            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3306        } else if (lenonbd < len) {
3307            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3308        }
3309
3310        /* assign the packet to this interface */
3311	if_setrcvif(m, ifp);
3312
3313        /* assume no hardware checksum has completed */
3314        m->m_pkthdr.csum_flags = 0;
3315
3316        /* validate checksum if offload enabled */
3317        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3318            /* check for a valid IP frame */
3319            if (!(cqe->fast_path_cqe.status_flags &
3320                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3321                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3322                if (__predict_false(cqe_fp_flags &
3323                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3324                    fp->eth_q_stats.rx_hw_csum_errors++;
3325                } else {
3326                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3327                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3328                }
3329            }
3330
3331            /* check for a valid TCP/UDP frame */
3332            if (!(cqe->fast_path_cqe.status_flags &
3333                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3334                if (__predict_false(cqe_fp_flags &
3335                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3336                    fp->eth_q_stats.rx_hw_csum_errors++;
3337                } else {
3338                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3339                    m->m_pkthdr.csum_data = 0xFFFF;
3340                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3341                                               CSUM_PSEUDO_HDR);
3342                }
3343            }
3344        }
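        /*
         * Summary of the flags set above for the stack: CSUM_IP_CHECKED plus
         * CSUM_IP_VALID mean the IP header checksum was verified in hardware,
         * while CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xFFFF mean
         * the full L4 (TCP/UDP) checksum, including the pseudo header, was
         * verified, so no software checksum pass is needed.
         */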
3345
3346        /* if there is a VLAN tag then flag that info */
3347        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3348            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3349            m->m_flags |= M_VLANTAG;
3350        }
3351
3352#if __FreeBSD_version >= 800000
3353        /* specify what RSS queue was used for this flow */
3354        m->m_pkthdr.flowid = fp->index;
3355        BXE_SET_FLOWID(m);
3356#endif
3357
3358next_rx:
3359
3360        bd_cons    = RX_BD_NEXT(bd_cons);
3361        bd_prod    = RX_BD_NEXT(bd_prod);
3362        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3363
3364        /* pass the frame to the stack */
3365        if (__predict_true(m != NULL)) {
3366            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3367            rx_pkts++;
3368            if_input(ifp, m);
3369        }
3370
3371next_cqe:
3372
3373        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3374        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3375
3376        /* limit spinning on the queue */
3377        if (rc != 0)
3378            break;
3379
3380        if (rx_pkts == sc->rx_budget) {
3381            fp->eth_q_stats.rx_budget_reached++;
3382            break;
3383        }
3384    } /* while work to do */
3385
3386    fp->rx_bd_cons = bd_cons;
3387    fp->rx_bd_prod = bd_prod_fw;
3388    fp->rx_cq_cons = sw_cq_cons;
3389    fp->rx_cq_prod = sw_cq_prod;
3390
3391    /* Update producers */
3392    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3393
3394    fp->eth_q_stats.rx_pkts += rx_pkts;
3395    fp->eth_q_stats.rx_calls++;
3396
3397    BXE_FP_RX_UNLOCK(fp);
3398
3399    return (sw_cq_cons != hw_cq_cons);
3400}
3401
3402static uint16_t
3403bxe_free_tx_pkt(struct bxe_softc    *sc,
3404                struct bxe_fastpath *fp,
3405                uint16_t            idx)
3406{
3407    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3408    struct eth_tx_start_bd *tx_start_bd;
3409    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3410    uint16_t new_cons;
3411    int nbd;
3412
3413    /* unmap the mbuf from non-paged memory */
3414    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3415
3416    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3417    nbd = le16toh(tx_start_bd->nbd) - 1;
3418
3419    new_cons = (tx_buf->first_bd + nbd);
3420
3421    /* free the mbuf */
3422    if (__predict_true(tx_buf->m != NULL)) {
3423        m_freem(tx_buf->m);
3424        fp->eth_q_stats.mbuf_alloc_tx--;
3425    } else {
3426        fp->eth_q_stats.tx_chain_lost_mbuf++;
3427    }
3428
3429    tx_buf->m = NULL;
3430    tx_buf->first_bd = 0;
3431
3432    return (new_cons);
3433}
3434
3435/* transmit timeout watchdog */
3436static int
3437bxe_watchdog(struct bxe_softc    *sc,
3438             struct bxe_fastpath *fp)
3439{
3440    BXE_FP_TX_LOCK(fp);
3441
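    /*
     * The check below relies on short-circuit evaluation: a timer of 0 means
     * the watchdog is disarmed, and a non-zero value after the pre-decrement
     * means it has not expired yet. Only when the timer counts down from 1 to
     * 0 does execution fall through to the reset path.
     */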
3442    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3443        BXE_FP_TX_UNLOCK(fp);
3444        return (0);
3445    }
3446
3447    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3448
3449    BXE_FP_TX_UNLOCK(fp);
3450
3451    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3452    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3453
3454    return (-1);
3455}
3456
3457/* processes transmit completions */
3458static uint8_t
3459bxe_txeof(struct bxe_softc    *sc,
3460          struct bxe_fastpath *fp)
3461{
3462    if_t ifp = sc->ifp;
3463    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3464    uint16_t tx_bd_avail;
3465
3466    BXE_FP_TX_LOCK_ASSERT(fp);
3467
3468    bd_cons = fp->tx_bd_cons;
3469    hw_cons = le16toh(*fp->tx_cons_sb);
3470    sw_cons = fp->tx_pkt_cons;
3471
3472    while (sw_cons != hw_cons) {
3473        pkt_cons = TX_BD(sw_cons);
3474
3475        BLOGD(sc, DBG_TX,
3476              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3477              fp->index, hw_cons, sw_cons, pkt_cons);
3478
3479        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3480
3481        sw_cons++;
3482    }
3483
3484    fp->tx_pkt_cons = sw_cons;
3485    fp->tx_bd_cons  = bd_cons;
3486
3487    BLOGD(sc, DBG_TX,
3488          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3489          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3490
3491    mb();
3492
3493    tx_bd_avail = bxe_tx_avail(sc, fp);
3494
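    /*
     * Flow control toward the stack: when fewer than BXE_TX_CLEANUP_THRESHOLD
     * BDs remain free, mark the transmit path as busy (IFF_DRV_OACTIVE) so the
     * stack backs off; once completions free enough BDs again, clear the flag.
     */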
3495    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3496        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3497    } else {
3498        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3499    }
3500
3501    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3502        /* reset the watchdog timer if there are pending transmits */
3503        fp->watchdog_timer = BXE_TX_TIMEOUT;
3504        return (TRUE);
3505    } else {
3506        /* clear watchdog when there are no pending transmits */
3507        fp->watchdog_timer = 0;
3508        return (FALSE);
3509    }
3510}
3511
3512static void
3513bxe_drain_tx_queues(struct bxe_softc *sc)
3514{
3515    struct bxe_fastpath *fp;
3516    int i, count;
3517
3518    /* wait until all TX fastpath tasks have completed */
3519    for (i = 0; i < sc->num_queues; i++) {
3520        fp = &sc->fp[i];
3521
3522        count = 1000;
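        /*
         * 1000 iterations of DELAY(1000) usec below give each queue roughly
         * one second to drain before the driver panics.
         */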
3523
3524        while (bxe_has_tx_work(fp)) {
3525
3526            BXE_FP_TX_LOCK(fp);
3527            bxe_txeof(sc, fp);
3528            BXE_FP_TX_UNLOCK(fp);
3529
3530            if (count == 0) {
3531                BLOGE(sc, "Timeout waiting for fp[%d] "
3532                          "transmits to complete!\n", i);
3533                bxe_panic(sc, ("tx drain failure\n"));
3534                return;
3535            }
3536
3537            count--;
3538            DELAY(1000);
3539            rmb();
3540        }
3541    }
3542
3543    return;
3544}
3545
3546static int
3547bxe_del_all_macs(struct bxe_softc          *sc,
3548                 struct ecore_vlan_mac_obj *mac_obj,
3549                 int                       mac_type,
3550                 uint8_t                   wait_for_comp)
3551{
3552    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3553    int rc;
3554
3555    /* wait for completion of the requested command */
3556    if (wait_for_comp) {
3557        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3558    }
3559
3560    /* Set the mac type of addresses we want to clear */
3561    bxe_set_bit(mac_type, &vlan_mac_flags);
3562
3563    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3564    if (rc < 0) {
3565        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3566            rc, mac_type, wait_for_comp);
3567    }
3568
3569    return (rc);
3570}
3571
3572static int
3573bxe_fill_accept_flags(struct bxe_softc *sc,
3574                      uint32_t         rx_mode,
3575                      unsigned long    *rx_accept_flags,
3576                      unsigned long    *tx_accept_flags)
3577{
3578    /* Clear the flags first */
3579    *rx_accept_flags = 0;
3580    *tx_accept_flags = 0;
3581
3582    switch (rx_mode) {
3583    case BXE_RX_MODE_NONE:
3584        /*
3585         * 'drop all' supersedes any accept flags that may have been
3586         * passed to the function.
3587         */
3588        break;
3589
3590    case BXE_RX_MODE_NORMAL:
3591        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3592        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3593        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3594
3595        /* internal switching mode */
3596        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3597        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3598        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3599
3600        break;
3601
3602    case BXE_RX_MODE_ALLMULTI:
3603        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3604        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3605        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3606
3607        /* internal switching mode */
3608        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3609        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3610        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3611
3612        break;
3613
3614    case BXE_RX_MODE_PROMISC:
3615        /*
3616         * According to the definition of SI mode, an interface in promiscuous
3617         * mode should receive matched and unmatched (with respect to the port)
3618         * unicast packets.
3619         */
3620        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3621        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3622        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3623        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3624
3625        /* internal switching mode */
3626        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3627        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3628
3629        if (IS_MF_SI(sc)) {
3630            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3631        } else {
3632            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3633        }
3634
3635        break;
3636
3637    default:
3638        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3639        return (-1);
3640    }
3641
3642    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3643    if (rx_mode != BXE_RX_MODE_NONE) {
3644        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3646    }
3647
3648    return (0);
3649}
3650
3651static int
3652bxe_set_q_rx_mode(struct bxe_softc *sc,
3653                  uint8_t          cl_id,
3654                  unsigned long    rx_mode_flags,
3655                  unsigned long    rx_accept_flags,
3656                  unsigned long    tx_accept_flags,
3657                  unsigned long    ramrod_flags)
3658{
3659    struct ecore_rx_mode_ramrod_params ramrod_param;
3660    int rc;
3661
3662    memset(&ramrod_param, 0, sizeof(ramrod_param));
3663
3664    /* Prepare ramrod parameters */
3665    ramrod_param.cid = 0;
3666    ramrod_param.cl_id = cl_id;
3667    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3668    ramrod_param.func_id = SC_FUNC(sc);
3669
3670    ramrod_param.pstate = &sc->sp_state;
3671    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3672
3673    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3674    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3675
3676    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3677
3678    ramrod_param.ramrod_flags = ramrod_flags;
3679    ramrod_param.rx_mode_flags = rx_mode_flags;
3680
3681    ramrod_param.rx_accept_flags = rx_accept_flags;
3682    ramrod_param.tx_accept_flags = tx_accept_flags;
3683
3684    rc = ecore_config_rx_mode(sc, &ramrod_param);
3685    if (rc < 0) {
3686        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3687            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3688            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3689            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3690            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3691        return (rc);
3692    }
3693
3694    return (0);
3695}
3696
3697static int
3698bxe_set_storm_rx_mode(struct bxe_softc *sc)
3699{
3700    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3701    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3702    int rc;
3703
3704    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3705                               &tx_accept_flags);
3706    if (rc) {
3707        return (rc);
3708    }
3709
3710    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3711    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3712    /* XXX ensure all fastpaths have the same cl_id and/or move it to bxe_softc */
3713    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3714    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3715                              rx_accept_flags, tx_accept_flags,
3716                              ramrod_flags));
3717}
3718
3719/* returns the "mcp load_code" according to global load_count array */
3720static int
3721bxe_nic_load_no_mcp(struct bxe_softc *sc)
3722{
3723    int path = SC_PATH(sc);
3724    int port = SC_PORT(sc);
3725
3726    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3727          path, load_count[path][0], load_count[path][1],
3728          load_count[path][2]);
3729    load_count[path][0]++;
3730    load_count[path][1 + port]++;
3731    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3732          path, load_count[path][0], load_count[path][1],
3733          load_count[path][2]);
3734    if (load_count[path][0] == 1) {
3735        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3736    } else if (load_count[path][1 + port] == 1) {
3737        return (FW_MSG_CODE_DRV_LOAD_PORT);
3738    } else {
3739        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3740    }
3741}
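/*
 * Hypothetical walk-through of the bookkeeping above on an otherwise idle
 * path: the first function to load (say fn0 on port 0) makes load_count[path]
 * {1, 1, 0} and gets DRV_LOAD_COMMON; the next one on the other port (fn1 on
 * port 1) makes it {2, 1, 1} and gets DRV_LOAD_PORT; any later function on an
 * already loaded port only gets DRV_LOAD_FUNCTION.
 */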
3742
3743/* returns the "mcp load_code" according to global load_count array */
3744static int
3745bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3746{
3747    int port = SC_PORT(sc);
3748    int path = SC_PATH(sc);
3749
3750    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3751          path, load_count[path][0], load_count[path][1],
3752          load_count[path][2]);
3753    load_count[path][0]--;
3754    load_count[path][1 + port]--;
3755    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3756          path, load_count[path][0], load_count[path][1],
3757          load_count[path][2]);
3758    if (load_count[path][0] == 0) {
3759        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3760    } else if (load_count[path][1 + port] == 0) {
3761        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3762    } else {
3763        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3764    }
3765}
3766
3767/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3768static uint32_t
3769bxe_send_unload_req(struct bxe_softc *sc,
3770                    int              unload_mode)
3771{
3772    uint32_t reset_code = 0;
3773
3774    /* Select the UNLOAD request mode (WOL is not supported, so both map to WOL_DIS) */
3775    if (unload_mode == UNLOAD_NORMAL) {
3776        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3777    } else {
3778        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3779    }
3780
3781    /* Send the request to the MCP */
3782    if (!BXE_NOMCP(sc)) {
3783        reset_code = bxe_fw_command(sc, reset_code, 0);
3784    } else {
3785        reset_code = bxe_nic_unload_no_mcp(sc);
3786    }
3787
3788    return (reset_code);
3789}
3790
3791/* send UNLOAD_DONE command to the MCP */
3792static void
3793bxe_send_unload_done(struct bxe_softc *sc,
3794                     uint8_t          keep_link)
3795{
3796    uint32_t reset_param =
3797        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3798
3799    /* Report UNLOAD_DONE to MCP */
3800    if (!BXE_NOMCP(sc)) {
3801        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3802    }
3803}
3804
3805static int
3806bxe_func_wait_started(struct bxe_softc *sc)
3807{
3808    int tout = 50;
3809
3810    if (!sc->port.pmf) {
3811        return (0);
3812    }
3813
3814    /*
3815     * (assumption: No Attention from MCP at this stage)
3816     * PMF probably in the middle of TX disable/enable transaction
3817     * 1. Sync IRS for default SB
3818     * 1. Sync the ISR for the default SB
3819     * 2. Sync the SP queue - this guarantees that attention handling has started
3820     * 3. Wait until the TX disable/enable transaction completes
3821     *
3822     * Steps 1+2 guarantee that if a DCBX attention was scheduled it has already
3823     * changed the pending bit of the transaction from STARTED-->TX_STOPPED; if we
3824     * have already received the completion for the transaction then the state is
3825     * TX_STOPPED. The state will return to STARTED after completion of the
3826     * TX_STOPPED-->STARTED transaction.
3827
3828    /* XXX make sure default SB ISR is done */
3829    /* need a way to synchronize an irq (intr_mtx?) */
3830
3831    /* XXX flush any work queues */
3832
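    /*
     * The polling loop below allows up to 50 iterations of DELAY(20000) usec,
     * i.e. roughly one second, for the function to reach the STARTED state.
     */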
3833    while (ecore_func_get_state(sc, &sc->func_obj) !=
3834           ECORE_F_STATE_STARTED && tout--) {
3835        DELAY(20000);
3836    }
3837
3838    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3839        /*
3840         * Failed to complete the transaction in a "good way"
3841         * Force both transactions with CLR bit.
3842         */
3843        struct ecore_func_state_params func_params = { NULL };
3844
3845        BLOGE(sc, "Unexpected function state! "
3846                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3847
3848        func_params.f_obj = &sc->func_obj;
3849        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3850
3851        /* STARTED-->TX_STOPPED */
3852        func_params.cmd = ECORE_F_CMD_TX_STOP;
3853        ecore_func_state_change(sc, &func_params);
3854
3855        /* TX_STOPPED-->STARTED */
3856        func_params.cmd = ECORE_F_CMD_TX_START;
3857        return (ecore_func_state_change(sc, &func_params));
3858    }
3859
3860    return (0);
3861}
3862
3863static int
3864bxe_stop_queue(struct bxe_softc *sc,
3865               int              index)
3866{
3867    struct bxe_fastpath *fp = &sc->fp[index];
3868    struct ecore_queue_state_params q_params = { NULL };
3869    int rc;
3870
3871    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3872
3873    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3874    /* We want to wait for completion in this context */
3875    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3876
3877    /* Stop the primary connection: */
3878
3879    /* ...halt the connection */
3880    q_params.cmd = ECORE_Q_CMD_HALT;
3881    rc = ecore_queue_state_change(sc, &q_params);
3882    if (rc) {
3883        return (rc);
3884    }
3885
3886    /* ...terminate the connection */
3887    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3888    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3889    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3890    rc = ecore_queue_state_change(sc, &q_params);
3891    if (rc) {
3892        return (rc);
3893    }
3894
3895    /* ...delete cfc entry */
3896    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3897    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3898    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3899    return (ecore_queue_state_change(sc, &q_params));
3900}
3901
3902/* wait for the outstanding SP commands */
3903static inline uint8_t
3904bxe_wait_sp_comp(struct bxe_softc *sc,
3905                 unsigned long    mask)
3906{
3907    unsigned long tmp;
3908    int tout = 5000; /* wait for 5 secs tops */
3909
3910    while (tout--) {
3911        mb();
3912        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3913            return (TRUE);
3914        }
3915
3916        DELAY(1000);
3917    }
3918
3919    mb();
3920
3921    tmp = atomic_load_acq_long(&sc->sp_state);
3922    if (tmp & mask) {
3923        BLOGE(sc, "Filtering completion timed out: "
3924                  "sp_state 0x%lx, mask 0x%lx\n",
3925              tmp, mask);
3926        return (FALSE);
3927    }
3928
3929    return (FALSE);
3930}
3931
3932static int
3933bxe_func_stop(struct bxe_softc *sc)
3934{
3935    struct ecore_func_state_params func_params = { NULL };
3936    int rc;
3937
3938    /* prepare parameters for function state transitions */
3939    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3940    func_params.f_obj = &sc->func_obj;
3941    func_params.cmd = ECORE_F_CMD_STOP;
3942
3943    /*
3944     * Try to stop the function the 'good way'. If it fails (in case
3945     * of a parity error during bxe_chip_cleanup()) and we are
3946     * not in a debug mode, perform a state transaction in order to
3947     * enable further HW_RESET transaction.
3948     */
3949    rc = ecore_func_state_change(sc, &func_params);
3950    if (rc) {
3951        BLOGE(sc, "FUNC_STOP ramrod failed. "
3952                  "Running a dry transaction (%d)\n", rc);
3953        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3954        return (ecore_func_state_change(sc, &func_params));
3955    }
3956
3957    return (0);
3958}
3959
3960static int
3961bxe_reset_hw(struct bxe_softc *sc,
3962             uint32_t         load_code)
3963{
3964    struct ecore_func_state_params func_params = { NULL };
3965
3966    /* Prepare parameters for function state transitions */
3967    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3968
3969    func_params.f_obj = &sc->func_obj;
3970    func_params.cmd = ECORE_F_CMD_HW_RESET;
3971
3972    func_params.params.hw_init.load_phase = load_code;
3973
3974    return (ecore_func_state_change(sc, &func_params));
3975}
3976
3977static void
3978bxe_int_disable_sync(struct bxe_softc *sc,
3979                     int              disable_hw)
3980{
3981    if (disable_hw) {
3982        /* prevent the HW from sending interrupts */
3983        bxe_int_disable(sc);
3984    }
3985
3986    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
3987    /* make sure all ISRs are done */
3988
3989    /* XXX make sure sp_task is not running */
3990    /* cancel and flush work queues */
3991}
3992
3993static void
3994bxe_chip_cleanup(struct bxe_softc *sc,
3995                 uint32_t         unload_mode,
3996                 uint8_t          keep_link)
3997{
3998    int port = SC_PORT(sc);
3999    struct ecore_mcast_ramrod_params rparam = { NULL };
4000    uint32_t reset_code;
4001    int i, rc = 0;
4002
4003    bxe_drain_tx_queues(sc);
4004
4005    /* give HW time to discard old tx messages */
4006    DELAY(1000);
4007
4008    /* Clean all ETH MACs */
4009    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4010    if (rc < 0) {
4011        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4012    }
4013
4014    /* Clean up UC list  */
4015    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4016    if (rc < 0) {
4017        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4018    }
4019
4020    /* Disable LLH */
4021    if (!CHIP_IS_E1(sc)) {
4022        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4023    }
4024
4025    /* Set "drop all" to stop Rx */
4026
4027    /*
4028     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4029     * a race between the completion code and this code.
4030     */
4031    BXE_MCAST_LOCK(sc);
4032
4033    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4034        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4035    } else {
4036        bxe_set_storm_rx_mode(sc);
4037    }
4038
4039    /* Clean up multicast configuration */
4040    rparam.mcast_obj = &sc->mcast_obj;
4041    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4042    if (rc < 0) {
4043        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4044    }
4045
4046    BXE_MCAST_UNLOCK(sc);
4047
4048    // XXX bxe_iov_chip_cleanup(sc);
4049
4050    /*
4051     * Send the UNLOAD_REQUEST to the MCP. This returns whether
4052     * this function should perform a FUNCTION, PORT, or COMMON HW
4053     * reset.
4054     */
4055    reset_code = bxe_send_unload_req(sc, unload_mode);
4056
4057    /*
4058     * (assumption: No Attention from MCP at this stage)
4059     * PMF probably in the middle of TX disable/enable transaction
4060     */
4061    rc = bxe_func_wait_started(sc);
4062    if (rc) {
4063        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4064    }
4065
4066    /*
4067     * Close multi and leading connections
4068     * Completions for ramrods are collected in a synchronous way
4069     */
4070    for (i = 0; i < sc->num_queues; i++) {
4071        if (bxe_stop_queue(sc, i)) {
4072            goto unload_error;
4073        }
4074    }
4075
4076    /*
4077     * If the SP settings have not completed by now, something
4078     * has gone very wrong.
4079     */
4080    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4081        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4082    }
4083
4084unload_error:
4085
4086    rc = bxe_func_stop(sc);
4087    if (rc) {
4088        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4089    }
4090
4091    /* disable HW interrupts */
4092    bxe_int_disable_sync(sc, TRUE);
4093
4094    /* detach interrupts */
4095    bxe_interrupt_detach(sc);
4096
4097    /* Reset the chip */
4098    rc = bxe_reset_hw(sc, reset_code);
4099    if (rc) {
4100        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4101    }
4102
4103    /* Report UNLOAD_DONE to MCP */
4104    bxe_send_unload_done(sc, keep_link);
4105}
4106
4107static void
4108bxe_disable_close_the_gate(struct bxe_softc *sc)
4109{
4110    uint32_t val;
4111    int port = SC_PORT(sc);
4112
4113    BLOGD(sc, DBG_LOAD,
4114          "Disabling 'close the gates'\n");
4115
4116    if (CHIP_IS_E1(sc)) {
4117        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4118                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4119        val = REG_RD(sc, addr);
4120        val &= ~(0x300);
4121        REG_WR(sc, addr, val);
4122    } else {
4123        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4124        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4125                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4126        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4127    }
4128}
4129
4130/*
4131 * Cleans the objects that have internal lists, without sending
4132 * ramrods. Should be run when interrupts are disabled.
4133 */
4134static void
4135bxe_squeeze_objects(struct bxe_softc *sc)
4136{
4137    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4138    struct ecore_mcast_ramrod_params rparam = { NULL };
4139    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4140    int rc;
4141
4142    /* Cleanup MACs' object first... */
4143
4144    /* Wait for completion of the requested commands */
4145    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4146    /* Perform a dry cleanup */
4147    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4148
4149    /* Clean ETH primary MAC */
4150    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4151    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4152                             &ramrod_flags);
4153    if (rc != 0) {
4154        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4155    }
4156
4157    /* Cleanup UC list */
4158    vlan_mac_flags = 0;
4159    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4160    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4161                             &ramrod_flags);
4162    if (rc != 0) {
4163        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4164    }
4165
4166    /* Now clean mcast object... */
4167
4168    rparam.mcast_obj = &sc->mcast_obj;
4169    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4170
4171    /* Add a DEL command... */
4172    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4173    if (rc < 0) {
4174        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4175    }
4176
4177    /* now wait until all pending commands are cleared */
4178
4179    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4180    while (rc != 0) {
4181        if (rc < 0) {
4182            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4183            return;
4184        }
4185
4186        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4187    }
4188}
4189
4190/* stop the controller */
4191static __noinline int
4192bxe_nic_unload(struct bxe_softc *sc,
4193               uint32_t         unload_mode,
4194               uint8_t          keep_link)
4195{
4196    uint8_t global = FALSE;
4197    uint32_t val;
4198    int i;
4199
4200    BXE_CORE_LOCK_ASSERT(sc);
4201
4202    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4203
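    /*
     * Note: taking and immediately dropping each fastpath TX lock has no
     * effect on ring state by itself; it presumably just acts as a barrier so
     * that any transmit path already running when IFF_DRV_RUNNING was cleared
     * above has drained before the unload proceeds.
     */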
4204    for (i = 0; i < sc->num_queues; i++) {
4205        struct bxe_fastpath *fp;
4206
4207        fp = &sc->fp[i];
4208        BXE_FP_TX_LOCK(fp);
4209        BXE_FP_TX_UNLOCK(fp);
4210    }
4211
4212    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4213
4214    /* mark driver as unloaded in shmem2 */
4215    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4216        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4217        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4218                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4219    }
4220
4221    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4222        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4223        /*
4224         * We can get here if the driver has been unloaded
4225         * during parity error recovery and is either waiting for a
4226         * leader to complete or for other functions to unload and
4227         * then ifconfig down has been issued. In this case we want to
4228         * unload and let other functions to complete a recovery
4229         * process.
4230         */
4231        sc->recovery_state = BXE_RECOVERY_DONE;
4232        sc->is_leader = 0;
4233        bxe_release_leader_lock(sc);
4234        mb();
4235
4236        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4237        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4238            " state = 0x%x\n", sc->recovery_state, sc->state);
4239        return (-1);
4240    }
4241
4242    /*
4243     * Nothing to do during unload if previous bxe_nic_load()
4244     * did not complete successfully - all resources are released.
4245     */
4246    if ((sc->state == BXE_STATE_CLOSED) ||
4247        (sc->state == BXE_STATE_ERROR)) {
4248        return (0);
4249    }
4250
4251    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4252    mb();
4253
4254    /* stop tx */
4255    bxe_tx_disable(sc);
4256
4257    sc->rx_mode = BXE_RX_MODE_NONE;
4258    /* XXX set rx mode ??? */
4259
4260    if (IS_PF(sc) && !sc->grcdump_done) {
4261        /* set ALWAYS_ALIVE bit in shmem */
4262        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4263
4264        bxe_drv_pulse(sc);
4265
4266        bxe_stats_handle(sc, STATS_EVENT_STOP);
4267        bxe_save_statistics(sc);
4268    }
4269
4270    /* wait till consumers catch up with producers in all queues */
4271    bxe_drain_tx_queues(sc);
4272
4273    /* if VF, indicate to the PF that this function is going down (the PF will
4274     * delete the sp elements and clear the initializations)
4275     */
4276    if (IS_VF(sc)) {
4277        ; /* bxe_vfpf_close_vf(sc); */
4278    } else if (unload_mode != UNLOAD_RECOVERY) {
4279        /* if this is a normal/close unload need to clean up chip */
4280        if (!sc->grcdump_done)
4281            bxe_chip_cleanup(sc, unload_mode, keep_link);
4282    } else {
4283        /* Send the UNLOAD_REQUEST to the MCP */
4284        bxe_send_unload_req(sc, unload_mode);
4285
4286        /*
4287         * Prevent transactions to the host from the functions on the
4288         * engine that doesn't reset global blocks in case of a global
4289         * attention once global blocks are reset and gates are opened
4290         * (the engine on which the leader will perform the recovery
4291         * last).
4292         */
4293        if (!CHIP_IS_E1x(sc)) {
4294            bxe_pf_disable(sc);
4295        }
4296
4297        /* disable HW interrupts */
4298        bxe_int_disable_sync(sc, TRUE);
4299
4300        /* detach interrupts */
4301        bxe_interrupt_detach(sc);
4302
4303        /* Report UNLOAD_DONE to MCP */
4304        bxe_send_unload_done(sc, FALSE);
4305    }
4306
4307    /*
4308     * At this stage no more interrupts will arrive so we may safely clean
4309     * the queue'able objects here in case they failed to get cleaned so far.
4310     */
4311    if (IS_PF(sc)) {
4312        bxe_squeeze_objects(sc);
4313    }
4314
4315    /* There should be no more pending SP commands at this stage */
4316    sc->sp_state = 0;
4317
4318    sc->port.pmf = 0;
4319
4320    bxe_free_fp_buffers(sc);
4321
4322    if (IS_PF(sc)) {
4323        bxe_free_mem(sc);
4324    }
4325
4326    bxe_free_fw_stats_mem(sc);
4327
4328    sc->state = BXE_STATE_CLOSED;
4329
4330    /*
4331     * Check if there are pending parity attentions. If there are - set
4332     * RECOVERY_IN_PROGRESS.
4333     */
4334    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4335        bxe_set_reset_in_progress(sc);
4336
4337        /* Set RESET_IS_GLOBAL if needed */
4338        if (global) {
4339            bxe_set_reset_global(sc);
4340        }
4341    }
4342
4343    /*
4344     * The last driver must disable a "close the gate" if there is no
4345     * parity attention or "process kill" pending.
4346     */
4347    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4348        bxe_reset_is_done(sc, SC_PATH(sc))) {
4349        bxe_disable_close_the_gate(sc);
4350    }
4351
4352    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4353
4354    return (0);
4355}
4356
4357/*
4358 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4359 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4360 */
4361static int
4362bxe_ifmedia_update(struct ifnet  *ifp)
4363{
4364    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4365    struct ifmedia *ifm;
4366
4367    ifm = &sc->ifmedia;
4368
4369    /* We only support Ethernet media type. */
4370    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4371        return (EINVAL);
4372    }
4373
4374    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4375    case IFM_AUTO:
4376         break;
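    /*
     * Every explicit media selection below falls through to the default case,
     * so only IFM_AUTO is actually accepted here; the fixed 10G subtypes
     * appear to be listed for documentation purposes only.
     */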
4377    case IFM_10G_CX4:
4378    case IFM_10G_SR:
4379    case IFM_10G_T:
4380    case IFM_10G_TWINAX:
4381    default:
4382        /* We don't support changing the media type. */
4383        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4384              IFM_SUBTYPE(ifm->ifm_media));
4385        return (EINVAL);
4386    }
4387
4388    return (0);
4389}
4390
4391/*
4392 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4393 */
4394static void
4395bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4396{
4397    struct bxe_softc *sc = if_getsoftc(ifp);
4398
4399    /* Report link down if the driver isn't running. */
4400    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4401        ifmr->ifm_active |= IFM_NONE;
4402        return;
4403    }
4404
4405    /* Setup the default interface info. */
4406    ifmr->ifm_status = IFM_AVALID;
4407    ifmr->ifm_active = IFM_ETHER;
4408
4409    if (sc->link_vars.link_up) {
4410        ifmr->ifm_status |= IFM_ACTIVE;
4411    } else {
4412        ifmr->ifm_active |= IFM_NONE;
4413        return;
4414    }
4415
4416    ifmr->ifm_active |= sc->media;
4417
4418    if (sc->link_vars.duplex == DUPLEX_FULL) {
4419        ifmr->ifm_active |= IFM_FDX;
4420    } else {
4421        ifmr->ifm_active |= IFM_HDX;
4422    }
4423}
4424
4425static int
4426bxe_ioctl_nvram(struct bxe_softc *sc,
4427                uint32_t         priv_op,
4428                struct ifreq     *ifr)
4429{
4430    struct bxe_nvram_data nvdata_base;
4431    struct bxe_nvram_data *nvdata;
4432    int len;
4433    int error = 0;
4434
4435    copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4436
4437    len = (sizeof(struct bxe_nvram_data) +
4438           nvdata_base.len -
4439           sizeof(uint32_t));
4440
4441    if (len > sizeof(struct bxe_nvram_data)) {
4442        if ((nvdata = (struct bxe_nvram_data *)
4443                 malloc(len, M_DEVBUF,
4444                        (M_NOWAIT | M_ZERO))) == NULL) {
4445            BLOGE(sc, "BXE_IOC_RD/WR_NVRAM malloc failed priv_op 0x%x "
4446                " len = 0x%x\n", priv_op, len);
4447            return (1);
4448        }
4449        memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4450    } else {
4451        nvdata = &nvdata_base;
4452    }
4453
4454    if (priv_op == BXE_IOC_RD_NVRAM) {
4455        BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4456              nvdata->offset, nvdata->len);
4457        error = bxe_nvram_read(sc,
4458                               nvdata->offset,
4459                               (uint8_t *)nvdata->value,
4460                               nvdata->len);
4461        copyout(nvdata, ifr->ifr_data, len);
4462    } else { /* BXE_IOC_WR_NVRAM */
4463        BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4464              nvdata->offset, nvdata->len);
4465        copyin(ifr->ifr_data, nvdata, len);
4466        error = bxe_nvram_write(sc,
4467                                nvdata->offset,
4468                                (uint8_t *)nvdata->value,
4469                                nvdata->len);
4470    }
4471
4472    if (len > sizeof(struct bxe_nvram_data)) {
4473        free(nvdata, M_DEVBUF);
4474    }
4475
4476    return (error);
4477}
4478
4479static int
4480bxe_ioctl_stats_show(struct bxe_softc *sc,
4481                     uint32_t         priv_op,
4482                     struct ifreq     *ifr)
4483{
4484    const size_t str_size   = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4485    const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4486    caddr_t p_tmp;
4487    uint32_t *offset;
4488    int i;
4489
4490    switch (priv_op)
4491    {
4492    case BXE_IOC_STATS_SHOW_NUM:
4493        memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4494        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4495            BXE_NUM_ETH_STATS;
4496        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4497            STAT_NAME_LEN;
4498        return (0);
4499
4500    case BXE_IOC_STATS_SHOW_STR:
4501        memset(ifr->ifr_data, 0, str_size);
4502        p_tmp = ifr->ifr_data;
4503        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4504            strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4505            p_tmp += STAT_NAME_LEN;
4506        }
4507        return (0);
4508
4509    case BXE_IOC_STATS_SHOW_CNT:
4510        memset(ifr->ifr_data, 0, stats_size);
4511        p_tmp = ifr->ifr_data;
4512        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4513            offset = ((uint32_t *)&sc->eth_stats +
4514                      bxe_eth_stats_arr[i].offset);
4515            switch (bxe_eth_stats_arr[i].size) {
4516            case 4:
4517                *((uint64_t *)p_tmp) = (uint64_t)*offset;
4518                break;
4519            case 8:
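                /*
                 * Assumed layout: a 64-bit statistic is stored as two
                 * consecutive 32-bit words, high word first, so HILO_U64()
                 * presumably builds ((uint64_t)hi << 32) + lo from *offset
                 * and *(offset + 1).
                 */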
4520                *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4521                break;
4522            default:
4523                *((uint64_t *)p_tmp) = 0;
4524            }
4525            p_tmp += sizeof(uint64_t);
4526        }
4527        return (0);
4528
4529    default:
4530        return (-1);
4531    }
4532}
4533
4534static void
4535bxe_handle_chip_tq(void *context,
4536                   int  pending)
4537{
4538    struct bxe_softc *sc = (struct bxe_softc *)context;
4539    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4540
4541    switch (work)
4542    {
4543
4544    case CHIP_TQ_REINIT:
4545        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4546            /* restart the interface */
4547            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4548            bxe_periodic_stop(sc);
4549            BXE_CORE_LOCK(sc);
4550            bxe_stop_locked(sc);
4551            bxe_init_locked(sc);
4552            BXE_CORE_UNLOCK(sc);
4553        }
4554        break;
4555
4556    default:
4557        break;
4558    }
4559}
4560
4561/*
4562 * Handles any IOCTL calls from the operating system.
4563 *
4564 * Returns:
4565 *   0 = Success, >0 Failure
4566 */
4567static int
4568bxe_ioctl(if_t ifp,
4569          u_long       command,
4570          caddr_t      data)
4571{
4572    struct bxe_softc *sc = if_getsoftc(ifp);
4573    struct ifreq *ifr = (struct ifreq *)data;
4574    struct bxe_nvram_data *nvdata;
4575    uint32_t priv_op;
4576    int mask = 0;
4577    int reinit = 0;
4578    int error = 0;
4579
4580    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4581    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4582
4583    switch (command)
4584    {
4585    case SIOCSIFMTU:
4586        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4587              ifr->ifr_mtu);
4588
4589        if (sc->mtu == ifr->ifr_mtu) {
4590            /* nothing to change */
4591            break;
4592        }
4593
4594        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4595            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4596                  ifr->ifr_mtu, mtu_min, mtu_max);
4597            error = EINVAL;
4598            break;
4599        }
4600
4601        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4602                             (unsigned long)ifr->ifr_mtu);
4603	/*
4604        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4605                              (unsigned long)ifr->ifr_mtu);
4606	XXX - Not sure why it needs to be atomic
4607	*/
4608	if_setmtu(ifp, ifr->ifr_mtu);
4609        reinit = 1;
4610        break;
4611
4612    case SIOCSIFFLAGS:
4613        /* toggle the interface state up or down */
4614        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4615
4616	BXE_CORE_LOCK(sc);
4617        /* check if the interface is up */
4618        if (if_getflags(ifp) & IFF_UP) {
4619            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4620                /* set the receive mode flags */
4621                bxe_set_rx_mode(sc);
4622            } else {
4623		bxe_init_locked(sc);
4624            }
4625        } else {
4626            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4627		bxe_periodic_stop(sc);
4628		bxe_stop_locked(sc);
4629            }
4630        }
4631	BXE_CORE_UNLOCK(sc);
4632
4633        break;
4634
4635    case SIOCADDMULTI:
4636    case SIOCDELMULTI:
4637        /* add/delete multicast addresses */
4638        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4639
4640        /* check if the interface is up */
4641        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4642            /* set the receive mode flags */
4643	    BXE_CORE_LOCK(sc);
4644            bxe_set_rx_mode(sc);
4645	    BXE_CORE_UNLOCK(sc);
4646        }
4647
4648        break;
4649
4650    case SIOCSIFCAP:
4651        /* find out which capabilities have changed */
4652        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4653
4654        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4655              mask);
4656
4657        /* toggle the LRO capabilities enable flag */
4658        if (mask & IFCAP_LRO) {
4659	    if_togglecapenable(ifp, IFCAP_LRO);
4660            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4661                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4662            reinit = 1;
4663        }
4664
4665        /* toggle the TXCSUM checksum capabilities enable flag */
4666        if (mask & IFCAP_TXCSUM) {
4667	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4668            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4669                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4670            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4671                if_sethwassistbits(ifp, (CSUM_IP      |
4672                                    CSUM_TCP      |
4673                                    CSUM_UDP      |
4674                                    CSUM_TSO      |
4675                                    CSUM_TCP_IPV6 |
4676                                    CSUM_UDP_IPV6), 0);
4677            } else {
4678		if_clearhwassist(ifp); /* XXX */
4679            }
4680        }
4681
4682        /* toggle the RXCSUM checksum capabilities enable flag */
4683        if (mask & IFCAP_RXCSUM) {
4684	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4685            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4686                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4687            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4688                if_sethwassistbits(ifp, (CSUM_IP      |
4689                                    CSUM_TCP      |
4690                                    CSUM_UDP      |
4691                                    CSUM_TSO      |
4692                                    CSUM_TCP_IPV6 |
4693                                    CSUM_UDP_IPV6), 0);
4694            } else {
4695		if_clearhwassist(ifp); /* XXX */
4696            }
4697        }
4698
4699        /* toggle TSO4 capabilities enabled flag */
4700        if (mask & IFCAP_TSO4) {
4701            if_togglecapenable(ifp, IFCAP_TSO4);
4702            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4703                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4704        }
4705
4706        /* toggle TSO6 capabilities enabled flag */
4707        if (mask & IFCAP_TSO6) {
4708	    if_togglecapenable(ifp, IFCAP_TSO6);
4709            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4710                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4711        }
4712
4713        /* toggle VLAN_HWTSO capabilities enabled flag */
4714        if (mask & IFCAP_VLAN_HWTSO) {
4715
4716	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4717            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4718                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4719        }
4720
4721        /* toggle VLAN_HWCSUM capabilities enabled flag */
4722        if (mask & IFCAP_VLAN_HWCSUM) {
4723            /* XXX investigate this... */
4724            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4725            error = EINVAL;
4726        }
4727
4728        /* toggle VLAN_MTU capabilities enable flag */
4729        if (mask & IFCAP_VLAN_MTU) {
4730            /* XXX investigate this... */
4731            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4732            error = EINVAL;
4733        }
4734
4735        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4736        if (mask & IFCAP_VLAN_HWTAGGING) {
4737            /* XXX investigate this... */
4738            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4739            error = EINVAL;
4740        }
4741
4742        /* toggle VLAN_HWFILTER capabilities enabled flag */
4743        if (mask & IFCAP_VLAN_HWFILTER) {
4744            /* XXX investigate this... */
4745            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4746            error = EINVAL;
4747        }
4748
4749        /* XXX not yet...
4750         * IFCAP_WOL_MAGIC
4751         */
4752
4753        break;
4754
4755    case SIOCSIFMEDIA:
4756    case SIOCGIFMEDIA:
4757        /* set/get interface media */
4758        BLOGD(sc, DBG_IOCTL,
4759              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4760              (command & 0xff));
4761        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4762        break;
4763
4764    case SIOCGPRIVATE_0:
4765        copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
4766
4767        switch (priv_op)
4768        {
4769        case BXE_IOC_RD_NVRAM:
4770        case BXE_IOC_WR_NVRAM:
4771            nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
4772            BLOGD(sc, DBG_IOCTL,
4773                  "Received Private NVRAM ioctl addr=0x%x size=%u\n",
4774                  nvdata->offset, nvdata->len);
4775            error = bxe_ioctl_nvram(sc, priv_op, ifr);
4776            break;
4777
4778        case BXE_IOC_STATS_SHOW_NUM:
4779        case BXE_IOC_STATS_SHOW_STR:
4780        case BXE_IOC_STATS_SHOW_CNT:
4781            BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
4782                  priv_op);
4783            error = bxe_ioctl_stats_show(sc, priv_op, ifr);
4784            break;
4785
4786        default:
4787            BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
4788            error = EINVAL;
4789            break;
4790        }
4791
4792        break;
4793
4794    default:
4795        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4796              (command & 0xff));
4797        error = ether_ioctl(ifp, command, data);
4798        break;
4799    }
4800
4801    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4802        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4803              "Re-initializing hardware from IOCTL change\n");
4804	bxe_periodic_stop(sc);
4805	BXE_CORE_LOCK(sc);
4806	bxe_stop_locked(sc);
4807	bxe_init_locked(sc);
4808	BXE_CORE_UNLOCK(sc);
4809    }
4810
4811    return (error);
4812}
4813
4814static __noinline void
4815bxe_dump_mbuf(struct bxe_softc *sc,
4816              struct mbuf      *m,
4817              uint8_t          contents)
4818{
4819    char * type;
4820    int i = 0;
4821
4822    if (!(sc->debug & DBG_MBUF)) {
4823        return;
4824    }
4825
4826    if (m == NULL) {
4827        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4828        return;
4829    }
4830
4831    while (m) {
4832
4833#if __FreeBSD_version >= 1000000
4834        BLOGD(sc, DBG_MBUF,
4835              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4836              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4837
4838        if (m->m_flags & M_PKTHDR) {
4839             BLOGD(sc, DBG_MBUF,
4840                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4841                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4842                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4843        }
4844#else
4845        BLOGD(sc, DBG_MBUF,
4846              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4847              i, m, m->m_len, m->m_flags,
4848              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4849
4850        if (m->m_flags & M_PKTHDR) {
4851             BLOGD(sc, DBG_MBUF,
4852                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4853                   i, m->m_pkthdr.len, m->m_flags,
4854                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4855                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4856                   "\22M_PROMISC\23M_NOFREE",
4857                   (int)m->m_pkthdr.csum_flags,
4858                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4859                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4860                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4861                   "\14CSUM_PSEUDO_HDR");
4862        }
4863#endif /* #if __FreeBSD_version >= 1000000 */
4864
4865        if (m->m_flags & M_EXT) {
4866            switch (m->m_ext.ext_type) {
4867            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4868            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4869            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4870            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4871            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4872            case EXT_PACKET:     type = "EXT_PACKET";     break;
4873            case EXT_MBUF:       type = "EXT_MBUF";       break;
4874            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4875            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4876            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4877            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4878            default:             type = "UNKNOWN";        break;
4879            }
4880
4881            BLOGD(sc, DBG_MBUF,
4882                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4883                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4884        }
4885
4886        if (contents) {
4887            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4888        }
4889
4890        m = m->m_next;
4891        i++;
4892    }
4893}
4894
4895/*
4896 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4897 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4898 * The window: the 3 excluded BDs are 1 for the header BD + 2 for the parse BD and the last BD.
4899 * The header comes in a separate BD in FreeBSD, so 13 - 3 = 10.
4900 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4901 */
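/*
 * Illustration: with wnd_size = 10 the first check below covers
 * segs[1]..segs[10]; every later iteration slides the window by one segment
 * (drop segs[wnd_idx + 1], add segs[frag_idx]) and re-checks the summed
 * length against the MSS.
 */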
4902static int
4903bxe_chktso_window(struct bxe_softc  *sc,
4904                  int               nsegs,
4905                  bus_dma_segment_t *segs,
4906                  struct mbuf       *m)
4907{
4908    uint32_t num_wnds, wnd_size, wnd_sum;
4909    int32_t frag_idx, wnd_idx;
4910    unsigned short lso_mss;
4911    int defrag;
4912
4913    defrag = 0;
4914    wnd_sum = 0;
4915    wnd_size = 10;
4916    num_wnds = nsegs - wnd_size;
4917    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4918
4919    /*
4920     * The Ethernet, IP and TCP headers all live in the first FreeBSD mbuf, so
4921     * compute the sum of the first window of data segments while skipping
4922     * segment 0, which is assumed to hold those headers.
4923     */
4924    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4925        wnd_sum += htole16(segs[frag_idx].ds_len);
4926    }
4927
4928    /* check the first 10 bd window size */
4929    if (wnd_sum < lso_mss) {
4930        return (1);
4931    }
4932
4933    /* run through the windows */
4934    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4935        /* slide the window: drop the oldest (non-header) segment length */
4936        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4937        /* add the next mbuf len to the len of our new window */
4938        wnd_sum += htole16(segs[frag_idx].ds_len);
4939        if (wnd_sum < lso_mss) {
4940            return (1);
4941        }
4942    }
4943
4944    return (0);
4945}
4946
4947static uint8_t
4948bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4949                    struct mbuf         *m,
4950                    uint32_t            *parsing_data)
4951{
4952    struct ether_vlan_header *eh = NULL;
4953    struct ip *ip4 = NULL;
4954    struct ip6_hdr *ip6 = NULL;
4955    caddr_t ip = NULL;
4956    struct tcphdr *th = NULL;
4957    int e_hlen, ip_hlen, l4_off;
4958    uint16_t proto;
4959
4960    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4961        /* no L4 checksum offload needed */
4962        return (0);
4963    }
4964
4965    /* get the Ethernet header */
4966    eh = mtod(m, struct ether_vlan_header *);
4967
4968    /* handle VLAN encapsulation if present */
4969    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4970        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4971        proto  = ntohs(eh->evl_proto);
4972    } else {
4973        e_hlen = ETHER_HDR_LEN;
4974        proto  = ntohs(eh->evl_encap_proto);
4975    }
4976
4977    switch (proto) {
4978    case ETHERTYPE_IP:
4979        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4980        ip4 = (m->m_len < sizeof(struct ip)) ?
4981                  (struct ip *)m->m_next->m_data :
4982                  (struct ip *)(m->m_data + e_hlen);
4983        /* ip_hl is number of 32-bit words */
4984        ip_hlen = (ip4->ip_hl << 2);
4985        ip = (caddr_t)ip4;
4986        break;
4987    case ETHERTYPE_IPV6:
4988        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4989        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4990                  (struct ip6_hdr *)m->m_next->m_data :
4991                  (struct ip6_hdr *)(m->m_data + e_hlen);
4992        /* XXX cannot support offload with IPv6 extensions */
4993        ip_hlen = sizeof(struct ip6_hdr);
4994        ip = (caddr_t)ip6;
4995        break;
4996    default:
4997        /* We can't offload in this case... */
4998        /* XXX error stat ??? */
4999        return (0);
5000    }
5001
5002    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5003    l4_off = (e_hlen + ip_hlen);
5004
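    /*
     * The L4_HDR_START_OFFSET_W field of the parse BD is expressed in
     * 16-bit words (hence the "_W" suffix and the shift right by one).
     */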
5005    *parsing_data |=
5006        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
5007         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
5008
5009    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5010                                  CSUM_TSO |
5011                                  CSUM_TCP_IPV6)) {
5012        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5013        th = (struct tcphdr *)(ip + ip_hlen);
5014        /* th_off is number of 32-bit words */
5015        *parsing_data |= ((th->th_off <<
5016                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
5017                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
5018        return (l4_off + (th->th_off << 2)); /* entire header length */
5019    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5020                                         CSUM_UDP_IPV6)) {
5021        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5022        return (l4_off + sizeof(struct udphdr)); /* entire header length */
5023    } else {
5024        /* XXX error stat ??? */
5025        return (0);
5026    }
5027}
5028
5029static uint8_t
5030bxe_set_pbd_csum(struct bxe_fastpath        *fp,
5031                 struct mbuf                *m,
5032                 struct eth_tx_parse_bd_e1x *pbd)
5033{
5034    struct ether_vlan_header *eh = NULL;
5035    struct ip *ip4 = NULL;
5036    struct ip6_hdr *ip6 = NULL;
5037    caddr_t ip = NULL;
5038    struct tcphdr *th = NULL;
5039    struct udphdr *uh = NULL;
5040    int e_hlen, ip_hlen;
5041    uint16_t proto;
5042    uint8_t hlen;
5043    uint16_t tmp_csum;
5044    uint32_t *tmp_uh;
5045
5046    /* get the Ethernet header */
5047    eh = mtod(m, struct ether_vlan_header *);
5048
5049    /* handle VLAN encapsulation if present */
5050    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5051        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5052        proto  = ntohs(eh->evl_proto);
5053    } else {
5054        e_hlen = ETHER_HDR_LEN;
5055        proto  = ntohs(eh->evl_encap_proto);
5056    }
5057
5058    switch (proto) {
5059    case ETHERTYPE_IP:
5060        /* get the IP header, if mbuf len < 20 then header in next mbuf */
5061        ip4 = (m->m_len < sizeof(struct ip)) ?
5062                  (struct ip *)m->m_next->m_data :
5063                  (struct ip *)(m->m_data + e_hlen);
5064        /* ip_hl is in 32-bit words; convert to 16-bit words for the parse BD */
5065        ip_hlen = (ip4->ip_hl << 1);
5066        ip = (caddr_t)ip4;
5067        break;
5068    case ETHERTYPE_IPV6:
5069        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5070        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5071                  (struct ip6_hdr *)m->m_next->m_data :
5072                  (struct ip6_hdr *)(m->m_data + e_hlen);
5073        /* XXX cannot support offload with IPv6 extensions */
5074        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5075        ip = (caddr_t)ip6;
5076        break;
5077    default:
5078        /* We can't offload in this case... */
5079        /* XXX error stat ??? */
5080        return (0);
5081    }
5082
5083    hlen = (e_hlen >> 1);
5084
5085    /* note that rest of global_data is indirectly zeroed here */
5086    if (m->m_flags & M_VLANTAG) {
5087        pbd->global_data =
5088            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5089    } else {
5090        pbd->global_data = htole16(hlen);
5091    }
5092
5093    pbd->ip_hlen_w = ip_hlen;
5094
5095    hlen += pbd->ip_hlen_w;
5096
5097    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5098
5099    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5100                                  CSUM_TSO |
5101                                  CSUM_TCP_IPV6)) {
5102        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5103        /* th_off is in 32-bit words; convert to 16-bit words */
5104        hlen += (uint16_t)(th->th_off << 1);
5105    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5106                                         CSUM_UDP_IPV6)) {
5107        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5108        hlen += (sizeof(struct udphdr) / 2);
5109    } else {
5110        /* valid case as only CSUM_IP was set */
5111        return (0);
5112    }
5113
5114    pbd->total_hlen_w = htole16(hlen);
5115
5116    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5117                                  CSUM_TSO |
5118                                  CSUM_TCP_IPV6)) {
5119        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5120        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5121    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5122                                         CSUM_UDP_IPV6)) {
5123        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5124
5125        /*
5126         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5127         * checksums and does not know anything about the UDP header and where
5128         * the checksum field is located. It only knows about TCP. Therefore
5129         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5130         * offload. Since the checksum field offset for TCP is 16 bytes and
5131         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5132         * bytes less than the start of the UDP header. This allows the
5133         * hardware to write the checksum in the correct spot. But the
5134         * hardware will compute a checksum which includes the last 10 bytes
5135         * of the IP header. To correct this we tweak the stack computed
5136         * pseudo checksum by folding in the calculation of the inverse
5137         * checksum for those final 10 bytes of the IP header. This allows
5138         * the correct checksum to be computed by the hardware.
5139         */
5140
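        /*
         * For a standard 20-byte IPv4 header (no options) the 10 bytes
         * immediately preceding the UDP header are the IP header checksum
         * (2 bytes) followed by the source and destination addresses
         * (4 bytes each); in_pseudo() below folds those into the correction.
         */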
5141        /* set pointer 10 bytes before UDP header */
5142        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5143
5144        /* calculate a pseudo header checksum over the first 10 bytes */
5145        tmp_csum = in_pseudo(*tmp_uh,
5146                             *(tmp_uh + 1),
5147                             *(uint16_t *)(tmp_uh + 2));
5148
5149        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5150    }
5151
5152    return (hlen * 2); /* entire header length, number of bytes */
5153}
5154
5155static void
5156bxe_set_pbd_lso_e2(struct mbuf *m,
5157                   uint32_t    *parsing_data)
5158{
5159    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5160                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5161                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5162
5163    /* XXX test for IPv6 with extension header... */
5164}
5165
5166static void
5167bxe_set_pbd_lso(struct mbuf                *m,
5168                struct eth_tx_parse_bd_e1x *pbd)
5169{
5170    struct ether_vlan_header *eh = NULL;
5171    struct ip *ip = NULL;
5172    struct tcphdr *th = NULL;
5173    int e_hlen;
5174
5175    /* get the Ethernet header */
5176    eh = mtod(m, struct ether_vlan_header *);
5177
5178    /* handle VLAN encapsulation if present */
5179    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5180                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5181
5182    /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */
5183    /* XXX assuming IPv4 */
5184    ip = (struct ip *)(m->m_data + e_hlen);
5185    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5186
5187    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5188    pbd->tcp_send_seq = ntohl(th->th_seq);
5189    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5190
5191#if 1
5192        /* XXX IPv4 */
5193        pbd->ip_id = ntohs(ip->ip_id);
5194        pbd->tcp_pseudo_csum =
5195            ntohs(in_pseudo(ip->ip_src.s_addr,
5196                            ip->ip_dst.s_addr,
5197                            htons(IPPROTO_TCP)));
5198#else
5199        /* XXX IPv6 */
5200        pbd->tcp_pseudo_csum =
5201            ntohs(in_pseudo(&ip6->ip6_src,
5202                            &ip6->ip6_dst,
5203                            htons(IPPROTO_TCP)));
5204#endif
5205
5206    pbd->global_data |=
5207        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5208}
5209
5210/*
5211 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5212 * visible to the controller.
5213 *
5214 * If an mbuf is submitted to this routine and cannot be given to the
5215 * controller (e.g. it has too many fragments) then the function may free
5216 * the mbuf and return to the caller.
5217 *
5218 * Returns:
5219 *   0 = Success, !0 = Failure
5220 *   Note the side effect that an mbuf may be freed if it causes a problem.
5221 */
5222static int
5223bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5224{
5225    bus_dma_segment_t segs[32];
5226    struct mbuf *m0;
5227    struct bxe_sw_tx_bd *tx_buf;
5228    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5229    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5230    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5231    struct eth_tx_bd *tx_data_bd;
5232    struct eth_tx_bd *tx_total_pkt_size_bd;
5233    struct eth_tx_start_bd *tx_start_bd;
5234    uint16_t bd_prod, pkt_prod, total_pkt_size;
5235    uint8_t mac_type;
5236    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5237    struct bxe_softc *sc;
5238    uint16_t tx_bd_avail;
5239    struct ether_vlan_header *eh;
5240    uint32_t pbd_e2_parsing_data = 0;
5241    uint8_t hlen = 0;
5242    int tmp_bd;
5243    int i;
5244
5245    sc = fp->sc;
5246
5247#if __FreeBSD_version >= 800000
5248    M_ASSERTPKTHDR(*m_head);
5249#endif /* #if __FreeBSD_version >= 800000 */
5250
5251    m0 = *m_head;
5252    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5253    tx_start_bd = NULL;
5254    tx_data_bd = NULL;
5255    tx_total_pkt_size_bd = NULL;
5256
5257    /* get the H/W pointer for packets and BDs */
5258    pkt_prod = fp->tx_pkt_prod;
5259    bd_prod = fp->tx_bd_prod;
5260
5261    mac_type = UNICAST_ADDRESS;
5262
5263    /* map the mbuf into the next open DMAable memory */
5264    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5265    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5266                                    tx_buf->m_map, m0,
5267                                    segs, &nsegs, BUS_DMA_NOWAIT);
5268
5269    /* mapping errors */
5270    if(__predict_false(error != 0)) {
5271        fp->eth_q_stats.tx_dma_mapping_failure++;
5272        if (error == ENOMEM) {
5273            /* resource issue, try again later */
5274            rc = ENOMEM;
5275        } else if (error == EFBIG) {
5276            /* possibly recoverable with defragmentation */
5277            fp->eth_q_stats.mbuf_defrag_attempts++;
5278            m0 = m_defrag(*m_head, M_NOWAIT);
5279            if (m0 == NULL) {
5280                fp->eth_q_stats.mbuf_defrag_failures++;
5281                rc = ENOBUFS;
5282            } else {
5283                /* defrag successful, try mapping again */
5284                *m_head = m0;
5285                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5286                                                tx_buf->m_map, m0,
5287                                                segs, &nsegs, BUS_DMA_NOWAIT);
5288                if (error) {
5289                    fp->eth_q_stats.tx_dma_mapping_failure++;
5290                    rc = error;
5291                }
5292            }
5293        } else {
5294            /* unknown, unrecoverable mapping error */
5295            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5296            bxe_dump_mbuf(sc, m0, FALSE);
5297            rc = error;
5298        }
5299
5300        goto bxe_tx_encap_continue;
5301    }
5302
5303    tx_bd_avail = bxe_tx_avail(sc, fp);
5304
5305    /* make sure there is enough room in the send queue */
5306    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5307        /* Recoverable, try again later. */
5308        fp->eth_q_stats.tx_hw_queue_full++;
5309        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5310        rc = ENOMEM;
5311        goto bxe_tx_encap_continue;
5312    }
5313
5314    /* capture the current H/W TX chain high watermark */
5315    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5316                        (TX_BD_USABLE - tx_bd_avail))) {
5317        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5318    }
5319
5320    /* make sure it fits in the packet window */
5321    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5322        /*
5323         * The mbuf may be too big for the controller to handle. If the frame
5324         * is a TSO frame we'll need to do an additional check.
5325         */
5326        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5327            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5328                goto bxe_tx_encap_continue; /* OK to send */
5329            } else {
5330                fp->eth_q_stats.tx_window_violation_tso++;
5331            }
5332        } else {
5333            fp->eth_q_stats.tx_window_violation_std++;
5334        }
5335
5336        /* let's try to defragment this mbuf and remap it */
5337        fp->eth_q_stats.mbuf_defrag_attempts++;
5338        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5339
5340        m0 = m_defrag(*m_head, M_NOWAIT);
5341        if (m0 == NULL) {
5342            fp->eth_q_stats.mbuf_defrag_failures++;
5343            /* Ugh, just drop the frame... :( */
5344            rc = ENOBUFS;
5345        } else {
5346            /* defrag successful, try mapping again */
5347            *m_head = m0;
5348            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5349                                            tx_buf->m_map, m0,
5350                                            segs, &nsegs, BUS_DMA_NOWAIT);
5351            if (error) {
5352                fp->eth_q_stats.tx_dma_mapping_failure++;
5353                /* No sense in trying to defrag/copy chain, drop it. :( */
5354                rc = error;
5355            }
5356            else {
5357                /* if the chain is still too long then drop it */
5358                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5359                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5360                    rc = ENODEV;
5361                }
5362            }
5363        }
5364    }
5365
5366bxe_tx_encap_continue:
5367
5368    /* Check for errors */
5369    if (rc) {
5370        if (rc == ENOMEM) {
5371            /* recoverable, try again later */
5372        } else {
5373            fp->eth_q_stats.tx_soft_errors++;
5374            fp->eth_q_stats.mbuf_alloc_tx--;
5375            m_freem(*m_head);
5376            *m_head = NULL;
5377        }
5378
5379        return (rc);
5380    }
5381
5382    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5383    if (m0->m_flags & M_BCAST) {
5384        mac_type = BROADCAST_ADDRESS;
5385    } else if (m0->m_flags & M_MCAST) {
5386        mac_type = MULTICAST_ADDRESS;
5387    }
5388
5389    /* store the mbuf into the mbuf ring */
5390    tx_buf->m        = m0;
5391    tx_buf->first_bd = fp->tx_bd_prod;
5392    tx_buf->flags    = 0;
5393
5394    /* prepare the first transmit (start) BD for the mbuf */
5395    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5396
5397    BLOGD(sc, DBG_TX,
5398          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5399          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5400
5401    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5402    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5403    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5404    total_pkt_size += tx_start_bd->nbytes;
5405    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5406
5407    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5408
5409    /* all frames have at least Start BD + Parsing BD */
5410    nbds = nsegs + 1;
5411    tx_start_bd->nbd = htole16(nbds);
5412
5413    if (m0->m_flags & M_VLANTAG) {
5414        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5415        tx_start_bd->bd_flags.as_bitfield |=
5416            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5417    } else {
5418        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5419        if (IS_VF(sc)) {
5420            /* map ethernet header to find type and header length */
5421            eh = mtod(m0, struct ether_vlan_header *);
5422            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5423        } else {
5424            /* used by FW for packet accounting */
5425            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5426        }
5427    }
5428
5429    /*
5430     * Advance to the parsing BD in the chain. The parsing BD is always added
5431     * even though it is only used for TSO and checksum offload.
5432     */
5433    bd_prod = TX_BD_NEXT(bd_prod);
5434
5435    if (m0->m_pkthdr.csum_flags) {
5436        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5437            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5438            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5439        }
5440
5441        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5442            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5443                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5444        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5445            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5446                                                  ETH_TX_BD_FLAGS_IS_UDP |
5447                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5448        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5449                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5450            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5451        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5452            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5453                                                  ETH_TX_BD_FLAGS_IS_UDP);
5454        }
5455    }
5456
5457    if (!CHIP_IS_E1x(sc)) {
5458        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5459        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5460
5461        if (m0->m_pkthdr.csum_flags) {
5462            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5463        }
5464
5465        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5466                 mac_type);
5467    } else {
5468        uint16_t global_data = 0;
5469
5470        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5471        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5472
5473        if (m0->m_pkthdr.csum_flags) {
5474            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5475        }
5476
5477        SET_FLAG(global_data,
5478                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5479        pbd_e1x->global_data |= htole16(global_data);
5480    }
5481
5482    /* setup the parsing BD with TSO specific info */
5483    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5484        fp->eth_q_stats.tx_ofld_frames_lso++;
5485        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5486
5487        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5488            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5489
5490            /* split the first BD into header/data making the fw job easy */
5491            nbds++;
5492            tx_start_bd->nbd = htole16(nbds);
5493            tx_start_bd->nbytes = htole16(hlen);
5494
5495            bd_prod = TX_BD_NEXT(bd_prod);
5496
5497            /* new transmit BD after the tx_parse_bd */
5498            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5499            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5500            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5501            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5502            if (tx_total_pkt_size_bd == NULL) {
5503                tx_total_pkt_size_bd = tx_data_bd;
5504            }
5505
5506            BLOGD(sc, DBG_TX,
5507                  "TSO split header size is %d (%x:%x) nbds %d\n",
5508                  le16toh(tx_start_bd->nbytes),
5509                  le32toh(tx_start_bd->addr_hi),
5510                  le32toh(tx_start_bd->addr_lo),
5511                  nbds);
5512        }
5513
5514        if (!CHIP_IS_E1x(sc)) {
5515            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5516        } else {
5517            bxe_set_pbd_lso(m0, pbd_e1x);
5518        }
5519    }
5520
5521    if (pbd_e2_parsing_data) {
5522        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5523    }
5524
5525    /* prepare remaining BDs, start tx bd contains first seg/frag */
5526    for (i = 1; i < nsegs ; i++) {
5527        bd_prod = TX_BD_NEXT(bd_prod);
5528        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5529        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5530        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5531        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5532        if (tx_total_pkt_size_bd == NULL) {
5533            tx_total_pkt_size_bd = tx_data_bd;
5534        }
5535        total_pkt_size += tx_data_bd->nbytes;
5536    }
5537
5538    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5539
5540    if (tx_total_pkt_size_bd != NULL) {
5541        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5542    }
5543
5544    if (__predict_false(sc->debug & DBG_TX)) {
5545        tmp_bd = tx_buf->first_bd;
5546        for (i = 0; i < nbds; i++)
5547        {
5548            if (i == 0) {
5549                BLOGD(sc, DBG_TX,
5550                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5551                      "bd_flags=0x%x hdr_nbds=%d\n",
5552                      tx_start_bd,
5553                      tmp_bd,
5554                      le16toh(tx_start_bd->nbd),
5555                      le16toh(tx_start_bd->vlan_or_ethertype),
5556                      tx_start_bd->bd_flags.as_bitfield,
5557                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5558            } else if (i == 1) {
5559                if (pbd_e1x) {
5560                    BLOGD(sc, DBG_TX,
5561                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5562                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5563                          "tcp_seq=%u total_hlen_w=%u\n",
5564                          pbd_e1x,
5565                          tmp_bd,
5566                          pbd_e1x->global_data,
5567                          pbd_e1x->ip_hlen_w,
5568                          pbd_e1x->ip_id,
5569                          pbd_e1x->lso_mss,
5570                          pbd_e1x->tcp_flags,
5571                          pbd_e1x->tcp_pseudo_csum,
5572                          pbd_e1x->tcp_send_seq,
5573                          le16toh(pbd_e1x->total_hlen_w));
5574                } else { /* if (pbd_e2) */
5575                    BLOGD(sc, DBG_TX,
5576                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5577                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5578                          pbd_e2,
5579                          tmp_bd,
5580                          pbd_e2->data.mac_addr.dst_hi,
5581                          pbd_e2->data.mac_addr.dst_mid,
5582                          pbd_e2->data.mac_addr.dst_lo,
5583                          pbd_e2->data.mac_addr.src_hi,
5584                          pbd_e2->data.mac_addr.src_mid,
5585                          pbd_e2->data.mac_addr.src_lo,
5586                          pbd_e2->parsing_data);
5587                }
5588            }
5589
5590            if (i != 1) { /* skip the parse BD as it doesn't hold data */
5591                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5592                BLOGD(sc, DBG_TX,
5593                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5594                      tx_data_bd,
5595                      tmp_bd,
5596                      le16toh(tx_data_bd->nbytes),
5597                      le32toh(tx_data_bd->addr_hi),
5598                      le32toh(tx_data_bd->addr_lo));
5599            }
5600
5601            tmp_bd = TX_BD_NEXT(tmp_bd);
5602        }
5603    }
5604
5605    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5606
5607    /* update TX BD producer index value for next TX */
5608    bd_prod = TX_BD_NEXT(bd_prod);
5609
5610    /*
5611     * If the chain of tx_bd's describing this frame is adjacent to or spans
5612     * an eth_tx_next_bd element then we need to increment the nbds value.
5613     */
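    /*
     * (TX_BD_IDX() yields the producer's offset within its BD page; an
     * offset smaller than this frame's BD count means the frame wrapped
     * across a page boundary and consumed one eth_tx_next_bd element,
     * which is accounted for when advancing the doorbell producer below.)
     */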
5614    if (TX_BD_IDX(bd_prod) < nbds) {
5615        nbds++;
5616    }
5617
5618    /* don't allow reordering of writes for nbd and packets */
5619    mb();
5620
5621    fp->tx_db.data.prod += nbds;
5622
5623    /* producer points to the next free tx_bd at this point */
5624    fp->tx_pkt_prod++;
5625    fp->tx_bd_prod = bd_prod;
5626
5627    DOORBELL(sc, fp->index, fp->tx_db.raw);
5628
5629    fp->eth_q_stats.tx_pkts++;
5630
5631    /* Prevent speculative reads from getting ahead of the status block. */
5632    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5633                      0, 0, BUS_SPACE_BARRIER_READ);
5634
5635    /* Prevent speculative reads from getting ahead of the doorbell. */
5636    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5637                      0, 0, BUS_SPACE_BARRIER_READ);
5638
5639    return (0);
5640}
5641
5642static void
5643bxe_tx_start_locked(struct bxe_softc *sc,
5644                    if_t ifp,
5645                    struct bxe_fastpath *fp)
5646{
5647    struct mbuf *m = NULL;
5648    int tx_count = 0;
5649    uint16_t tx_bd_avail;
5650
5651    BXE_FP_TX_LOCK_ASSERT(fp);
5652
5653    /* keep adding entries while there are frames to send */
5654    while (!if_sendq_empty(ifp)) {
5655
5656        /*
5657         * check for any frames to send
5658         * dequeue can still be NULL even if queue is not empty
5659         */
5660        m = if_dequeue(ifp);
5661        if (__predict_false(m == NULL)) {
5662            break;
5663        }
5664
5665        /* the mbuf now belongs to us */
5666        fp->eth_q_stats.mbuf_alloc_tx++;
5667
5668        /*
5669         * Put the frame into the transmit ring. If we don't have room,
5670         * place the mbuf back at the head of the TX queue, set the
5671         * OACTIVE flag, and wait for the NIC to drain the chain.
5672         */
5673        if (__predict_false(bxe_tx_encap(fp, &m))) {
5674            fp->eth_q_stats.tx_encap_failures++;
5675            if (m != NULL) {
5676                /* mark the TX queue as full and return the frame */
5677                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5678                if_sendq_prepend(ifp, m);
5679                fp->eth_q_stats.mbuf_alloc_tx--;
5680                fp->eth_q_stats.tx_queue_xoff++;
5681            }
5682
5683            /* stop looking for more work */
5684            break;
5685        }
5686
5687        /* the frame was enqueued successfully */
5688        tx_count++;
5689
5690        /* send a copy of the frame to any BPF listeners. */
5691        if_etherbpfmtap(ifp, m);
5692
5693        tx_bd_avail = bxe_tx_avail(sc, fp);
5694
5695        /* handle any completions if we're running low */
5696        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5697            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5698            bxe_txeof(sc, fp);
5699            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5700                break;
5701            }
5702        }
5703    }
5704
5705    /* all TX packets were dequeued and/or the tx ring is full */
5706    if (tx_count > 0) {
5707        /* reset the TX watchdog timeout timer */
5708        fp->watchdog_timer = BXE_TX_TIMEOUT;
5709    }
5710}
5711
5712/* Legacy (non-RSS) dispatch routine */
5713static void
5714bxe_tx_start(if_t ifp)
5715{
5716    struct bxe_softc *sc;
5717    struct bxe_fastpath *fp;
5718
5719    sc = if_getsoftc(ifp);
5720
5721    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5722        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5723        return;
5724    }
5725
5726    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5727        BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
5728        return;
5729    }
5730
5731    if (!sc->link_vars.link_up) {
5732        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5733        return;
5734    }
5735
5736    fp = &sc->fp[0];
5737
5738    BXE_FP_TX_LOCK(fp);
5739    bxe_tx_start_locked(sc, ifp, fp);
5740    BXE_FP_TX_UNLOCK(fp);
5741}
5742
5743#if __FreeBSD_version >= 800000
5744
5745static int
5746bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5747                       if_t                ifp,
5748                       struct bxe_fastpath *fp,
5749                       struct mbuf         *m)
5750{
5751    struct buf_ring *tx_br = fp->tx_br;
5752    struct mbuf *next;
5753    int depth, rc, tx_count;
5754    uint16_t tx_bd_avail;
5755
5756    rc = tx_count = 0;
5757
5758    BXE_FP_TX_LOCK_ASSERT(fp);
5759
5760    if (!tx_br) {
5761        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5762        return (EINVAL);
5763    }
5764
5765    if (!sc->link_vars.link_up ||
5766        (if_getdrvflags(ifp) &
5767        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
5768        rc = drbr_enqueue(ifp, tx_br, m);
5769        goto bxe_tx_mq_start_locked_exit;
5770    }
5771
5772    /* fetch the depth of the driver queue */
5773    depth = drbr_inuse_drv(ifp, tx_br);
5774    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5775        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5776    }
5777
5778    if (m == NULL) {
5779        /* no new work, check for pending frames */
5780        next = drbr_dequeue_drv(ifp, tx_br);
5781    } else if (drbr_needs_enqueue_drv(ifp, tx_br)) {
5782        /* have both new and pending work, maintain packet order */
5783        rc = drbr_enqueue(ifp, tx_br, m);
5784        if (rc != 0) {
5785            fp->eth_q_stats.tx_soft_errors++;
5786            goto bxe_tx_mq_start_locked_exit;
5787        }
5788        next = drbr_dequeue_drv(ifp, tx_br);
5789    } else {
5790        /* new work only and nothing pending */
5791        next = m;
5792    }
5793
5794    /* keep adding entries while there are frames to send */
5795    while (next != NULL) {
5796
5797        /* the mbuf now belongs to us */
5798        fp->eth_q_stats.mbuf_alloc_tx++;
5799
5800        /*
5801         * Put the frame into the transmit ring. If we don't have room,
5802         * place the mbuf back at the head of the TX queue, set the
5803         * OACTIVE flag, and wait for the NIC to drain the chain.
5804         */
5805        rc = bxe_tx_encap(fp, &next);
5806        if (__predict_false(rc != 0)) {
5807            fp->eth_q_stats.tx_encap_failures++;
5808            if (next != NULL) {
5809                /* mark the TX queue as full and save the frame */
5810                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5811                /* XXX this may reorder the frame */
5812                rc = drbr_enqueue(ifp, tx_br, next);
5813                fp->eth_q_stats.mbuf_alloc_tx--;
5814                fp->eth_q_stats.tx_frames_deferred++;
5815            }
5816
5817            /* stop looking for more work */
5818            break;
5819        }
5820
5821        /* the transmit frame was enqueued successfully */
5822        tx_count++;
5823
5824        /* send a copy of the frame to any BPF listeners */
5825        if_etherbpfmtap(ifp, next);
5826
5827        tx_bd_avail = bxe_tx_avail(sc, fp);
5828
5829        /* handle any completions if we're running low */
5830        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5831            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5832            bxe_txeof(sc, fp);
5833            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5834                break;
5835            }
5836        }
5837
5838        next = drbr_dequeue_drv(ifp, tx_br);
5839    }
5840
5841    /* all TX packets were dequeued and/or the tx ring is full */
5842    if (tx_count > 0) {
5843        /* reset the TX watchdog timeout timer */
5844        fp->watchdog_timer = BXE_TX_TIMEOUT;
5845    }
5846
5847bxe_tx_mq_start_locked_exit:
5848
5849    return (rc);
5850}
5851
5852/* Multiqueue (TSS) dispatch routine. */
5853static int
5854bxe_tx_mq_start(struct ifnet *ifp,
5855                struct mbuf  *m)
5856{
5857    struct bxe_softc *sc = if_getsoftc(ifp);
5858    struct bxe_fastpath *fp;
5859    int fp_index, rc;
5860
5861    fp_index = 0; /* default is the first queue */
5862
5863    /* check if flowid is set */
5864
5865    if (BXE_VALID_FLOWID(m))
5866        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5867
5868    fp = &sc->fp[fp_index];
5869
5870    if (BXE_FP_TX_TRYLOCK(fp)) {
5871        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5872        BXE_FP_TX_UNLOCK(fp);
5873    } else
5874        rc = drbr_enqueue(ifp, fp->tx_br, m);
5875
5876    return (rc);
5877}
5878
5879static void
5880bxe_mq_flush(struct ifnet *ifp)
5881{
5882    struct bxe_softc *sc = if_getsoftc(ifp);
5883    struct bxe_fastpath *fp;
5884    struct mbuf *m;
5885    int i;
5886
5887    for (i = 0; i < sc->num_queues; i++) {
5888        fp = &sc->fp[i];
5889
5890        if (fp->state != BXE_FP_STATE_OPEN) {
5891            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5892                  fp->index, fp->state);
5893            continue;
5894        }
5895
5896        if (fp->tx_br != NULL) {
5897            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5898            BXE_FP_TX_LOCK(fp);
5899            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5900                m_freem(m);
5901            }
5902            BXE_FP_TX_UNLOCK(fp);
5903        }
5904    }
5905
5906    if_qflush(ifp);
5907}
5908
5909#endif /* FreeBSD_version >= 800000 */
5910
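/*
 * Returns the number of ILT lines needed to map this function's connection
 * contexts (CIDs); with SR-IOV the VF CID range is included as well.
 */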
5911static uint16_t
5912bxe_cid_ilt_lines(struct bxe_softc *sc)
5913{
5914    if (IS_SRIOV(sc)) {
5915        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5916    }
5917    return (L2_ILT_LINES(sc));
5918}
5919
5920static void
5921bxe_ilt_set_info(struct bxe_softc *sc)
5922{
5923    struct ilt_client_info *ilt_client;
5924    struct ecore_ilt *ilt = sc->ilt;
5925    uint16_t line = 0;
5926
5927    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5928    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5929
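    /*
     * ILT lines are handed out to the clients sequentially: CDU (connection
     * contexts) first, then QM, and, when CNIC is supported, SRC and TM.
     * Each client records its [start, end] line range, page size and flags.
     */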
5930    /* CDU */
5931    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5932    ilt_client->client_num = ILT_CLIENT_CDU;
5933    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5934    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5935    ilt_client->start = line;
5936    line += bxe_cid_ilt_lines(sc);
5937
5938    if (CNIC_SUPPORT(sc)) {
5939        line += CNIC_ILT_LINES;
5940    }
5941
5942    ilt_client->end = (line - 1);
5943
5944    BLOGD(sc, DBG_LOAD,
5945          "ilt client[CDU]: start %d, end %d, "
5946          "psz 0x%x, flags 0x%x, hw psz %d\n",
5947          ilt_client->start, ilt_client->end,
5948          ilt_client->page_size,
5949          ilt_client->flags,
5950          ilog2(ilt_client->page_size >> 12));
5951
5952    /* QM */
5953    if (QM_INIT(sc->qm_cid_count)) {
5954        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5955        ilt_client->client_num = ILT_CLIENT_QM;
5956        ilt_client->page_size = QM_ILT_PAGE_SZ;
5957        ilt_client->flags = 0;
5958        ilt_client->start = line;
5959
5960        /* 4 bytes for each cid */
5961        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5962                             QM_ILT_PAGE_SZ);
5963
5964        ilt_client->end = (line - 1);
5965
5966        BLOGD(sc, DBG_LOAD,
5967              "ilt client[QM]: start %d, end %d, "
5968              "psz 0x%x, flags 0x%x, hw psz %d\n",
5969              ilt_client->start, ilt_client->end,
5970              ilt_client->page_size, ilt_client->flags,
5971              ilog2(ilt_client->page_size >> 12));
5972    }
5973
5974    if (CNIC_SUPPORT(sc)) {
5975        /* SRC */
5976        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5977        ilt_client->client_num = ILT_CLIENT_SRC;
5978        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5979        ilt_client->flags = 0;
5980        ilt_client->start = line;
5981        line += SRC_ILT_LINES;
5982        ilt_client->end = (line - 1);
5983
5984        BLOGD(sc, DBG_LOAD,
5985              "ilt client[SRC]: start %d, end %d, "
5986              "psz 0x%x, flags 0x%x, hw psz %d\n",
5987              ilt_client->start, ilt_client->end,
5988              ilt_client->page_size, ilt_client->flags,
5989              ilog2(ilt_client->page_size >> 12));
5990
5991        /* TM */
5992        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5993        ilt_client->client_num = ILT_CLIENT_TM;
5994        ilt_client->page_size = TM_ILT_PAGE_SZ;
5995        ilt_client->flags = 0;
5996        ilt_client->start = line;
5997        line += TM_ILT_LINES;
5998        ilt_client->end = (line - 1);
5999
6000        BLOGD(sc, DBG_LOAD,
6001              "ilt client[TM]: start %d, end %d, "
6002              "psz 0x%x, flags 0x%x, hw psz %d\n",
6003              ilt_client->start, ilt_client->end,
6004              ilt_client->page_size, ilt_client->flags,
6005              ilog2(ilt_client->page_size >> 12));
6006    }
6007
6008    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
6009}
6010
6011static void
6012bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
6013{
6014    int i;
6015    uint32_t rx_buf_size;
6016
6017    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
6018
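    /*
     * Pick the per-BD buffer and mbuf cluster size for each queue: frames
     * that fit in a standard (MCLBYTES) or page-sized (MJUMPAGESIZE) cluster
     * get a single buffer of that size; anything larger falls back to a
     * smaller per-BD buffer, presumably relying on more than one RX BD/SGE
     * to hold a single frame.
     */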
6019    for (i = 0; i < sc->num_queues; i++) {
6020        if (rx_buf_size <= MCLBYTES) {
6021            sc->fp[i].rx_buf_size = rx_buf_size;
6022            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6023        } else if (rx_buf_size <= MJUMPAGESIZE) {
6024            sc->fp[i].rx_buf_size = rx_buf_size;
6025            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6026        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
6027            sc->fp[i].rx_buf_size = MCLBYTES;
6028            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6029        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
6030            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
6031            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6032        } else {
6033            sc->fp[i].rx_buf_size = MCLBYTES;
6034            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6035        }
6036    }
6037}
6038
6039static int
6040bxe_alloc_ilt_mem(struct bxe_softc *sc)
6041{
6042    int rc = 0;
6043
6044    if ((sc->ilt =
6045         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6046                                    M_BXE_ILT,
6047                                    (M_NOWAIT | M_ZERO))) == NULL) {
6048        rc = 1;
6049    }
6050
6051    return (rc);
6052}
6053
6054static int
6055bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6056{
6057    int rc = 0;
6058
6059    if ((sc->ilt->lines =
6060         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6061                                    M_BXE_ILT,
6062                                    (M_NOWAIT | M_ZERO))) == NULL) {
6063        rc = 1;
6064    }
6065
6066    return (rc);
6067}
6068
6069static void
6070bxe_free_ilt_mem(struct bxe_softc *sc)
6071{
6072    if (sc->ilt != NULL) {
6073        free(sc->ilt, M_BXE_ILT);
6074        sc->ilt = NULL;
6075    }
6076}
6077
6078static void
6079bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6080{
6081    if (sc->ilt->lines != NULL) {
6082        free(sc->ilt->lines, M_BXE_ILT);
6083        sc->ilt->lines = NULL;
6084    }
6085}
6086
6087static void
6088bxe_free_mem(struct bxe_softc *sc)
6089{
6090    int i;
6091
6092    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6093        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6094        sc->context[i].vcxt = NULL;
6095        sc->context[i].size = 0;
6096    }
6097
6098    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6099
6100    bxe_free_ilt_lines_mem(sc);
6101
6102}
6103
6104static int
6105bxe_alloc_mem(struct bxe_softc *sc)
6106{
6107    int context_size;
6108    int allocated;
6109    int i;
6110
6111    /*
6112     * Allocate memory for CDU context:
6113     * This memory is allocated separately and not in the generic ILT
6114     * functions because CDU differs in few aspects:
6115     * 1. There can be multiple entities allocating memory for context -
6116     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6117     * its own ILT lines.
6118     * 2. Since CDU page-size is not a single 4KB page (which is the case
6119     * for the other ILT clients), to be efficient we want to support
6120     * allocation of sub-page-size in the last entry.
6121     * 3. Context pointers are used by the driver to pass to FW / update
6122     * the context (for the other ILT clients the pointers are used just to
6123     * free the memory during unload).
6124     */
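    /*
     * Illustration (hypothetical sizes): if the contexts need 100 KB in
     * total and CDU_ILT_PAGE_SZ is 32 KB, the loop below allocates three
     * 32 KB chunks followed by one final 4 KB chunk.
     */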
6125    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6126    for (i = 0, allocated = 0; allocated < context_size; i++) {
6127        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6128                                  (context_size - allocated));
6129
6130        if (bxe_dma_alloc(sc, sc->context[i].size,
6131                          &sc->context[i].vcxt_dma,
6132                          "cdu context") != 0) {
6133            bxe_free_mem(sc);
6134            return (-1);
6135        }
6136
6137        sc->context[i].vcxt =
6138            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6139
6140        allocated += sc->context[i].size;
6141    }
6142
6143    bxe_alloc_ilt_lines_mem(sc);
6144
6145    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6146          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6147    {
6148        for (i = 0; i < 4; i++) {
6149            BLOGD(sc, DBG_LOAD,
6150                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6151                  i,
6152                  sc->ilt->clients[i].page_size,
6153                  sc->ilt->clients[i].start,
6154                  sc->ilt->clients[i].end,
6155                  sc->ilt->clients[i].client_num,
6156                  sc->ilt->clients[i].flags);
6157        }
6158    }
6159    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6160        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6161        bxe_free_mem(sc);
6162        return (-1);
6163    }
6164
6165    return (0);
6166}
6167
6168static void
6169bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6170{
6171    struct bxe_softc *sc;
6172    int i;
6173
6174    sc = fp->sc;
6175
6176    if (fp->rx_mbuf_tag == NULL) {
6177        return;
6178    }
6179
6180    /* free all mbufs and unload all maps */
6181    for (i = 0; i < RX_BD_TOTAL; i++) {
6182        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6183            bus_dmamap_sync(fp->rx_mbuf_tag,
6184                            fp->rx_mbuf_chain[i].m_map,
6185                            BUS_DMASYNC_POSTREAD);
6186            bus_dmamap_unload(fp->rx_mbuf_tag,
6187                              fp->rx_mbuf_chain[i].m_map);
6188        }
6189
6190        if (fp->rx_mbuf_chain[i].m != NULL) {
6191            m_freem(fp->rx_mbuf_chain[i].m);
6192            fp->rx_mbuf_chain[i].m = NULL;
6193            fp->eth_q_stats.mbuf_alloc_rx--;
6194        }
6195    }
6196}
6197
6198static void
6199bxe_free_tpa_pool(struct bxe_fastpath *fp)
6200{
6201    struct bxe_softc *sc;
6202    int i, max_agg_queues;
6203
6204    sc = fp->sc;
6205
6206    if (fp->rx_mbuf_tag == NULL) {
6207        return;
6208    }
6209
6210    max_agg_queues = MAX_AGG_QS(sc);
6211
6212    /* release all mbufs and unload all DMA maps in the TPA pool */
6213    for (i = 0; i < max_agg_queues; i++) {
6214        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6215            bus_dmamap_sync(fp->rx_mbuf_tag,
6216                            fp->rx_tpa_info[i].bd.m_map,
6217                            BUS_DMASYNC_POSTREAD);
6218            bus_dmamap_unload(fp->rx_mbuf_tag,
6219                              fp->rx_tpa_info[i].bd.m_map);
6220        }
6221
6222        if (fp->rx_tpa_info[i].bd.m != NULL) {
6223            m_freem(fp->rx_tpa_info[i].bd.m);
6224            fp->rx_tpa_info[i].bd.m = NULL;
6225            fp->eth_q_stats.mbuf_alloc_tpa--;
6226        }
6227    }
6228}
6229
6230static void
6231bxe_free_sge_chain(struct bxe_fastpath *fp)
6232{
6233    struct bxe_softc *sc;
6234    int i;
6235
6236    sc = fp->sc;
6237
6238    if (fp->rx_sge_mbuf_tag == NULL) {
6239        return;
6240    }
6241
6242    /* free all mbufs and unload all maps */
6243    for (i = 0; i < RX_SGE_TOTAL; i++) {
6244        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6245            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6246                            fp->rx_sge_mbuf_chain[i].m_map,
6247                            BUS_DMASYNC_POSTREAD);
6248            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6249                              fp->rx_sge_mbuf_chain[i].m_map);
6250        }
6251
6252        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6253            m_freem(fp->rx_sge_mbuf_chain[i].m);
6254            fp->rx_sge_mbuf_chain[i].m = NULL;
6255            fp->eth_q_stats.mbuf_alloc_sge--;
6256        }
6257    }
6258}
6259
6260static void
6261bxe_free_fp_buffers(struct bxe_softc *sc)
6262{
6263    struct bxe_fastpath *fp;
6264    int i;
6265
6266    for (i = 0; i < sc->num_queues; i++) {
6267        fp = &sc->fp[i];
6268
6269#if __FreeBSD_version >= 800000
6270        if (fp->tx_br != NULL) {
6271            /* just in case bxe_mq_flush() wasn't called */
6272            if (mtx_initialized(&fp->tx_mtx)) {
6273                struct mbuf *m;
6274
6275                BXE_FP_TX_LOCK(fp);
6276                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6277                    m_freem(m);
6278                BXE_FP_TX_UNLOCK(fp);
6279            }
6280        }
6281#endif
6282
6283        /* free all RX buffers */
6284        bxe_free_rx_bd_chain(fp);
6285        bxe_free_tpa_pool(fp);
6286        bxe_free_sge_chain(fp);
6287
6288        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6289            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6290                  fp->eth_q_stats.mbuf_alloc_rx);
6291        }
6292
6293        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6294            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6295                  fp->eth_q_stats.mbuf_alloc_sge);
6296        }
6297
6298        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6299            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6300                  fp->eth_q_stats.mbuf_alloc_tpa);
6301        }
6302
6303        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6304            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6305                  fp->eth_q_stats.mbuf_alloc_tx);
6306        }
6307
6308        /* XXX verify all mbufs were reclaimed */
6309    }
6310}
6311
6312static int
6313bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6314                     uint16_t            prev_index,
6315                     uint16_t            index)
6316{
6317    struct bxe_sw_rx_bd *rx_buf;
6318    struct eth_rx_bd *rx_bd;
6319    bus_dma_segment_t segs[1];
6320    bus_dmamap_t map;
6321    struct mbuf *m;
6322    int nsegs, rc;
6323
6324    rc = 0;
6325
6326    /* allocate the new RX BD mbuf */
6327    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6328    if (__predict_false(m == NULL)) {
6329        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6330        return (ENOBUFS);
6331    }
6332
6333    fp->eth_q_stats.mbuf_alloc_rx++;
6334
6335    /* initialize the mbuf buffer length */
6336    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6337
6338    /* map the mbuf into non-paged pool */
6339    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6340                                 fp->rx_mbuf_spare_map,
6341                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6342    if (__predict_false(rc != 0)) {
6343        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6344        m_freem(m);
6345        fp->eth_q_stats.mbuf_alloc_rx--;
6346        return (rc);
6347    }
6348
6349    /* all mbufs must map to a single segment */
6350    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6351
6352    /* release any existing RX BD mbuf mappings */
6353
6354    if (prev_index != index) {
6355        rx_buf = &fp->rx_mbuf_chain[prev_index];
6356
6357        if (rx_buf->m_map != NULL) {
6358            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6359                            BUS_DMASYNC_POSTREAD);
6360            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6361        }
6362
6363        /*
6364         * We only get here from bxe_rxeof() when the maximum number
6365         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6366         * holds the mbuf in the prev_index so it's OK to NULL it out
6367         * here without concern of a memory leak.
6368         */
6369        fp->rx_mbuf_chain[prev_index].m = NULL;
6370    }
6371
6372    rx_buf = &fp->rx_mbuf_chain[index];
6373
6374    if (rx_buf->m_map != NULL) {
6375        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6376                        BUS_DMASYNC_POSTREAD);
6377        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6378    }
6379
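    /*
     * The new mbuf was loaded above using the fastpath's spare DMA map, so a
     * mapping failure never disturbs the mbuf already in the ring. On
     * success, swap the spare map with the ring's now-unused map (the
     * previous slot's when prev_index != index): the slot at 'index' takes
     * the freshly loaded mapping and the released map becomes the new spare.
     */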
6380    /* save the mbuf and mapping info for a future packet */
6381    map = (prev_index != index) ?
6382              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6383    rx_buf->m_map = fp->rx_mbuf_spare_map;
6384    fp->rx_mbuf_spare_map = map;
6385    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6386                    BUS_DMASYNC_PREREAD);
6387    rx_buf->m = m;
6388
6389    rx_bd = &fp->rx_chain[index];
6390    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6391    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6392
6393    return (rc);
6394}
6395
6396static int
6397bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6398                      int                 queue)
6399{
6400    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6401    bus_dma_segment_t segs[1];
6402    bus_dmamap_t map;
6403    struct mbuf *m;
6404    int nsegs;
6405    int rc = 0;
6406
6407    /* allocate the new TPA mbuf */
6408    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6409    if (__predict_false(m == NULL)) {
6410        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6411        return (ENOBUFS);
6412    }
6413
6414    fp->eth_q_stats.mbuf_alloc_tpa++;
6415
6416    /* initialize the mbuf buffer length */
6417    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6418
6419    /* map the mbuf into non-paged pool */
6420    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6421                                 fp->rx_tpa_info_mbuf_spare_map,
6422                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6423    if (__predict_false(rc != 0)) {
6424        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6425        m_free(m);
6426        fp->eth_q_stats.mbuf_alloc_tpa--;
6427        return (rc);
6428    }
6429
6430    /* all mbufs must map to a single segment */
6431    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6432
6433    /* release any existing TPA mbuf mapping */
6434    if (tpa_info->bd.m_map != NULL) {
6435        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6436                        BUS_DMASYNC_POSTREAD);
6437        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6438    }
6439
6440    /* save the mbuf and mapping info for the TPA mbuf */
6441    map = tpa_info->bd.m_map;
6442    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6443    fp->rx_tpa_info_mbuf_spare_map = map;
6444    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6445                    BUS_DMASYNC_PREREAD);
6446    tpa_info->bd.m = m;
6447    tpa_info->seg = segs[0];
6448
6449    return (rc);
6450}
6451
6452/*
6453 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6454 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6455 * chain.
6456 */
6457static int
6458bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6459                      uint16_t            index)
6460{
6461    struct bxe_sw_rx_bd *sge_buf;
6462    struct eth_rx_sge *sge;
6463    bus_dma_segment_t segs[1];
6464    bus_dmamap_t map;
6465    struct mbuf *m;
6466    int nsegs;
6467    int rc = 0;
6468
6469    /* allocate a new SGE mbuf */
6470    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6471    if (__predict_false(m == NULL)) {
6472        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6473        return (ENOMEM);
6474    }
6475
6476    fp->eth_q_stats.mbuf_alloc_sge++;
6477
6478    /* initialize the mbuf buffer length */
6479    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6480
6481    /* map the SGE mbuf into non-paged pool */
6482    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6483                                 fp->rx_sge_mbuf_spare_map,
6484                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6485    if (__predict_false(rc != 0)) {
6486        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6487        m_freem(m);
6488        fp->eth_q_stats.mbuf_alloc_sge--;
6489        return (rc);
6490    }
6491
6492    /* all mbufs must map to a single segment */
6493    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6494
6495    sge_buf = &fp->rx_sge_mbuf_chain[index];
6496
6497    /* release any existing SGE mbuf mapping */
6498    if (sge_buf->m_map != NULL) {
6499        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6500                        BUS_DMASYNC_POSTREAD);
6501        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6502    }
6503
6504    /* save the mbuf and mapping info for a future packet */
6505    map = sge_buf->m_map;
6506    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6507    fp->rx_sge_mbuf_spare_map = map;
6508    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6509                    BUS_DMASYNC_PREREAD);
6510    sge_buf->m = m;
6511
6512    sge = &fp->rx_sge_chain[index];
6513    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6514    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6515
6516    return (rc);
6517}
6518
6519static __noinline int
6520bxe_alloc_fp_buffers(struct bxe_softc *sc)
6521{
6522    struct bxe_fastpath *fp;
6523    int i, j, rc = 0;
6524    int ring_prod, cqe_ring_prod;
6525    int max_agg_queues;
6526
6527    for (i = 0; i < sc->num_queues; i++) {
6528        fp = &sc->fp[i];
6529
6530        ring_prod = cqe_ring_prod = 0;
6531        fp->rx_bd_cons = 0;
6532        fp->rx_cq_cons = 0;
6533
6534        /* allocate buffers for the RX BDs in RX BD chain */
6535        for (j = 0; j < sc->max_rx_bufs; j++) {
6536            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6537            if (rc != 0) {
6538                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6539                      i, rc);
6540                goto bxe_alloc_fp_buffers_error;
6541            }
6542
6543            ring_prod     = RX_BD_NEXT(ring_prod);
6544            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6545        }
6546
6547        fp->rx_bd_prod = ring_prod;
6548        fp->rx_cq_prod = cqe_ring_prod;
6549        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6550
6551        max_agg_queues = MAX_AGG_QS(sc);
6552
6553        fp->tpa_enable = TRUE;
6554
6555        /* fill the TPA pool */
6556        for (j = 0; j < max_agg_queues; j++) {
6557            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6558            if (rc != 0) {
6559                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6560                          i, j);
6561                fp->tpa_enable = FALSE;
6562                goto bxe_alloc_fp_buffers_error;
6563            }
6564
6565            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6566        }
6567
6568        if (fp->tpa_enable) {
6569            /* fill the RX SGE chain */
6570            ring_prod = 0;
6571            for (j = 0; j < RX_SGE_USABLE; j++) {
6572                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6573                if (rc != 0) {
6574                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6575                              i, ring_prod);
6576                    fp->tpa_enable = FALSE;
6577                    ring_prod = 0;
6578                    goto bxe_alloc_fp_buffers_error;
6579                }
6580
6581                ring_prod = RX_SGE_NEXT(ring_prod);
6582            }
6583
6584            fp->rx_sge_prod = ring_prod;
6585        }
6586    }
6587
6588    return (0);
6589
6590bxe_alloc_fp_buffers_error:
6591
6592    /* unwind what was already allocated */
6593    bxe_free_rx_bd_chain(fp);
6594    bxe_free_tpa_pool(fp);
6595    bxe_free_sge_chain(fp);
6596
6597    return (ENOBUFS);
6598}
6599
6600static void
6601bxe_free_fw_stats_mem(struct bxe_softc *sc)
6602{
6603    bxe_dma_free(sc, &sc->fw_stats_dma);
6604
6605    sc->fw_stats_num = 0;
6606
6607    sc->fw_stats_req_size = 0;
6608    sc->fw_stats_req = NULL;
6609    sc->fw_stats_req_mapping = 0;
6610
6611    sc->fw_stats_data_size = 0;
6612    sc->fw_stats_data = NULL;
6613    sc->fw_stats_data_mapping = 0;
6614}
6615
6616static int
6617bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6618{
6619    uint8_t num_queue_stats;
6620    int num_groups;
6621
6622    /* number of queues for statistics is number of eth queues */
6623    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6624
6625    /*
6626     * Total number of FW statistics requests =
6627     *   1 for port stats + 1 for PF stats + num of queues
6628     */
6629    sc->fw_stats_num = (2 + num_queue_stats);
6630
6631    /*
6632     * Request is built from stats_query_header and an array of
6633     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6634     * rules. The real number of requests is configured in the
6635     * stats_query_header.
6636     */
6637    num_groups =
6638        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6639         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6640
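    /*
     * Worked example (illustrative only; STATS_QUERY_CMD_COUNT comes from the
     * FW HSI and is assumed to be 16 here): with 16 ETH queues,
     * fw_stats_num = 2 + 16 = 18 and num_groups = (18 / 16) + 1 = 2, while
     * with 4 queues fw_stats_num = 6 and a single group suffices.
     */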
6641    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6642          sc->fw_stats_num, num_groups);
6643
6644    sc->fw_stats_req_size =
6645        (sizeof(struct stats_query_header) +
6646         (num_groups * sizeof(struct stats_query_cmd_group)));
6647
6648    /*
6649     * Data for statistics requests + stats_counter.
6650     * stats_counter holds per-STORM counters that are incremented when
6651     * STORM has finished with the current request. Memory for FCoE
6652     * offloaded statistics is counted anyway, even if they will not be sent.
6653     * VF stats are not accounted for here as the data of VF stats is stored
6654     * in memory allocated by the VF, not here.
6655     */
6656    sc->fw_stats_data_size =
6657        (sizeof(struct stats_counter) +
6658         sizeof(struct per_port_stats) +
6659         sizeof(struct per_pf_stats) +
6660         /* sizeof(struct fcoe_statistics_params) + */
6661         (sizeof(struct per_queue_stats) * num_queue_stats));
6662
6663    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6664                      &sc->fw_stats_dma, "fw stats") != 0) {
6665        bxe_free_fw_stats_mem(sc);
6666        return (-1);
6667    }
6668
6669    /* set up the shortcuts */
6670
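    /*
     * The request area occupies the first fw_stats_req_size bytes of the
     * single DMA buffer allocated above and the data area immediately
     * follows it; the virtual and physical shortcut pointers below are
     * simply offsets into that one allocation.
     */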
6671    sc->fw_stats_req =
6672        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6673    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6674
6675    sc->fw_stats_data =
6676        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6677                                     sc->fw_stats_req_size);
6678    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6679                                 sc->fw_stats_req_size);
6680
6681    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6682          (uintmax_t)sc->fw_stats_req_mapping);
6683
6684    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6685          (uintmax_t)sc->fw_stats_data_mapping);
6686
6687    return (0);
6688}
6689
6690/*
6691 * Bits map:
6692 * 0-7  - Engine0 load counter.
6693 * 8-15 - Engine1 load counter.
6694 * 16   - Engine0 RESET_IN_PROGRESS bit.
6695 * 17   - Engine1 RESET_IN_PROGRESS bit.
6696 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6697 *        function on the engine
6698 * 19   - Engine1 ONE_IS_LOADED.
6699 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
6700 *        engines' leaders to complete (check both RESET_IN_PROGRESS bits,
6701 *        not just the one belonging to its engine).
6702 */
6703#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6704#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6705#define BXE_PATH0_LOAD_CNT_SHIFT  0
6706#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6707#define BXE_PATH1_LOAD_CNT_SHIFT  8
6708#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6709#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6710#define BXE_GLOBAL_RESET_BIT      0x00040000
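/*
 * Illustrative decode using the masks above (hypothetical register value):
 * val = 0x00050201 means the engine0 load byte is 0x01 (absolute function 0
 * has marked itself loaded, see bxe_set_pf_load() below), the engine1 load
 * byte is 0x02 (absolute function 1), BXE_PATH0_RST_IN_PROG_BIT is set and
 * BXE_GLOBAL_RESET_BIT is set.
 */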
6711
6712/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6713static void
6714bxe_set_reset_global(struct bxe_softc *sc)
6715{
6716    uint32_t val;
6717    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6718    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6719    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6720    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6721}
6722
6723/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6724static void
6725bxe_clear_reset_global(struct bxe_softc *sc)
6726{
6727    uint32_t val;
6728    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6729    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6730    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6731    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6732}
6733
6734/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6735static uint8_t
6736bxe_reset_is_global(struct bxe_softc *sc)
6737{
6738    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6739    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6740    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6741}
6742
6743/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6744static void
6745bxe_set_reset_done(struct bxe_softc *sc)
6746{
6747    uint32_t val;
6748    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6749                                 BXE_PATH0_RST_IN_PROG_BIT;
6750
6751    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6752
6753    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6754    /* Clear the bit */
6755    val &= ~bit;
6756    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6757
6758    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6759}
6760
6761/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6762static void
6763bxe_set_reset_in_progress(struct bxe_softc *sc)
6764{
6765    uint32_t val;
6766    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6767                                 BXE_PATH0_RST_IN_PROG_BIT;
6768
6769    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6770
6771    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6772    /* Set the bit */
6773    val |= bit;
6774    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6775
6776    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6777}
6778
6779/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6780static uint8_t
6781bxe_reset_is_done(struct bxe_softc *sc,
6782                  int              engine)
6783{
6784    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6785    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6786                            BXE_PATH0_RST_IN_PROG_BIT;
6787
6788    /* return false if bit is set */
6789    return (val & bit) ? FALSE : TRUE;
6790}
6791
6792/* get the load status for an engine, should be run under rtnl lock */
6793static uint8_t
6794bxe_get_load_status(struct bxe_softc *sc,
6795                    int              engine)
6796{
6797    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6798                             BXE_PATH0_LOAD_CNT_MASK;
6799    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6800                              BXE_PATH0_LOAD_CNT_SHIFT;
6801    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6802
6803    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6804
6805    val = ((val & mask) >> shift);
6806
6807    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6808
6809    return (val != 0);
6810}
6811
6812/* set pf load mark */
6813/* XXX needs to be under rtnl lock */
6814static void
6815bxe_set_pf_load(struct bxe_softc *sc)
6816{
6817    uint32_t val;
6818    uint32_t val1;
6819    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6820                                  BXE_PATH0_LOAD_CNT_MASK;
6821    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6822                                   BXE_PATH0_LOAD_CNT_SHIFT;
6823
6824    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6825
6826    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6827    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6828
6829    /* get the current counter value */
6830    val1 = ((val & mask) >> shift);
6831
6832    /* set bit of this PF */
6833    val1 |= (1 << SC_ABS_FUNC(sc));
6834
6835    /* clear the old value */
6836    val &= ~mask;
6837
6838    /* set the new one */
6839    val |= ((val1 << shift) & mask);
6840
6841    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6842
6843    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6844}
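/*
 * Worked example for bxe_set_pf_load() (illustrative values): on path 0
 * (mask 0x000000ff, shift 0) with SC_ABS_FUNC(sc) == 2 and the register
 * currently reading 0x00000001, val1 starts as 0x01 and becomes 0x05 after
 * setting bit 2; the low byte is then rewritten to 0x05 while all other
 * bits of the register are preserved.
 */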
6845
6846/* clear pf load mark */
6847/* XXX needs to be under rtnl lock */
6848static uint8_t
6849bxe_clear_pf_load(struct bxe_softc *sc)
6850{
6851    uint32_t val1, val;
6852    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6853                                  BXE_PATH0_LOAD_CNT_MASK;
6854    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6855                                   BXE_PATH0_LOAD_CNT_SHIFT;
6856
6857    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6858    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6859    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6860
6861    /* get the current counter value */
6862    val1 = (val & mask) >> shift;
6863
6864    /* clear bit of that PF */
6865    val1 &= ~(1 << SC_ABS_FUNC(sc));
6866
6867    /* clear the old value */
6868    val &= ~mask;
6869
6870    /* set the new one */
6871    val |= ((val1 << shift) & mask);
6872
6873    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6874    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6875    return (val1 != 0);
6876}
6877
6878/* send load request to the MCP and analyze the response */
6879static int
6880bxe_nic_load_request(struct bxe_softc *sc,
6881                     uint32_t         *load_code)
6882{
6883    /* init fw_seq */
6884    sc->fw_seq =
6885        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6886         DRV_MSG_SEQ_NUMBER_MASK);
6887
6888    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6889
6890    /* get the current FW pulse sequence */
6891    sc->fw_drv_pulse_wr_seq =
6892        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6893         DRV_PULSE_SEQ_MASK);
6894
6895    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6896          sc->fw_drv_pulse_wr_seq);
6897
6898    /* load request */
6899    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6900                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6901
6902    /* if the MCP fails to respond we must abort */
6903    if (!(*load_code)) {
6904        BLOGE(sc, "MCP response failure!\n");
6905        return (-1);
6906    }
6907
6908    /* if MCP refused then must abort */
6909    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6910        BLOGE(sc, "MCP refused load request\n");
6911        return (-1);
6912    }
6913
6914    return (0);
6915}
6916
6917/*
6918 * Check whether another PF has already loaded FW to chip. In virtualized
6919 * environments a PF from another VM may have already initialized the device
6920 * including loading FW.
6921 */
6922static int
6923bxe_nic_load_analyze_req(struct bxe_softc *sc,
6924                         uint32_t         load_code)
6925{
6926    uint32_t my_fw, loaded_fw;
6927
6928    /* is another pf loaded on this engine? */
6929    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6930        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6931        /* build my FW version dword */
6932        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6933                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6934                 (BCM_5710_FW_REVISION_VERSION << 16) +
6935                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
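        /*
         * The dword built above packs the major version in the low byte and
         * the minor, revision and engineering numbers in successively higher
         * bytes; e.g. a hypothetical FW version 7.13.1.0 would encode as
         * 0x00010d07.
         */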
6936
6937        /* read loaded FW from chip */
6938        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6939        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6940              loaded_fw, my_fw);
6941
6942        /* abort nic load if version mismatch */
6943        if (my_fw != loaded_fw) {
6944            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
6945                  loaded_fw, my_fw);
6946            return (-1);
6947        }
6948    }
6949
6950    return (0);
6951}
6952
6953/* mark PMF if applicable */
6954static void
6955bxe_nic_load_pmf(struct bxe_softc *sc,
6956                 uint32_t         load_code)
6957{
6958    uint32_t ncsi_oem_data_addr;
6959
6960    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6961        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6962        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6963        /*
6964         * Barrier here to order the write to sc->port.pmf against the
6965         * read in the periodic task.
6966         */
6967        sc->port.pmf = 1;
6968        mb();
6969    } else {
6970        sc->port.pmf = 0;
6971    }
6972
6973    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6974
6975    /* XXX needed? */
6976    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6977        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6978            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6979            if (ncsi_oem_data_addr) {
6980                REG_WR(sc,
6981                       (ncsi_oem_data_addr +
6982                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6983                       0);
6984            }
6985        }
6986    }
6987}
6988
6989static void
6990bxe_read_mf_cfg(struct bxe_softc *sc)
6991{
6992    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6993    int abs_func;
6994    int vn;
6995
6996    if (BXE_NOMCP(sc)) {
6997        return; /* what should be the default value in this case? */
6998    }
6999
7000    /*
7001     * The formula for computing the absolute function number is...
7002     * For 2 port configuration (4 functions per port):
7003     *   abs_func = 2 * vn + SC_PORT + SC_PATH
7004     * For 4 port configuration (2 functions per port):
7005     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
7006     */
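    /*
     * For example (values chosen only to illustrate the formula): in 2 port
     * mode on port 1 of path 0, vn = 2 gives abs_func = 2*2 + 1 + 0 = 5; in
     * 4 port mode with vn = 1 on the same port/path, abs_func = 4*1 + 2*1 + 0 = 6.
     */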
7007    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
7008        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
7009        if (abs_func >= E1H_FUNC_MAX) {
7010            break;
7011        }
7012        sc->devinfo.mf_info.mf_config[vn] =
7013            MFCFG_RD(sc, func_mf_config[abs_func].config);
7014    }
7015
7016    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7017        FUNC_MF_CFG_FUNC_DISABLED) {
7018        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7019        sc->flags |= BXE_MF_FUNC_DIS;
7020    } else {
7021        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7022        sc->flags &= ~BXE_MF_FUNC_DIS;
7023    }
7024}
7025
7026/* acquire split MCP access lock register */
7027static int bxe_acquire_alr(struct bxe_softc *sc)
7028{
7029    uint32_t j, val;
7030
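    /*
     * Try to take the lock by writing bit 31 of the MCP split access lock
     * register and reading it back; the lock is treated as held once the
     * bit reads back set. Poll up to 1000 times with a 5 msec delay, i.e.
     * roughly 5 seconds worst case.
     */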
7031    for (j = 0; j < 1000; j++) {
7032        val = (1UL << 31);
7033        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7034        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7035        if (val & (1L << 31))
7036            break;
7037
7038        DELAY(5000);
7039    }
7040
7041    if (!(val & (1L << 31))) {
7042        BLOGE(sc, "Cannot acquire MCP access lock register\n");
7043        return (-1);
7044    }
7045
7046    return (0);
7047}
7048
7049/* release split MCP access lock register */
7050static void bxe_release_alr(struct bxe_softc *sc)
7051{
7052    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7053}
7054
7055static void
7056bxe_fan_failure(struct bxe_softc *sc)
7057{
7058    int port = SC_PORT(sc);
7059    uint32_t ext_phy_config;
7060
7061    /* mark the failure */
7062    ext_phy_config =
7063        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7064
7065    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7066    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7067    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7068             ext_phy_config);
7069
7070    /* log the failure */
7071    BLOGW(sc, "Fan Failure has caused the driver to shut down "
7072              "the card to prevent permanent damage. "
7073              "Please contact OEM Support for assistance\n");
7074
7075    /* XXX */
7076#if 1
7077    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7078#else
7079    /*
7080     * Schedule device reset (unload)
7081     * This is because some boards consume enough power when the driver is
7082     * up to overheat if the fan fails.
7083     */
7084    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7085    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7086#endif
7087}
7088
7089/* this function is called upon a link interrupt */
7090static void
7091bxe_link_attn(struct bxe_softc *sc)
7092{
7093    uint32_t pause_enabled = 0;
7094    struct host_port_stats *pstats;
7095    int cmng_fns;
7096
7097    /* Make sure that we are synced with the current statistics */
7098    bxe_stats_handle(sc, STATS_EVENT_STOP);
7099
7100    elink_link_update(&sc->link_params, &sc->link_vars);
7101
7102    if (sc->link_vars.link_up) {
7103
7104        /* dropless flow control */
7105        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7106            pause_enabled = 0;
7107
7108            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7109                pause_enabled = 1;
7110            }
7111
7112            REG_WR(sc,
7113                   (BAR_USTRORM_INTMEM +
7114                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7115                   pause_enabled);
7116        }
7117
7118        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7119            pstats = BXE_SP(sc, port_stats);
7120            /* reset old mac stats */
7121            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7122        }
7123
7124        if (sc->state == BXE_STATE_OPEN) {
7125            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7126        }
7127    }
7128
7129    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7130        cmng_fns = bxe_get_cmng_fns_mode(sc);
7131
7132        if (cmng_fns != CMNG_FNS_NONE) {
7133            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7134            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7135        } else {
7136            /* rate shaping and fairness are disabled */
7137            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7138        }
7139    }
7140
7141    bxe_link_report_locked(sc);
7142
7143    if (IS_MF(sc)) {
7144        ; // XXX bxe_link_sync_notify(sc);
7145    }
7146}
7147
7148static void
7149bxe_attn_int_asserted(struct bxe_softc *sc,
7150                      uint32_t         asserted)
7151{
7152    int port = SC_PORT(sc);
7153    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7154                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7155    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7156                                        NIG_REG_MASK_INTERRUPT_PORT0;
7157    uint32_t aeu_mask;
7158    uint32_t nig_mask = 0;
7159    uint32_t reg_addr;
7160    uint32_t igu_acked;
7161    uint32_t cnt;
7162
7163    if (sc->attn_state & asserted) {
7164        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7165    }
7166
7167    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7168
7169    aeu_mask = REG_RD(sc, aeu_addr);
7170
7171    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7172          aeu_mask, asserted);
7173
7174    aeu_mask &= ~(asserted & 0x3ff);
7175
7176    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7177
7178    REG_WR(sc, aeu_addr, aeu_mask);
7179
7180    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7181
7182    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7183    sc->attn_state |= asserted;
7184    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7185
7186    if (asserted & ATTN_HARD_WIRED_MASK) {
7187        if (asserted & ATTN_NIG_FOR_FUNC) {
7188
7189            bxe_acquire_phy_lock(sc);
7190            /* save nig interrupt mask */
7191            nig_mask = REG_RD(sc, nig_int_mask_addr);
7192
7193            /* If nig_mask is not set, no need to call the update function */
7194            if (nig_mask) {
7195                REG_WR(sc, nig_int_mask_addr, 0);
7196
7197                bxe_link_attn(sc);
7198            }
7199
7200            /* handle unicore attn? */
7201        }
7202
7203        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7204            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7205        }
7206
7207        if (asserted & GPIO_2_FUNC) {
7208            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7209        }
7210
7211        if (asserted & GPIO_3_FUNC) {
7212            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7213        }
7214
7215        if (asserted & GPIO_4_FUNC) {
7216            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7217        }
7218
7219        if (port == 0) {
7220            if (asserted & ATTN_GENERAL_ATTN_1) {
7221                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7222                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7223            }
7224            if (asserted & ATTN_GENERAL_ATTN_2) {
7225                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7226                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7227            }
7228            if (asserted & ATTN_GENERAL_ATTN_3) {
7229                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7230                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7231            }
7232        } else {
7233            if (asserted & ATTN_GENERAL_ATTN_4) {
7234                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7235                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7236            }
7237            if (asserted & ATTN_GENERAL_ATTN_5) {
7238                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7239                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7240            }
7241            if (asserted & ATTN_GENERAL_ATTN_6) {
7242                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7243                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7244            }
7245        }
7246    } /* hardwired */
7247
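    /*
     * Write the newly asserted bits to the attention-bits-set command
     * register of whichever interrupt block (HC or IGU) this device uses;
     * the debug message below records which path was taken.
     */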
7248    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7249        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7250    } else {
7251        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7252    }
7253
7254    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7255          asserted,
7256          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7257    REG_WR(sc, reg_addr, asserted);
7258
7259    /* now set back the mask */
7260    if (asserted & ATTN_NIG_FOR_FUNC) {
7261        /*
7262         * Verify that IGU ack through BAR was written before restoring
7263         * NIG mask. This loop should exit after 2-3 iterations max.
7264         */
7265        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7266            cnt = 0;
7267
7268            do {
7269                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7270            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7271                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7272
7273            if (!igu_acked) {
7274                BLOGE(sc, "Failed to verify IGU ack on time\n");
7275            }
7276
7277            mb();
7278        }
7279
7280        REG_WR(sc, nig_int_mask_addr, nig_mask);
7281
7282        bxe_release_phy_lock(sc);
7283    }
7284}
7285
7286static void
7287bxe_print_next_block(struct bxe_softc *sc,
7288                     int              idx,
7289                     const char       *blk)
7290{
7291    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7292}
7293
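/*
 * The bxe_check_blocks_with_parity*() helpers below walk one parity
 * signature word bit by bit, print the name of each HW block whose bit is
 * set (when 'print' is true) and return the updated running count so that
 * bxe_parity_attn() can chain them across all five signature words. The
 * variants taking a 'global' argument also set *global for blocks whose
 * parity errors are treated as global (VAUX PCI CORE and the MCP blocks).
 */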
7294static int
7295bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7296                              uint32_t         sig,
7297                              int              par_num,
7298                              uint8_t          print)
7299{
7300    uint32_t cur_bit = 0;
7301    int i = 0;
7302
7303    for (i = 0; sig; i++) {
7304        cur_bit = ((uint32_t)0x1 << i);
7305        if (sig & cur_bit) {
7306            switch (cur_bit) {
7307            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7308                if (print)
7309                    bxe_print_next_block(sc, par_num++, "BRB");
7310                break;
7311            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7312                if (print)
7313                    bxe_print_next_block(sc, par_num++, "PARSER");
7314                break;
7315            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7316                if (print)
7317                    bxe_print_next_block(sc, par_num++, "TSDM");
7318                break;
7319            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7320                if (print)
7321                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7322                break;
7323            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7324                if (print)
7325                    bxe_print_next_block(sc, par_num++, "TCM");
7326                break;
7327            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7328                if (print)
7329                    bxe_print_next_block(sc, par_num++, "TSEMI");
7330                break;
7331            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7332                if (print)
7333                    bxe_print_next_block(sc, par_num++, "XPB");
7334                break;
7335            }
7336
7337            /* Clear the bit */
7338            sig &= ~cur_bit;
7339        }
7340    }
7341
7342    return (par_num);
7343}
7344
7345static int
7346bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7347                              uint32_t         sig,
7348                              int              par_num,
7349                              uint8_t          *global,
7350                              uint8_t          print)
7351{
7352    int i = 0;
7353    uint32_t cur_bit = 0;
7354    for (i = 0; sig; i++) {
7355        cur_bit = ((uint32_t)0x1 << i);
7356        if (sig & cur_bit) {
7357            switch (cur_bit) {
7358            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7359                if (print)
7360                    bxe_print_next_block(sc, par_num++, "PBF");
7361                break;
7362            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7363                if (print)
7364                    bxe_print_next_block(sc, par_num++, "QM");
7365                break;
7366            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7367                if (print)
7368                    bxe_print_next_block(sc, par_num++, "TM");
7369                break;
7370            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7371                if (print)
7372                    bxe_print_next_block(sc, par_num++, "XSDM");
7373                break;
7374            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7375                if (print)
7376                    bxe_print_next_block(sc, par_num++, "XCM");
7377                break;
7378            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7379                if (print)
7380                    bxe_print_next_block(sc, par_num++, "XSEMI");
7381                break;
7382            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7383                if (print)
7384                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7385                break;
7386            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7387                if (print)
7388                    bxe_print_next_block(sc, par_num++, "NIG");
7389                break;
7390            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7391                if (print)
7392                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7393                *global = TRUE;
7394                break;
7395            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7396                if (print)
7397                    bxe_print_next_block(sc, par_num++, "DEBUG");
7398                break;
7399            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7400                if (print)
7401                    bxe_print_next_block(sc, par_num++, "USDM");
7402                break;
7403            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7404                if (print)
7405                    bxe_print_next_block(sc, par_num++, "UCM");
7406                break;
7407            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7408                if (print)
7409                    bxe_print_next_block(sc, par_num++, "USEMI");
7410                break;
7411            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7412                if (print)
7413                    bxe_print_next_block(sc, par_num++, "UPB");
7414                break;
7415            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7416                if (print)
7417                    bxe_print_next_block(sc, par_num++, "CSDM");
7418                break;
7419            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7420                if (print)
7421                    bxe_print_next_block(sc, par_num++, "CCM");
7422                break;
7423            }
7424
7425            /* Clear the bit */
7426            sig &= ~cur_bit;
7427        }
7428    }
7429
7430    return (par_num);
7431}
7432
7433static int
7434bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7435                              uint32_t         sig,
7436                              int              par_num,
7437                              uint8_t          print)
7438{
7439    uint32_t cur_bit = 0;
7440    int i = 0;
7441
7442    for (i = 0; sig; i++) {
7443        cur_bit = ((uint32_t)0x1 << i);
7444        if (sig & cur_bit) {
7445            switch (cur_bit) {
7446            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7447                if (print)
7448                    bxe_print_next_block(sc, par_num++, "CSEMI");
7449                break;
7450            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7451                if (print)
7452                    bxe_print_next_block(sc, par_num++, "PXP");
7453                break;
7454            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7455                if (print)
7456                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7457                break;
7458            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7459                if (print)
7460                    bxe_print_next_block(sc, par_num++, "CFC");
7461                break;
7462            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7463                if (print)
7464                    bxe_print_next_block(sc, par_num++, "CDU");
7465                break;
7466            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7467                if (print)
7468                    bxe_print_next_block(sc, par_num++, "DMAE");
7469                break;
7470            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7471                if (print)
7472                    bxe_print_next_block(sc, par_num++, "IGU");
7473                break;
7474            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7475                if (print)
7476                    bxe_print_next_block(sc, par_num++, "MISC");
7477                break;
7478            }
7479
7480            /* Clear the bit */
7481            sig &= ~cur_bit;
7482        }
7483    }
7484
7485    return (par_num);
7486}
7487
7488static int
7489bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7490                              uint32_t         sig,
7491                              int              par_num,
7492                              uint8_t          *global,
7493                              uint8_t          print)
7494{
7495    uint32_t cur_bit = 0;
7496    int i = 0;
7497
7498    for (i = 0; sig; i++) {
7499        cur_bit = ((uint32_t)0x1 << i);
7500        if (sig & cur_bit) {
7501            switch (cur_bit) {
7502            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7503                if (print)
7504                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7505                *global = TRUE;
7506                break;
7507            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7508                if (print)
7509                    bxe_print_next_block(sc, par_num++,
7510                              "MCP UMP RX");
7511                *global = TRUE;
7512                break;
7513            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7514                if (print)
7515                    bxe_print_next_block(sc, par_num++,
7516                              "MCP UMP TX");
7517                *global = TRUE;
7518                break;
7519            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7520                if (print)
7521                    bxe_print_next_block(sc, par_num++,
7522                              "MCP SCPAD");
7523                *global = TRUE;
7524                break;
7525            }
7526
7527            /* Clear the bit */
7528            sig &= ~cur_bit;
7529        }
7530    }
7531
7532    return (par_num);
7533}
7534
7535static int
7536bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7537                              uint32_t         sig,
7538                              int              par_num,
7539                              uint8_t          print)
7540{
7541    uint32_t cur_bit = 0;
7542    int i = 0;
7543
7544    for (i = 0; sig; i++) {
7545        cur_bit = ((uint32_t)0x1 << i);
7546        if (sig & cur_bit) {
7547            switch (cur_bit) {
7548            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7549                if (print)
7550                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7551                break;
7552            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7553                if (print)
7554                    bxe_print_next_block(sc, par_num++, "ATC");
7555                break;
7556            }
7557
7558            /* Clear the bit */
7559            sig &= ~cur_bit;
7560        }
7561    }
7562
7563    return (par_num);
7564}
7565
7566static uint8_t
7567bxe_parity_attn(struct bxe_softc *sc,
7568                uint8_t          *global,
7569                uint8_t          print,
7570                uint32_t         *sig)
7571{
7572    int par_num = 0;
7573
7574    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7575        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7576        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7577        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7578        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7579        BLOGE(sc, "Parity error: HW block parity attention:\n"
7580                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7581              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7582              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7583              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7584              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7585              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7586
7587        if (print)
7588            BLOGI(sc, "Parity errors detected in blocks: ");
7589
7590        par_num =
7591            bxe_check_blocks_with_parity0(sc, sig[0] &
7592                                          HW_PRTY_ASSERT_SET_0,
7593                                          par_num, print);
7594        par_num =
7595            bxe_check_blocks_with_parity1(sc, sig[1] &
7596                                          HW_PRTY_ASSERT_SET_1,
7597                                          par_num, global, print);
7598        par_num =
7599            bxe_check_blocks_with_parity2(sc, sig[2] &
7600                                          HW_PRTY_ASSERT_SET_2,
7601                                          par_num, print);
7602        par_num =
7603            bxe_check_blocks_with_parity3(sc, sig[3] &
7604                                          HW_PRTY_ASSERT_SET_3,
7605                                          par_num, global, print);
7606        par_num =
7607            bxe_check_blocks_with_parity4(sc, sig[4] &
7608                                          HW_PRTY_ASSERT_SET_4,
7609                                          par_num, print);
7610
7611        if (print)
7612            BLOGI(sc, "\n");
7613
7614        return (TRUE);
7615    }
7616
7617    return (FALSE);
7618}
7619
7620static uint8_t
7621bxe_chk_parity_attn(struct bxe_softc *sc,
7622                    uint8_t          *global,
7623                    uint8_t          print)
7624{
7625    struct attn_route attn = { {0} };
7626    int port = SC_PORT(sc);
7627
7628    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7629    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7630    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7631    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7632
7633    /*
7634     * Since MCP attentions can't be disabled inside the block, we need to
7635     * read AEU registers to see whether they're currently disabled
7636     */
7637    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7638                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7639                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7640                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7641
7643    if (!CHIP_IS_E1x(sc))
7644        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7645
7646    return (bxe_parity_attn(sc, global, print, attn.sig));
7647}
7648
7649static void
7650bxe_attn_int_deasserted4(struct bxe_softc *sc,
7651                         uint32_t         attn)
7652{
7653    uint32_t val;
7654
7655    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7656        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7657        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7658        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7659            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7660        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7661            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7662        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7663            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7664        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7665            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7666        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7667            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7668        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7669            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7670        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7671            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7672        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7673            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7674        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7675            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7676    }
7677
7678    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7679        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7680        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7681        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7682            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7683        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7684            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7685        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7686            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7687        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7688            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7689        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7690            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7691        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7692            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7693    }
7694
7695    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7696                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7697        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7698              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7699                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7700    }
7701}
7702
7703static void
7704bxe_e1h_disable(struct bxe_softc *sc)
7705{
7706    int port = SC_PORT(sc);
7707
7708    bxe_tx_disable(sc);
7709
7710    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7711}
7712
7713static void
7714bxe_e1h_enable(struct bxe_softc *sc)
7715{
7716    int port = SC_PORT(sc);
7717
7718    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7719
7720    // XXX bxe_tx_enable(sc);
7721}
7722
7723/*
7724 * called due to MCP event (on pmf):
7725 *   reread new bandwidth configuration
7726 *   configure FW
7727 *   notify other functions about the change
7728 */
7729static void
7730bxe_config_mf_bw(struct bxe_softc *sc)
7731{
7732    if (sc->link_vars.link_up) {
7733        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7734        // XXX bxe_link_sync_notify(sc);
7735    }
7736
7737    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7738}
7739
7740static void
7741bxe_set_mf_bw(struct bxe_softc *sc)
7742{
7743    bxe_config_mf_bw(sc);
7744    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7745}
7746
7747static void
7748bxe_handle_eee_event(struct bxe_softc *sc)
7749{
7750    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7751    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7752}
7753
7754#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7755
7756static void
7757bxe_drv_info_ether_stat(struct bxe_softc *sc)
7758{
7759    struct eth_stats_info *ether_stat =
7760        &sc->sp->drv_info_to_mcp.ether_stat;
7761
7762    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7763            ETH_STAT_INFO_VERSION_LEN);
7764
7765    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7766    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7767                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7768                                          ether_stat->mac_local + MAC_PAD,
7769                                          MAC_PAD, ETH_ALEN);
7770
7771    ether_stat->mtu_size = sc->mtu;
7772
7773    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7774    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7775        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7776    }
7777
7778    // XXX ether_stat->feature_flags |= ???;
7779
7780    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7781
7782    ether_stat->txq_size = sc->tx_ring_size;
7783    ether_stat->rxq_size = sc->rx_ring_size;
7784}
7785
7786static void
7787bxe_handle_drv_info_req(struct bxe_softc *sc)
7788{
7789    enum drv_info_opcode op_code;
7790    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7791
7792    /* if drv_info version supported by MFW doesn't match - send NACK */
7793    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7794        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7795        return;
7796    }
7797
7798    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7799               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7800
7801    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7802
7803    switch (op_code) {
7804    case ETH_STATS_OPCODE:
7805        bxe_drv_info_ether_stat(sc);
7806        break;
7807    case FCOE_STATS_OPCODE:
7808    case ISCSI_STATS_OPCODE:
7809    default:
7810        /* if op code isn't supported - send NACK */
7811        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7812        return;
7813    }
7814
7815    /*
7816     * If we got drv_info attn from MFW then these fields are defined in
7817     * shmem2 for sure
7818     */
7819    SHMEM2_WR(sc, drv_info_host_addr_lo,
7820              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7821    SHMEM2_WR(sc, drv_info_host_addr_hi,
7822              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7823
7824    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7825}
7826
7827static void
7828bxe_dcc_event(struct bxe_softc *sc,
7829              uint32_t         dcc_event)
7830{
7831    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7832
7833    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7834        /*
7835         * This is the only place besides the function initialization
7836         * where the sc->flags can change so it is done without any
7837         * locks
7838         */
7839        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7840            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7841            sc->flags |= BXE_MF_FUNC_DIS;
7842            bxe_e1h_disable(sc);
7843        } else {
7844            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7845            sc->flags &= ~BXE_MF_FUNC_DIS;
7846            bxe_e1h_enable(sc);
7847        }
7848        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7849    }
7850
7851    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7852        bxe_config_mf_bw(sc);
7853        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7854    }
7855
7856    /* Report results to MCP */
7857    if (dcc_event)
7858        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7859    else
7860        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7861}
7862
7863static void
7864bxe_pmf_update(struct bxe_softc *sc)
7865{
7866    int port = SC_PORT(sc);
7867    uint32_t val;
7868
7869    sc->port.pmf = 1;
7870    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7871
7872    /*
7873     * We need the mb() to order the write to sc->port.pmf here against
7874     * the read in bxe_periodic_task().
7875     */
7876    mb();
7877
7878    /* queue a periodic task */
7879    // XXX schedule task...
7880
7881    // XXX bxe_dcbx_pmf_update(sc);
7882
7883    /* enable nig attention */
7884    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7885    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7886        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7887        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7888    } else if (!CHIP_IS_E1x(sc)) {
7889        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7890        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7891    }
7892
7893    bxe_stats_handle(sc, STATS_EVENT_PMF);
7894}
7895
7896static int
7897bxe_mc_assert(struct bxe_softc *sc)
7898{
7899    char last_idx;
7900    int i, rc = 0;
7901    uint32_t row0, row1, row2, row3;
7902
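    /*
     * Each STORM keeps an assert list in its internal memory. Every entry is
     * four consecutive 32-bit words (row0..row3) and the scan of a list
     * stops at the first entry whose opcode word equals
     * COMMON_ASM_INVALID_ASSERT_OPCODE.
     */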
7903    /* XSTORM */
7904    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7905    if (last_idx)
7906        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7907
7908    /* print the asserts */
7909    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7910
7911        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7912        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7913        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7914        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7915
7916        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7917            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7918                  i, row3, row2, row1, row0);
7919            rc++;
7920        } else {
7921            break;
7922        }
7923    }
7924
7925    /* TSTORM */
7926    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7927    if (last_idx) {
7928        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7929    }
7930
7931    /* print the asserts */
7932    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7933
7934        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7935        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7936        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7937        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7938
7939        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7940            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7941                  i, row3, row2, row1, row0);
7942            rc++;
7943        } else {
7944            break;
7945        }
7946    }
7947
7948    /* CSTORM */
7949    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7950    if (last_idx) {
7951        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7952    }
7953
7954    /* print the asserts */
7955    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7956
7957        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7958        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7959        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7960        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7961
7962        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7963            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7964                  i, row3, row2, row1, row0);
7965            rc++;
7966        } else {
7967            break;
7968        }
7969    }
7970
7971    /* USTORM */
7972    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7973    if (last_idx) {
7974        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7975    }
7976
7977    /* print the asserts */
7978    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7979
7980        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7981        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7982        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7983        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7984
7985        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7986            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7987                  i, row3, row2, row1, row0);
7988            rc++;
7989        } else {
7990            break;
7991        }
7992    }
7993
7994    return (rc);
7995}
7996
7997static void
7998bxe_attn_int_deasserted3(struct bxe_softc *sc,
7999                         uint32_t         attn)
8000{
8001    int func = SC_FUNC(sc);
8002    uint32_t val;
8003
8004    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
8005
8006        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
8007
8008            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8009            bxe_read_mf_cfg(sc);
8010            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
8011                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
8012            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
8013
8014            if (val & DRV_STATUS_DCC_EVENT_MASK)
8015                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
8016
8017            if (val & DRV_STATUS_SET_MF_BW)
8018                bxe_set_mf_bw(sc);
8019
8020            if (val & DRV_STATUS_DRV_INFO_REQ)
8021                bxe_handle_drv_info_req(sc);
8022
8023            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8024                bxe_pmf_update(sc);
8025
8026            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8027                bxe_handle_eee_event(sc);
8028
8029            if (sc->link_vars.periodic_flags &
8030                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8031                /* sync with link */
8032                bxe_acquire_phy_lock(sc);
8033                sc->link_vars.periodic_flags &=
8034                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8035                bxe_release_phy_lock(sc);
8036                if (IS_MF(sc))
8037                    ; // XXX bxe_link_sync_notify(sc);
8038                bxe_link_report(sc);
8039            }
8040
8041            /*
8042             * Always call it here: bxe_link_report() will
8043             * prevent duplicate link indications.
8044             */
8045            bxe_link_status_update(sc);
8046
8047        } else if (attn & BXE_MC_ASSERT_BITS) {
8048
8049            BLOGE(sc, "MC assert!\n");
8050            bxe_mc_assert(sc);
8051            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8052            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8053            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8054            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8055            bxe_panic(sc, ("MC assert!\n"));
8056
8057        } else if (attn & BXE_MCP_ASSERT) {
8058
8059            BLOGE(sc, "MCP assert!\n");
8060            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8061            // XXX bxe_fw_dump(sc);
8062
8063        } else {
8064            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8065        }
8066    }
8067
8068    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8069        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8070        if (attn & BXE_GRC_TIMEOUT) {
8071            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8072            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8073        }
8074        if (attn & BXE_GRC_RSV) {
8075            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8076            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8077        }
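        /* writing here clears the latched attention signals flagged above */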
8078        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8079    }
8080}
8081
8082static void
8083bxe_attn_int_deasserted2(struct bxe_softc *sc,
8084                         uint32_t         attn)
8085{
8086    int port = SC_PORT(sc);
8087    int reg_offset;
8088    uint32_t val0, mask0, val1, mask1;
8089    uint32_t val;
8090
8091    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8092        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8093        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8094        /* CFC error attention */
8095        if (val & 0x2) {
8096            BLOGE(sc, "FATAL error from CFC\n");
8097        }
8098    }
8099
8100    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8101        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8102        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8103        /* RQ_USDMDP_FIFO_OVERFLOW */
8104        if (val & 0x18000) {
8105            BLOGE(sc, "FATAL error from PXP\n");
8106        }
8107
8108        if (!CHIP_IS_E1x(sc)) {
8109            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8110            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8111        }
8112    }
8113
8114#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8115#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8116
8117    if (attn & AEU_PXP2_HW_INT_BIT) {
8118        /* CQ47854 workaround: do not panic on
8119         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8120         */
8121        if (!CHIP_IS_E1x(sc)) {
8122            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8123            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8124            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8125            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8126            /*
8127             * If PXP2_EOP_ERROR_BIT is the only bit set in
8128             * STS0 and STS1, clear it.
8129             *
8130             * We probably lose additional attentions between
8131             * STS0 and STS_CLR0; in that case the user will not
8132             * be notified about them.
8133             */
8134            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8135                !(val1 & mask1))
8136                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8137
8138            /* print the register, since no one can restore it */
8139            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8140
8141            /*
8142             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8143             * is set then notify
8144             */
8145            if (val0 & PXP2_EOP_ERROR_BIT) {
8146                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8147
8148                /*
8149                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8150                 * set then clear attention from PXP2 block without panic
8151                 */
8152                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8153                    ((val1 & mask1) == 0))
8154                    attn &= ~AEU_PXP2_HW_INT_BIT;
8155            }
8156        }
8157    }
8158
8159    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8160        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8161                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8162
8163        val = REG_RD(sc, reg_offset);
8164        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8165        REG_WR(sc, reg_offset, val);
8166
8167        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8168              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8169        bxe_panic(sc, ("HW block attention set2\n"));
8170    }
8171}
8172
8173static void
8174bxe_attn_int_deasserted1(struct bxe_softc *sc,
8175                         uint32_t         attn)
8176{
8177    int port = SC_PORT(sc);
8178    int reg_offset;
8179    uint32_t val;
8180
8181    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8182        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8183        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8184        /* DORQ discard attention */
8185        if (val & 0x2) {
8186            BLOGE(sc, "FATAL error from DORQ\n");
8187        }
8188    }
8189
8190    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8191        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8192                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8193
8194        val = REG_RD(sc, reg_offset);
8195        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8196        REG_WR(sc, reg_offset, val);
8197
8198        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8199              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8200        bxe_panic(sc, ("HW block attention set1\n"));
8201    }
8202}
8203
8204static void
8205bxe_attn_int_deasserted0(struct bxe_softc *sc,
8206                         uint32_t         attn)
8207{
8208    int port = SC_PORT(sc);
8209    int reg_offset;
8210    uint32_t val;
8211
8212    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8213                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8214
8215    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8216        val = REG_RD(sc, reg_offset);
8217        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8218        REG_WR(sc, reg_offset, val);
8219
8220        BLOGW(sc, "SPIO5 hw attention\n");
8221
8222        /* Fan failure attention */
8223        elink_hw_reset_phy(&sc->link_params);
8224        bxe_fan_failure(sc);
8225    }
8226
8227    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8228        bxe_acquire_phy_lock(sc);
8229        elink_handle_module_detect_int(&sc->link_params);
8230        bxe_release_phy_lock(sc);
8231    }
8232
8233    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8234        val = REG_RD(sc, reg_offset);
8235        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8236        REG_WR(sc, reg_offset, val);
8237
8238        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8239                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8240    }
8241}
8242
8243static void
8244bxe_attn_int_deasserted(struct bxe_softc *sc,
8245                        uint32_t         deasserted)
8246{
8247    struct attn_route attn;
8248    struct attn_route *group_mask;
8249    int port = SC_PORT(sc);
8250    int index;
8251    uint32_t reg_addr;
8252    uint32_t val;
8253    uint32_t aeu_mask;
8254    uint8_t global = FALSE;
8255
8256    /*
8257     * Need to take HW lock because MCP or other port might also
8258     * try to handle this event.
8259     */
8260    bxe_acquire_alr(sc);
8261
8262    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8263        /* XXX
8264         * In case of parity errors don't handle attentions so that
8265         * the other function can also "see" the parity errors.
8266         */
8267        sc->recovery_state = BXE_RECOVERY_INIT;
8268        // XXX schedule a recovery task...
8269        /* disable HW interrupts */
8270        bxe_int_disable(sc);
8271        bxe_release_alr(sc);
8272        return;
8273    }
8274
8275    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8276    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8277    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8278    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8279    if (!CHIP_IS_E1x(sc)) {
8280        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8281    } else {
8282        attn.sig[4] = 0;
8283    }
8284
8285    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8286          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8287
8288    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8289        if (deasserted & (1 << index)) {
8290            group_mask = &sc->attn_group[index];
8291
8292            BLOGD(sc, DBG_INTR,
8293                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8294                  group_mask->sig[0], group_mask->sig[1],
8295                  group_mask->sig[2], group_mask->sig[3],
8296                  group_mask->sig[4]);
8297
8298            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8299            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8300            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8301            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8302            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8303        }
8304    }
8305
8306    bxe_release_alr(sc);
8307
8308    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8309        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8310                    COMMAND_REG_ATTN_BITS_CLR);
8311    } else {
8312        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8313    }
8314
8315    val = ~deasserted;
8316    BLOGD(sc, DBG_INTR,
8317          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8318          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8319    REG_WR(sc, reg_addr, val);
8320
8321    if (~sc->attn_state & deasserted) {
8322        BLOGE(sc, "IGU error\n");
8323    }
8324
8325    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8326                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8327
8328    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8329
8330    aeu_mask = REG_RD(sc, reg_addr);
8331
8332    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8333          aeu_mask, deasserted);
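    /*
     * Unmask the attention lines that were just deasserted; the 0x3ff mask
     * limits this to the low 10 AEU attention lines handled here.
     */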
8334    aeu_mask |= (deasserted & 0x3ff);
8335    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8336
8337    REG_WR(sc, reg_addr, aeu_mask);
8338    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8339
8340    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8341    sc->attn_state &= ~deasserted;
8342    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8343}
8344
8345static void
8346bxe_attn_int(struct bxe_softc *sc)
8347{
8348    /* read local copy of bits */
8349    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8350    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8351    uint32_t attn_state = sc->attn_state;
8352
8353    /* look for changed bits */
8354    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8355    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
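    /*
     * In the computation above, a bit is "asserted" when it is newly set in
     * attn_bits but not yet acknowledged (attn_ack) or tracked (attn_state);
     * it is "deasserted" when it has been cleared in attn_bits while still
     * acknowledged and tracked. For example, attn_bits=0x4, attn_ack=0x0,
     * attn_state=0x0 gives asserted=0x4; later attn_bits=0x0, attn_ack=0x4,
     * attn_state=0x4 gives deasserted=0x4.
     */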
8356
8357    BLOGD(sc, DBG_INTR,
8358          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8359          attn_bits, attn_ack, asserted, deasserted);
8360
8361    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8362        BLOGE(sc, "BAD attention state\n");
8363    }
8364
8365    /* handle bits that were raised */
8366    if (asserted) {
8367        bxe_attn_int_asserted(sc, asserted);
8368    }
8369
8370    if (deasserted) {
8371        bxe_attn_int_deasserted(sc, deasserted);
8372    }
8373}
8374
8375static uint16_t
8376bxe_update_dsb_idx(struct bxe_softc *sc)
8377{
8378    struct host_sp_status_block *def_sb = sc->def_sb;
8379    uint16_t rc = 0;
8380
8381    mb(); /* status block is written to by the chip */
8382
8383    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8384        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8385        rc |= BXE_DEF_SB_ATT_IDX;
8386    }
8387
8388    if (sc->def_idx != def_sb->sp_sb.running_index) {
8389        sc->def_idx = def_sb->sp_sb.running_index;
8390        rc |= BXE_DEF_SB_IDX;
8391    }
8392
8393    mb();
8394
8395    return (rc);
8396}
8397
8398static inline struct ecore_queue_sp_obj *
8399bxe_cid_to_q_obj(struct bxe_softc *sc,
8400                 uint32_t         cid)
8401{
8402    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8403    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8404}
8405
8406static void
8407bxe_handle_mcast_eqe(struct bxe_softc *sc)
8408{
8409    struct ecore_mcast_ramrod_params rparam;
8410    int rc;
8411
8412    memset(&rparam, 0, sizeof(rparam));
8413
8414    rparam.mcast_obj = &sc->mcast_obj;
8415
8416    BXE_MCAST_LOCK(sc);
8417
8418    /* clear pending state for the last command */
8419    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8420
8421    /* if there are pending mcast commands - send them */
8422    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8423        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8424        if (rc < 0) {
8425            BLOGD(sc, DBG_SP,
8426                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8427        }
8428    }
8429
8430    BXE_MCAST_UNLOCK(sc);
8431}
8432
8433static void
8434bxe_handle_classification_eqe(struct bxe_softc      *sc,
8435                              union event_ring_elem *elem)
8436{
8437    unsigned long ramrod_flags = 0;
8438    int rc = 0;
8439    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8440    struct ecore_vlan_mac_obj *vlan_mac_obj;
8441
8442    /* always push next commands out, don't wait here */
8443    bit_set(&ramrod_flags, RAMROD_CONT);
8444
8445    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8446    case ECORE_FILTER_MAC_PENDING:
8447        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8448        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8449        break;
8450
8451    case ECORE_FILTER_MCAST_PENDING:
8452        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8453        /*
8454         * This is only relevant for 57710 where multicast MACs are
8455         * configured as unicast MACs using the same ramrod.
8456         */
8457        bxe_handle_mcast_eqe(sc);
8458        return;
8459
8460    default:
8461        BLOGE(sc, "Unsupported classification command: %d\n",
8462              elem->message.data.eth_event.echo);
8463        return;
8464    }
8465
8466    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8467
8468    if (rc < 0) {
8469        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8470    } else if (rc > 0) {
8471        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8472    }
8473}
8474
8475static void
8476bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8477                       union event_ring_elem *elem)
8478{
8479    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8480
8481    /* send rx_mode command again if was requested */
8482    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8483                               &sc->sp_state)) {
8484        bxe_set_storm_rx_mode(sc);
8485    }
8486}
8487
8488static void
8489bxe_update_eq_prod(struct bxe_softc *sc,
8490                   uint16_t         prod)
8491{
8492    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8493    wmb(); /* keep prod updates ordered */
8494}
8495
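/*
 * Process slowpath event queue (EQ) completions: walk entries from the
 * software consumer up to the hardware consumer, dispatch each ramrod
 * completion to the matching queue/function/classification handler, then
 * credit the freed entries back to eq_spq_left and advance the producer.
 */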
8496static void
8497bxe_eq_int(struct bxe_softc *sc)
8498{
8499    uint16_t hw_cons, sw_cons, sw_prod;
8500    union event_ring_elem *elem;
8501    uint8_t echo;
8502    uint32_t cid;
8503    uint8_t opcode;
8504    int spqe_cnt = 0;
8505    struct ecore_queue_sp_obj *q_obj;
8506    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8507    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8508
8509    hw_cons = le16toh(*sc->eq_cons_sb);
8510
8511    /*
8512     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8513     * When we reach the next-page boundary we need to adjust hw_cons so the
8514     * loop condition below will be met. The next-page element is the size of
8515     * a regular element, hence we increment by 1.
8516     */
8517    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8518        hw_cons++;
8519    }
8520
8521    /*
8522     * This function never runs in parallel with itself for a
8523     * specific sc, so no read memory barrier is needed here.
8524     */
8525    sw_cons = sc->eq_cons;
8526    sw_prod = sc->eq_prod;
8527
8528    BLOGD(sc, DBG_SP, "EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8529          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8530
8531    for (;
8532         sw_cons != hw_cons;
8533         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8534
8535        elem = &sc->eq[EQ_DESC(sw_cons)];
8536
8537        /* elem CID originates from FW, actually LE */
8538        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8539        opcode = elem->message.opcode;
8540
8541        /* handle eq element */
8542        switch (opcode) {
8543
8544        case EVENT_RING_OPCODE_STAT_QUERY:
8545            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8546                  sc->stats_comp++);
8547            /* nothing to do with stats comp */
8548            goto next_spqe;
8549
8550        case EVENT_RING_OPCODE_CFC_DEL:
8551            /* handle according to cid range */
8552            /* we may want to verify here that the sc state is HALTING */
8553            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8554            q_obj = bxe_cid_to_q_obj(sc, cid);
8555            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8556                break;
8557            }
8558            goto next_spqe;
8559
8560        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8561            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8562            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8563                break;
8564            }
8565            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8566            goto next_spqe;
8567
8568        case EVENT_RING_OPCODE_START_TRAFFIC:
8569            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8570            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8571                break;
8572            }
8573            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8574            goto next_spqe;
8575
8576        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8577            echo = elem->message.data.function_update_event.echo;
8578            if (echo == SWITCH_UPDATE) {
8579                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8580                if (f_obj->complete_cmd(sc, f_obj,
8581                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8582                    break;
8583                }
8584            } else {
8586                BLOGD(sc, DBG_SP,
8587                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8588            }
8589            goto next_spqe;
8590
8591        case EVENT_RING_OPCODE_FORWARD_SETUP:
8592            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8593            if (q_obj->complete_cmd(sc, q_obj,
8594                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8595                break;
8596            }
8597            goto next_spqe;
8598
8599        case EVENT_RING_OPCODE_FUNCTION_START:
8600            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8601            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8602                break;
8603            }
8604            goto next_spqe;
8605
8606        case EVENT_RING_OPCODE_FUNCTION_STOP:
8607            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8608            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8609                break;
8610            }
8611            goto next_spqe;
8612        }
8613
8614        switch (opcode | sc->state) {
8615        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8616        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8617            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8618            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8619            rss_raw->clear_pending(rss_raw);
8620            break;
8621
8622        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8623        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8624        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8625        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8626        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8627        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8628            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8629            bxe_handle_classification_eqe(sc, elem);
8630            break;
8631
8632        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8633        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8634        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8635            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8636            bxe_handle_mcast_eqe(sc);
8637            break;
8638
8639        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8640        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8641        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8642            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8643            bxe_handle_rx_mode_eqe(sc, elem);
8644            break;
8645
8646        default:
8647            /* unknown event: log an error and continue */
8648            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8649                  elem->message.opcode, sc->state);
8650        }
8651
8652next_spqe:
8653        spqe_cnt++;
8654    } /* for */
8655
8656    mb();
8657    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8658
8659    sc->eq_cons = sw_cons;
8660    sc->eq_prod = sw_prod;
8661
8662    /* make sure the above memory writes complete before updating the producer */
8663    wmb();
8664
8665    /* update producer */
8666    bxe_update_eq_prod(sc, sc->eq_prod);
8667}
8668
8669static void
8670bxe_handle_sp_tq(void *context,
8671                 int  pending)
8672{
8673    struct bxe_softc *sc = (struct bxe_softc *)context;
8674    uint16_t status;
8675
8676    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8677
8678    /* what work needs to be performed? */
8679    status = bxe_update_dsb_idx(sc);
8680
8681    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8682
8683    /* HW attentions */
8684    if (status & BXE_DEF_SB_ATT_IDX) {
8685        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8686        bxe_attn_int(sc);
8687        status &= ~BXE_DEF_SB_ATT_IDX;
8688    }
8689
8690    /* SP events: STAT_QUERY and others */
8691    if (status & BXE_DEF_SB_IDX) {
8692        /* handle EQ completions */
8693        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8694        bxe_eq_int(sc);
8695        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8696                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8697        status &= ~BXE_DEF_SB_IDX;
8698    }
8699
8700    /* if status is non-zero then something went wrong */
8701    if (__predict_false(status)) {
8702        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8703    }
8704
8705    /* ack status block only if something was actually handled */
8706    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8707               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8708
8709    /*
8710     * Must be called after the EQ processing (since eq leads to sriov
8711     * ramrod completion flows).
8712     * This flow may have been scheduled by the arrival of a ramrod
8713     * completion, or by the sriov code rescheduling itself.
8714     */
8715    // XXX bxe_iov_sp_task(sc);
8716
8717}
8718
8719static void
8720bxe_handle_fp_tq(void *context,
8721                 int  pending)
8722{
8723    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8724    struct bxe_softc *sc = fp->sc;
8725    uint8_t more_tx = FALSE;
8726    uint8_t more_rx = FALSE;
8727
8728    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8729
8730    /* XXX
8731     * IFF_DRV_RUNNING state can't be checked here since we process
8732     * slowpath events on a client queue during setup. Instead
8733     * we need to add a "process/continue" flag that the driver
8734     * can use to tell this task not to do anything.
8735     */
8736#if 0
8737    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8738        return;
8739    }
8740#endif
8741
8742    /* update the fastpath index */
8743    bxe_update_fp_sb_idx(fp);
8744
8745    /* XXX add loop here if ever support multiple tx CoS */
8746    /* fp->txdata[cos] */
8747    if (bxe_has_tx_work(fp)) {
8748        BXE_FP_TX_LOCK(fp);
8749        more_tx = bxe_txeof(sc, fp);
8750        BXE_FP_TX_UNLOCK(fp);
8751    }
8752
8753    if (bxe_has_rx_work(fp)) {
8754        more_rx = bxe_rxeof(sc, fp);
8755    }
8756
8757    if (more_rx /*|| more_tx*/) {
8758        /* still more work to do */
8759        taskqueue_enqueue(fp->tq, &fp->tq_task);
8760        return;
8761    }
8762
8763    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8764               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8765}
8766
8767static void
8768bxe_task_fp(struct bxe_fastpath *fp)
8769{
8770    struct bxe_softc *sc = fp->sc;
8771    uint8_t more_tx = FALSE;
8772    uint8_t more_rx = FALSE;
8773
8774    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8775
8776    /* update the fastpath index */
8777    bxe_update_fp_sb_idx(fp);
8778
8779    /* XXX add loop here if ever support multiple tx CoS */
8780    /* fp->txdata[cos] */
8781    if (bxe_has_tx_work(fp)) {
8782        BXE_FP_TX_LOCK(fp);
8783        more_tx = bxe_txeof(sc, fp);
8784        BXE_FP_TX_UNLOCK(fp);
8785    }
8786
8787    if (bxe_has_rx_work(fp)) {
8788        more_rx = bxe_rxeof(sc, fp);
8789    }
8790
8791    if (more_rx /*|| more_tx*/) {
8792        /* still more work to do, bail out of this ISR and process later */
8793        taskqueue_enqueue(fp->tq, &fp->tq_task);
8794        return;
8795    }
8796
8797    /*
8798     * Here we write the fastpath index that was captured before doing any tx
8799     * or rx work. It is quite possible that other hw events occurred up to
8800     * this point and were already processed above. Since we ack with an older
8801     * fastpath index, another interrupt may arrive in which we find no work
8802     * left to do.
8803     */
8804    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8805               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8806}
8807
8808/*
8809 * Legacy interrupt entry point.
8810 *
8811 * Verifies that the controller generated the interrupt and
8812 * then calls a separate routine to handle the various
8813 * interrupt causes: link, RX, and TX.
8814 */
8815static void
8816bxe_intr_legacy(void *xsc)
8817{
8818    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8819    struct bxe_fastpath *fp;
8820    uint16_t status, mask;
8821    int i;
8822
8823    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8824
8825    /*
8826     * 0 for ustorm, 1 for cstorm
8827     * the bits returned from ack_int() are 0-15
8828     * bit 0 = attention status block
8829     * bit 1 = fast path status block
8830     * a mask of 0x2 or more = tx/rx event
8831     * a mask of 1 = slow path event
8832     */
8833
8834    status = bxe_ack_int(sc);
8835
8836    /* the interrupt is not for us */
8837    if (__predict_false(status == 0)) {
8838        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8839        return;
8840    }
8841
8842    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8843
8844    FOR_EACH_ETH_QUEUE(sc, i) {
8845        fp = &sc->fp[i];
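        /*
         * Each ethernet fastpath queue owns one status bit starting at bit 1
         * (bit 0 is the slowpath); when CNIC support reserves the first
         * fastpath slot, the ethernet queues shift up by one.
         */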
8846        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8847        if (status & mask) {
8848            /* acknowledge and disable further fastpath interrupts */
8849            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8850            bxe_task_fp(fp);
8851            status &= ~mask;
8852        }
8853    }
8854
8855    if (__predict_false(status & 0x1)) {
8856        /* acknowledge and disable further slowpath interrupts */
8857        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8858
8859        /* schedule slowpath handler */
8860        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8861
8862        status &= ~0x1;
8863    }
8864
8865    if (__predict_false(status)) {
8866        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8867    }
8868}
8869
8870/* slowpath interrupt entry point */
8871static void
8872bxe_intr_sp(void *xsc)
8873{
8874    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8875
8876    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8877
8878    /* acknowledge and disable further slowpath interrupts */
8879    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8880
8881    /* schedule slowpath handler */
8882    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8883}
8884
8885/* fastpath interrupt entry point */
8886static void
8887bxe_intr_fp(void *xfp)
8888{
8889    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8890    struct bxe_softc *sc = fp->sc;
8891
8892    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8893
8894    BLOGD(sc, DBG_INTR,
8895          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8896          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8897
8898    /* acknowledge and disable further fastpath interrupts */
8899    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8900
8901    bxe_task_fp(fp);
8902}
8903
8904/* Release all interrupts allocated by the driver. */
8905static void
8906bxe_interrupt_free(struct bxe_softc *sc)
8907{
8908    int i;
8909
8910    switch (sc->interrupt_mode) {
8911    case INTR_MODE_INTX:
8912        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8913        if (sc->intr[0].resource != NULL) {
8914            bus_release_resource(sc->dev,
8915                                 SYS_RES_IRQ,
8916                                 sc->intr[0].rid,
8917                                 sc->intr[0].resource);
8918        }
8919        break;
8920    case INTR_MODE_MSI:
8921        for (i = 0; i < sc->intr_count; i++) {
8922            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8923            if (sc->intr[i].resource && sc->intr[i].rid) {
8924                bus_release_resource(sc->dev,
8925                                     SYS_RES_IRQ,
8926                                     sc->intr[i].rid,
8927                                     sc->intr[i].resource);
8928            }
8929        }
8930        pci_release_msi(sc->dev);
8931        break;
8932    case INTR_MODE_MSIX:
8933        for (i = 0; i < sc->intr_count; i++) {
8934            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8935            if (sc->intr[i].resource && sc->intr[i].rid) {
8936                bus_release_resource(sc->dev,
8937                                     SYS_RES_IRQ,
8938                                     sc->intr[i].rid,
8939                                     sc->intr[i].resource);
8940            }
8941        }
8942        pci_release_msi(sc->dev);
8943        break;
8944    default:
8945        /* nothing to do as initial allocation failed */
8946        break;
8947    }
8948}
8949
8950/*
8951 * This function determines and allocates the appropriate
8952 * interrupt based on system capabilities and user request.
8953 *
8954 * The user may force a particular interrupt mode, specify
8955 * the number of receive queues, specify the method for
8956 * distributing received frames to receive queues, or use
8957 * the default settings which will automatically select the
8958 * best supported combination.  In addition, the OS may or
8959 * may not support certain combinations of these settings.
8960 * This routine attempts to reconcile the settings requested
8961 * by the user with the capabilities available from the system
8962 * to select the optimal combination of features.
8963 *
8964 * Returns:
8965 *   0 = Success, !0 = Failure.
8966 */
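/*
 * Allocation below is attempted in order: MSI-X first (one vector for the
 * slowpath plus one per fastpath queue), then a single MSI vector, and
 * finally a shared legacy INTx line; interrupt_mode is downgraded on each
 * failure so the next do/while block can take over.
 */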
8967static int
8968bxe_interrupt_alloc(struct bxe_softc *sc)
8969{
8970    int msix_count = 0;
8971    int msi_count = 0;
8972    int num_requested = 0;
8973    int num_allocated = 0;
8974    int rid, i, j;
8975    int rc;
8976
8977    /* get the number of available MSI/MSI-X interrupts from the OS */
8978    if (sc->interrupt_mode > 0) {
8979        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8980            msix_count = pci_msix_count(sc->dev);
8981        }
8982
8983        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8984            msi_count = pci_msi_count(sc->dev);
8985        }
8986
8987        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8988              msi_count, msix_count);
8989    }
8990
8991    do { /* try allocating MSI-X interrupt resources (at least 2) */
8992        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8993            break;
8994        }
8995
8996        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8997            (msix_count < 2)) {
8998            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8999            break;
9000        }
9001
9002        /* ask for the necessary number of MSI-X vectors */
9003        num_requested = min((sc->num_queues + 1), msix_count);
9004
9005        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9006
9007        num_allocated = num_requested;
9008        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9009            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9010            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9011            break;
9012        }
9013
9014        if (num_allocated < 2) { /* possible? */
9015            BLOGE(sc, "MSI-X allocation less than 2!\n");
9016            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9017            pci_release_msi(sc->dev);
9018            break;
9019        }
9020
9021        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9022              num_requested, num_allocated);
9023
9024        /* best effort so use the number of vectors allocated to us */
9025        sc->intr_count = num_allocated;
9026        sc->num_queues = num_allocated - 1;
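        /* one MSI-X vector is reserved for the slowpath; the rest drive fastpath queues */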
9027
9028        rid = 1; /* initial resource identifier */
9029
9030        /* allocate the MSI-X vectors */
9031        for (i = 0; i < num_allocated; i++) {
9032            sc->intr[i].rid = (rid + i);
9033
9034            if ((sc->intr[i].resource =
9035                 bus_alloc_resource_any(sc->dev,
9036                                        SYS_RES_IRQ,
9037                                        &sc->intr[i].rid,
9038                                        RF_ACTIVE)) == NULL) {
9039                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9040                      i, (rid + i));
9041
9042                for (j = (i - 1); j >= 0; j--) {
9043                    bus_release_resource(sc->dev,
9044                                         SYS_RES_IRQ,
9045                                         sc->intr[j].rid,
9046                                         sc->intr[j].resource);
9047                }
9048
9049                sc->intr_count = 0;
9050                sc->num_queues = 0;
9051                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9052                pci_release_msi(sc->dev);
9053                break;
9054            }
9055
9056            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9057        }
9058    } while (0);
9059
9060    do { /* try allocating MSI vector resources (a single vector) */
9061        if (sc->interrupt_mode != INTR_MODE_MSI) {
9062            break;
9063        }
9064
9065        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9066            (msi_count < 1)) {
9067            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9068            break;
9069        }
9070
9071        /* ask for a single MSI vector */
9072        num_requested = 1;
9073
9074        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9075
9076        num_allocated = num_requested;
9077        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9078            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9079            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9080            break;
9081        }
9082
9083        if (num_allocated != 1) { /* possible? */
9084            BLOGE(sc, "MSI allocation is not 1!\n");
9085            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9086            pci_release_msi(sc->dev);
9087            break;
9088        }
9089
9090        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9091              num_requested, num_allocated);
9092
9093        /* best effort so use the number of vectors allocated to us */
9094        sc->intr_count = num_allocated;
9095        sc->num_queues = num_allocated;
9096
9097        rid = 1; /* initial resource identifier */
9098
9099        sc->intr[0].rid = rid;
9100
9101        if ((sc->intr[0].resource =
9102             bus_alloc_resource_any(sc->dev,
9103                                    SYS_RES_IRQ,
9104                                    &sc->intr[0].rid,
9105                                    RF_ACTIVE)) == NULL) {
9106            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9107            sc->intr_count = 0;
9108            sc->num_queues = 0;
9109            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9110            pci_release_msi(sc->dev);
9111            break;
9112        }
9113
9114        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9115    } while (0);
9116
9117    do { /* try allocating INTx vector resources */
9118        if (sc->interrupt_mode != INTR_MODE_INTX) {
9119            break;
9120        }
9121
9122        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9123
9124        /* only one vector for INTx */
9125        sc->intr_count = 1;
9126        sc->num_queues = 1;
9127
9128        rid = 0; /* initial resource identifier */
9129
9130        sc->intr[0].rid = rid;
9131
9132        if ((sc->intr[0].resource =
9133             bus_alloc_resource_any(sc->dev,
9134                                    SYS_RES_IRQ,
9135                                    &sc->intr[0].rid,
9136                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9137            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9138            sc->intr_count = 0;
9139            sc->num_queues = 0;
9140            sc->interrupt_mode = -1; /* Failed! */
9141            break;
9142        }
9143
9144        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9145    } while (0);
9146
9147    if (sc->interrupt_mode == -1) {
9148        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9149        rc = 1;
9150    } else {
9151        BLOGD(sc, DBG_LOAD,
9152              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9153              sc->interrupt_mode, sc->num_queues);
9154        rc = 0;
9155    }
9156
9157    return (rc);
9158}
9159
9160static void
9161bxe_interrupt_detach(struct bxe_softc *sc)
9162{
9163    struct bxe_fastpath *fp;
9164    int i;
9165
9166    /* release interrupt resources */
9167    for (i = 0; i < sc->intr_count; i++) {
9168        if (sc->intr[i].resource && sc->intr[i].tag) {
9169            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9170            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9171        }
9172    }
9173
9174    for (i = 0; i < sc->num_queues; i++) {
9175        fp = &sc->fp[i];
9176        if (fp->tq) {
9177            taskqueue_drain(fp->tq, &fp->tq_task);
9178            taskqueue_free(fp->tq);
9179            fp->tq = NULL;
9180        }
9181    }
9182
9184    if (sc->sp_tq) {
9185        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9186        taskqueue_free(sc->sp_tq);
9187        sc->sp_tq = NULL;
9188    }
9189}
9190
9191/*
9192 * Enables interrupts and attaches the ISR(s).
9193 *
9194 * When using multiple MSI/MSI-X vectors the first vector
9195 * is used for slowpath operations while all remaining
9196 * vectors are used for fastpath operations.  If only a
9197 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9198 * ISR must look for both slowpath and fastpath completions.
9199 */
9200static int
9201bxe_interrupt_attach(struct bxe_softc *sc)
9202{
9203    struct bxe_fastpath *fp;
9204    int rc = 0;
9205    int i;
9206
9207    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9208             "bxe%d_sp_tq", sc->unit);
9209    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9210    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9211                                      taskqueue_thread_enqueue,
9212                                      &sc->sp_tq);
9213    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9214                            "%s", sc->sp_tq_name);
9215
9217    for (i = 0; i < sc->num_queues; i++) {
9218        fp = &sc->fp[i];
9219        snprintf(fp->tq_name, sizeof(fp->tq_name),
9220                 "bxe%d_fp%d_tq", sc->unit, i);
9221        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9222        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9223                                       taskqueue_thread_enqueue,
9224                                       &fp->tq);
9225        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9226                                "%s", fp->tq_name);
9227    }
9228
9229    /* setup interrupt handlers */
9230    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9231        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9232
9233        /*
9234         * Setup the interrupt handler. Note that we pass the driver instance
9235         * to the interrupt handler for the slowpath.
9236         */
9237        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9238                                 (INTR_TYPE_NET | INTR_MPSAFE),
9239                                 NULL, bxe_intr_sp, sc,
9240                                 &sc->intr[0].tag)) != 0) {
9241            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9242            goto bxe_interrupt_attach_exit;
9243        }
9244
9245        bus_describe_intr(sc->dev, sc->intr[0].resource,
9246                          sc->intr[0].tag, "sp");
9247
9248        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9249
9250        /* initialize the fastpath vectors (note the first was used for sp) */
9251        for (i = 0; i < sc->num_queues; i++) {
9252            fp = &sc->fp[i];
9253            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9254
9255            /*
9256             * Setup the interrupt handler. Note that we pass the
9257             * fastpath context to the interrupt handler in this
9258             * case.
9259             */
9260            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9261                                     (INTR_TYPE_NET | INTR_MPSAFE),
9262                                     NULL, bxe_intr_fp, fp,
9263                                     &sc->intr[i + 1].tag)) != 0) {
9264                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9265                      (i + 1), rc);
9266                goto bxe_interrupt_attach_exit;
9267            }
9268
9269            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9270                              sc->intr[i + 1].tag, "fp%02d", i);
9271
9272            /* bind the fastpath instance to a cpu */
9273            if (sc->num_queues > 1) {
9274                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9275            }
9276
9277            fp->state = BXE_FP_STATE_IRQ;
9278        }
9279    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9280        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9281
9282        /*
9283         * Setup the interrupt handler. Note that we pass the
9284         * driver instance to the interrupt handler which
9285         * will handle both the slowpath and fastpath.
9286         */
9287        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9288                                 (INTR_TYPE_NET | INTR_MPSAFE),
9289                                 NULL, bxe_intr_legacy, sc,
9290                                 &sc->intr[0].tag)) != 0) {
9291            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9292            goto bxe_interrupt_attach_exit;
9293        }
9294
9295    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9296        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9297
9298        /*
9299         * Setup the interrupt handler. Note that we pass the
9300         * driver instance to the interrupt handler which
9301         * will handle both the slowpath and fastpath.
9302         */
9303        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9304                                 (INTR_TYPE_NET | INTR_MPSAFE),
9305                                 NULL, bxe_intr_legacy, sc,
9306                                 &sc->intr[0].tag)) != 0) {
9307            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9308            goto bxe_interrupt_attach_exit;
9309        }
9310    }
9311
9312bxe_interrupt_attach_exit:
9313
9314    return (rc);
9315}
9316
9317static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9318static int  bxe_init_hw_common(struct bxe_softc *sc);
9319static int  bxe_init_hw_port(struct bxe_softc *sc);
9320static int  bxe_init_hw_func(struct bxe_softc *sc);
9321static void bxe_reset_common(struct bxe_softc *sc);
9322static void bxe_reset_port(struct bxe_softc *sc);
9323static void bxe_reset_func(struct bxe_softc *sc);
9324static int  bxe_gunzip_init(struct bxe_softc *sc);
9325static void bxe_gunzip_end(struct bxe_softc *sc);
9326static int  bxe_init_firmware(struct bxe_softc *sc);
9327static void bxe_release_firmware(struct bxe_softc *sc);
9328
9329static struct
9330ecore_func_sp_drv_ops bxe_func_sp_drv = {
9331    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9332    .init_hw_cmn      = bxe_init_hw_common,
9333    .init_hw_port     = bxe_init_hw_port,
9334    .init_hw_func     = bxe_init_hw_func,
9335
9336    .reset_hw_cmn     = bxe_reset_common,
9337    .reset_hw_port    = bxe_reset_port,
9338    .reset_hw_func    = bxe_reset_func,
9339
9340    .gunzip_init      = bxe_gunzip_init,
9341    .gunzip_end       = bxe_gunzip_end,
9342
9343    .init_fw          = bxe_init_firmware,
9344    .release_fw       = bxe_release_firmware,
9345};
9346
9347static void
9348bxe_init_func_obj(struct bxe_softc *sc)
9349{
9350    sc->dmae_ready = 0;
9351
9352    ecore_init_func_obj(sc,
9353                        &sc->func_obj,
9354                        BXE_SP(sc, func_rdata),
9355                        BXE_SP_MAPPING(sc, func_rdata),
9356                        BXE_SP(sc, func_afex_rdata),
9357                        BXE_SP_MAPPING(sc, func_afex_rdata),
9358                        &bxe_func_sp_drv);
9359}
9360
9361static int
9362bxe_init_hw(struct bxe_softc *sc,
9363            uint32_t         load_code)
9364{
9365    struct ecore_func_state_params func_params = { NULL };
9366    int rc;
9367
9368    /* prepare the parameters for function state transitions */
9369    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9370
9371    func_params.f_obj = &sc->func_obj;
9372    func_params.cmd = ECORE_F_CMD_HW_INIT;
9373
9374    func_params.params.hw_init.load_phase = load_code;
9375
9376    /*
9377     * Via a plethora of function pointers, we will eventually reach
9378     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9379     */
9380    rc = ecore_func_state_change(sc, &func_params);
9381
9382    return (rc);
9383}
9384
9385static void
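/*
 * Fill 'len' bytes of device memory starting at 'addr' with 'fill'. Uses
 * 32-bit writes when both the address and length are dword aligned and
 * falls back to byte writes otherwise.
 */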
9386bxe_fill(struct bxe_softc *sc,
9387         uint32_t         addr,
9388         int              fill,
9389         uint32_t         len)
9390{
9391    uint32_t i;
9392
9393    if (!(len % 4) && !(addr % 4)) {
9394        for (i = 0; i < len; i += 4) {
9395            REG_WR(sc, (addr + i), fill);
9396        }
9397    } else {
9398        for (i = 0; i < len; i++) {
9399            REG_WR8(sc, (addr + i), fill);
9400        }
9401    }
9402}
9403
9404/* writes FP SB data to FW - data_size in dwords */
9405static void
9406bxe_wr_fp_sb_data(struct bxe_softc *sc,
9407                  int              fw_sb_id,
9408                  uint32_t         *sb_data_p,
9409                  uint32_t         data_size)
9410{
9411    int index;
9412
9413    for (index = 0; index < data_size; index++) {
9414        REG_WR(sc,
9415               (BAR_CSTRORM_INTMEM +
9416                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9417                (sizeof(uint32_t) * index)),
9418               *(sb_data_p + index));
9419    }
9420}
9421
9422static void
9423bxe_zero_fp_sb(struct bxe_softc *sc,
9424               int              fw_sb_id)
9425{
9426    struct hc_status_block_data_e2 sb_data_e2;
9427    struct hc_status_block_data_e1x sb_data_e1x;
9428    uint32_t *sb_data_p;
9429    uint32_t data_size = 0;
9430
9431    if (!CHIP_IS_E1x(sc)) {
9432        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9433        sb_data_e2.common.state = SB_DISABLED;
9434        sb_data_e2.common.p_func.vf_valid = FALSE;
9435        sb_data_p = (uint32_t *)&sb_data_e2;
9436        data_size = (sizeof(struct hc_status_block_data_e2) /
9437                     sizeof(uint32_t));
9438    } else {
9439        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9440        sb_data_e1x.common.state = SB_DISABLED;
9441        sb_data_e1x.common.p_func.vf_valid = FALSE;
9442        sb_data_p = (uint32_t *)&sb_data_e1x;
9443        data_size = (sizeof(struct hc_status_block_data_e1x) /
9444                     sizeof(uint32_t));
9445    }
9446
9447    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9448
9449    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9450             0, CSTORM_STATUS_BLOCK_SIZE);
9451    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9452             0, CSTORM_SYNC_BLOCK_SIZE);
9453}
9454
9455static void
9456bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9457                  struct hc_sp_status_block_data *sp_sb_data)
9458{
9459    int i;
9460
9461    for (i = 0;
9462         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9463         i++) {
9464        REG_WR(sc,
9465               (BAR_CSTRORM_INTMEM +
9466                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9467                (i * sizeof(uint32_t))),
9468               *((uint32_t *)sp_sb_data + i));
9469    }
9470}
9471
9472static void
9473bxe_zero_sp_sb(struct bxe_softc *sc)
9474{
9475    struct hc_sp_status_block_data sp_sb_data;
9476
9477    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9478
9479    sp_sb_data.state           = SB_DISABLED;
9480    sp_sb_data.p_func.vf_valid = FALSE;
9481
9482    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9483
9484    bxe_fill(sc,
9485             (BAR_CSTRORM_INTMEM +
9486              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9487              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9488    bxe_fill(sc,
9489             (BAR_CSTRORM_INTMEM +
9490              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9491              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9492}
9493
9494static void
9495bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9496                             int                       igu_sb_id,
9497                             int                       igu_seg_id)
9498{
9499    hc_sm->igu_sb_id      = igu_sb_id;
9500    hc_sm->igu_seg_id     = igu_seg_id;
9501    hc_sm->timer_value    = 0xFF;
9502    hc_sm->time_to_expire = 0xFFFFFFFF;
9503}
9504
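/*
 * Bind each status block index to a host coalescing state machine: the RX
 * CQ consumer index to the RX SM and the TX CQ consumer indices to the TX
 * SM, by clearing and then rewriting the SM_ID field in each index's flags.
 */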
9505static void
9506bxe_map_sb_state_machines(struct hc_index_data *index_data)
9507{
9508    /* zero out state machine indices */
9509
9510    /* rx indices */
9511    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9512
9513    /* tx indices */
9514    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9515    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9516    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9517    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9518
9519    /* map indices */
9520
9521    /* rx indices */
9522    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9523        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9524
9525    /* tx indices */
9526    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9527        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9528    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9529        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9530    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9531        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9532    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9533        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9534}
9535
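/*
 * Initialize a non-default status block in firmware: zero it first, then
 * populate the E2 or E1x data structure with the host DMA address and the
 * PF/VF identifiers, attach the RX and TX state machines to the given IGU
 * SB, and finally write the whole structure to CSTORM internal memory.
 */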
9536static void
9537bxe_init_sb(struct bxe_softc *sc,
9538            bus_addr_t       busaddr,
9539            int              vfid,
9540            uint8_t          vf_valid,
9541            int              fw_sb_id,
9542            int              igu_sb_id)
9543{
9544    struct hc_status_block_data_e2  sb_data_e2;
9545    struct hc_status_block_data_e1x sb_data_e1x;
9546    struct hc_status_block_sm       *hc_sm_p;
9547    uint32_t *sb_data_p;
9548    int igu_seg_id;
9549    int data_size;
9550
9551    if (CHIP_INT_MODE_IS_BC(sc)) {
9552        igu_seg_id = HC_SEG_ACCESS_NORM;
9553    } else {
9554        igu_seg_id = IGU_SEG_ACCESS_NORM;
9555    }
9556
9557    bxe_zero_fp_sb(sc, fw_sb_id);
9558
9559    if (!CHIP_IS_E1x(sc)) {
9560        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9561        sb_data_e2.common.state = SB_ENABLED;
9562        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9563        sb_data_e2.common.p_func.vf_id = vfid;
9564        sb_data_e2.common.p_func.vf_valid = vf_valid;
9565        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9566        sb_data_e2.common.same_igu_sb_1b = TRUE;
9567        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9568        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9569        hc_sm_p = sb_data_e2.common.state_machine;
9570        sb_data_p = (uint32_t *)&sb_data_e2;
9571        data_size = (sizeof(struct hc_status_block_data_e2) /
9572                     sizeof(uint32_t));
9573        bxe_map_sb_state_machines(sb_data_e2.index_data);
9574    } else {
9575        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9576        sb_data_e1x.common.state = SB_ENABLED;
9577        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9578        sb_data_e1x.common.p_func.vf_id = 0xff;
9579        sb_data_e1x.common.p_func.vf_valid = FALSE;
9580        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9581        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9582        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9583        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9584        hc_sm_p = sb_data_e1x.common.state_machine;
9585        sb_data_p = (uint32_t *)&sb_data_e1x;
9586        data_size = (sizeof(struct hc_status_block_data_e1x) /
9587                     sizeof(uint32_t));
9588        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9589    }
9590
9591    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9592    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9593
9594    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9595
9596    /* write indices to HW - PCI guarantees endianness of regpairs */
9597    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9598}
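/*
 * Note: data_size above is in 32-bit words. bxe_wr_fp_sb_data() presumably
 * pushes the e2/e1x status block data into CSTORM internal memory one dword
 * at a time, the same way bxe_wr_sp_sb_data() does for the slowpath SB.
 */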
9599
9600static inline uint8_t
9601bxe_fp_qzone_id(struct bxe_fastpath *fp)
9602{
9603    if (CHIP_IS_E1x(fp->sc)) {
9604        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9605    } else {
9606        return (fp->cl_id);
9607    }
9608}
9609
9610static inline uint32_t
9611bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9612                           struct bxe_fastpath *fp)
9613{
9614    uint32_t offset = BAR_USTRORM_INTMEM;
9615
9616    if (!CHIP_IS_E1x(sc)) {
9617        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9618    } else {
9619        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9620    }
9621
9622    return (offset);
9623}
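/*
 * Note: the offset computed above is cached in fp->ustorm_rx_prods_offset and
 * later used by bxe_update_rx_prod() to write the BD/CQE/SGE producers
 * directly into USTORM internal memory without redoing the per-queue math.
 */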
9624
9625static void
9626bxe_init_eth_fp(struct bxe_softc *sc,
9627                int              idx)
9628{
9629    struct bxe_fastpath *fp = &sc->fp[idx];
9630    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9631    unsigned long q_type = 0;
9632    int cos;
9633
9634    fp->sc    = sc;
9635    fp->index = idx;
9636
9637    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9638    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9639
9640    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9641                    (SC_L_ID(sc) + idx) :
9642                    /* want client ID same as IGU SB ID for non-E1x */
9643                    fp->igu_sb_id;
9644    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9645
9646    /* setup sb indices */
9647    if (!CHIP_IS_E1x(sc)) {
9648        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9649        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9650    } else {
9651        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9652        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9653    }
9654
9655    /* init shortcut */
9656    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9657
9658    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9659
9660    /*
9661     * XXX If multiple CoS is ever supported then each fastpath structure
9662     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9663     */
9664    for (cos = 0; cos < sc->max_cos; cos++) {
9665        cids[cos] = idx;
9666    }
9667    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9668
9669    /* nothing more for a VF to do */
9670    if (IS_VF(sc)) {
9671        return;
9672    }
9673
9674    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9675                fp->fw_sb_id, fp->igu_sb_id);
9676
9677    bxe_update_fp_sb_idx(fp);
9678
9679    /* Configure Queue State object */
9680    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9681    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9682
9683    ecore_init_queue_obj(sc,
9684                         &sc->sp_objs[idx].q_obj,
9685                         fp->cl_id,
9686                         cids,
9687                         sc->max_cos,
9688                         SC_FUNC(sc),
9689                         BXE_SP(sc, q_rdata),
9690                         BXE_SP_MAPPING(sc, q_rdata),
9691                         q_type);
9692
9693    /* configure classification DBs */
9694    ecore_init_mac_obj(sc,
9695                       &sc->sp_objs[idx].mac_obj,
9696                       fp->cl_id,
9697                       idx,
9698                       SC_FUNC(sc),
9699                       BXE_SP(sc, mac_rdata),
9700                       BXE_SP_MAPPING(sc, mac_rdata),
9701                       ECORE_FILTER_MAC_PENDING,
9702                       &sc->sp_state,
9703                       ECORE_OBJ_TYPE_RX_TX,
9704                       &sc->macs_pool);
9705
9706    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9707          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9708}
9709
9710static inline void
9711bxe_update_rx_prod(struct bxe_softc    *sc,
9712                   struct bxe_fastpath *fp,
9713                   uint16_t            rx_bd_prod,
9714                   uint16_t            rx_cq_prod,
9715                   uint16_t            rx_sge_prod)
9716{
9717    struct ustorm_eth_rx_producers rx_prods = { 0 };
9718    uint32_t i;
9719
9720    /* update producers */
9721    rx_prods.bd_prod  = rx_bd_prod;
9722    rx_prods.cqe_prod = rx_cq_prod;
9723    rx_prods.sge_prod = rx_sge_prod;
9724
9725    /*
9726     * Make sure that the BD and SGE data is updated before updating the
9727     * producers since FW might read the BD/SGE right after the producer
9728     * is updated.
9729     * This is only applicable for weak-ordered memory model archs such
9730     * as IA-64. The following barrier is also mandatory since FW will
9731     * assumes BDs must have buffers.
9732     */
9733    wmb();
9734
9735    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9736        REG_WR(sc,
9737               (fp->ustorm_rx_prods_offset + (i * 4)),
9738               ((uint32_t *)&rx_prods)[i]);
9739    }
9740
9741    wmb(); /* keep prod updates ordered */
9742
9743    BLOGD(sc, DBG_RX,
9744          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9745          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9746}
9747
9748static void
9749bxe_init_rx_rings(struct bxe_softc *sc)
9750{
9751    struct bxe_fastpath *fp;
9752    int i;
9753
9754    for (i = 0; i < sc->num_queues; i++) {
9755        fp = &sc->fp[i];
9756
9757        fp->rx_bd_cons = 0;
9758
9759        /*
9760         * Activate the BD ring...
9761         * Warning, this will generate an interrupt (to the TSTORM)
9762         * so this can only be done after the chip is initialized
9763         */
9764        bxe_update_rx_prod(sc, fp,
9765                           fp->rx_bd_prod,
9766                           fp->rx_cq_prod,
9767                           fp->rx_sge_prod);
9768
9769        if (i != 0) {
9770            continue;
9771        }
9772
9773        if (CHIP_IS_E1(sc)) {
9774            REG_WR(sc,
9775                   (BAR_USTRORM_INTMEM +
9776                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9777                   U64_LO(fp->rcq_dma.paddr));
9778            REG_WR(sc,
9779                   (BAR_USTRORM_INTMEM +
9780                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9781                   U64_HI(fp->rcq_dma.paddr));
9782        }
9783    }
9784}
9785
9786static void
9787bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9788{
9789    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9790    fp->tx_db.data.zero_fill1 = 0;
9791    fp->tx_db.data.prod = 0;
9792
9793    fp->tx_pkt_prod = 0;
9794    fp->tx_pkt_cons = 0;
9795    fp->tx_bd_prod = 0;
9796    fp->tx_bd_cons = 0;
9797    fp->eth_q_stats.tx_pkts = 0;
9798}
9799
9800static inline void
9801bxe_init_tx_rings(struct bxe_softc *sc)
9802{
9803    int i;
9804
9805    for (i = 0; i < sc->num_queues; i++) {
9806        bxe_init_tx_ring_one(&sc->fp[i]);
9807    }
9808}
9809
9810static void
9811bxe_init_def_sb(struct bxe_softc *sc)
9812{
9813    struct host_sp_status_block *def_sb = sc->def_sb;
9814    bus_addr_t mapping = sc->def_sb_dma.paddr;
9815    int igu_sp_sb_index;
9816    int igu_seg_id;
9817    int port = SC_PORT(sc);
9818    int func = SC_FUNC(sc);
9819    int reg_offset, reg_offset_en5;
9820    uint64_t section;
9821    int index, sindex;
9822    struct hc_sp_status_block_data sp_sb_data;
9823
9824    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9825
9826    if (CHIP_INT_MODE_IS_BC(sc)) {
9827        igu_sp_sb_index = DEF_SB_IGU_ID;
9828        igu_seg_id = HC_SEG_ACCESS_DEF;
9829    } else {
9830        igu_sp_sb_index = sc->igu_dsb_id;
9831        igu_seg_id = IGU_SEG_ACCESS_DEF;
9832    }
9833
9834    /* attentions */
9835    section = ((uint64_t)mapping +
9836               offsetof(struct host_sp_status_block, atten_status_block));
9837    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9838    sc->attn_state = 0;
9839
9840    reg_offset = (port) ?
9841                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9842                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9843    reg_offset_en5 = (port) ?
9844                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9845                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9846
9847    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9848        /* take care of sig[0]..sig[4] */
9849        for (sindex = 0; sindex < 4; sindex++) {
9850            sc->attn_group[index].sig[sindex] =
9851                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9852        }
9853
9854        if (!CHIP_IS_E1x(sc)) {
9855            /*
9856             * enable5 is separate from the rest of the registers,
9857             * and the address skip is 4 and not 16 between the
9858             * different groups
9859             */
9860            sc->attn_group[index].sig[4] =
9861                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9862        } else {
9863            sc->attn_group[index].sig[4] = 0;
9864        }
9865    }
9866
9867    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9868        reg_offset = (port) ?
9869                         HC_REG_ATTN_MSG1_ADDR_L :
9870                         HC_REG_ATTN_MSG0_ADDR_L;
9871        REG_WR(sc, reg_offset, U64_LO(section));
9872        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9873    } else if (!CHIP_IS_E1x(sc)) {
9874        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9875        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9876    }
9877
9878    section = ((uint64_t)mapping +
9879               offsetof(struct host_sp_status_block, sp_sb));
9880
9881    bxe_zero_sp_sb(sc);
9882
9883    /* PCI guarantees endianness of regpair */
9884    sp_sb_data.state           = SB_ENABLED;
9885    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9886    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9887    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9888    sp_sb_data.igu_seg_id      = igu_seg_id;
9889    sp_sb_data.p_func.pf_id    = func;
9890    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9891    sp_sb_data.p_func.vf_id    = 0xff;
9892
9893    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9894
9895    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9896}
9897
9898static void
9899bxe_init_sp_ring(struct bxe_softc *sc)
9900{
9901    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9902    sc->spq_prod_idx = 0;
9903    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9904    sc->spq_prod_bd = sc->spq;
9905    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9906}
9907
9908static void
9909bxe_init_eq_ring(struct bxe_softc *sc)
9910{
9911    union event_ring_elem *elem;
9912    int i;
9913
9914    for (i = 1; i <= NUM_EQ_PAGES; i++) {
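    /*
     * Chain the event queue pages into a ring: the last element of each page
     * points to the physical address of the next page, and the final page
     * wraps back to the first (the "i % NUM_EQ_PAGES" term).
     */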
9915        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9916
9917        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9918                                                 BCM_PAGE_SIZE *
9919                                                 (i % NUM_EQ_PAGES)));
9920        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9921                                                 BCM_PAGE_SIZE *
9922                                                 (i % NUM_EQ_PAGES)));
9923    }
9924
9925    sc->eq_cons    = 0;
9926    sc->eq_prod    = NUM_EQ_DESC;
9927    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9928
9929    atomic_store_rel_long(&sc->eq_spq_left,
9930                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9931                               NUM_EQ_DESC) - 1));
9932}
9933
9934static void
9935bxe_init_internal_common(struct bxe_softc *sc)
9936{
9937    int i;
9938
9939    if (IS_MF_SI(sc)) {
9940        /*
9941         * In switch independent mode, the TSTORM needs to accept
9942         * packets that failed classification, since approximate match
9943         * mac addresses aren't written to NIG LLH.
9944         */
9945        REG_WR8(sc,
9946                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9947                2);
9948    } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
9949        REG_WR8(sc,
9950                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9951                0);
9952    }
9953
9954    /*
9955     * Zero this manually as its initialization is currently missing
9956     * in the initTool.
9957     */
9958    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9959        REG_WR(sc,
9960               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9961               0);
9962    }
9963
9964    if (!CHIP_IS_E1x(sc)) {
9965        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9966                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9967    }
9968}
9969
9970static void
9971bxe_init_internal(struct bxe_softc *sc,
9972                  uint32_t         load_code)
9973{
9974    switch (load_code) {
9975    case FW_MSG_CODE_DRV_LOAD_COMMON:
9976    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9977        bxe_init_internal_common(sc);
9978        /* no break */
9979
9980    case FW_MSG_CODE_DRV_LOAD_PORT:
9981        /* nothing to do */
9982        /* no break */
9983
9984    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9985        /* internal memory per function is initialized inside bxe_pf_init */
9986        break;
9987
9988    default:
9989        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9990        break;
9991    }
9992}
9993
9994static void
9995storm_memset_func_cfg(struct bxe_softc                         *sc,
9996                      struct tstorm_eth_function_common_config *tcfg,
9997                      uint16_t                                  abs_fid)
9998{
9999    uint32_t addr;
10000    size_t size;
10001
10002    addr = (BAR_TSTRORM_INTMEM +
10003            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10004    size = sizeof(struct tstorm_eth_function_common_config);
10005    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10006}
10007
10008static void
10009bxe_func_init(struct bxe_softc            *sc,
10010              struct bxe_func_init_params *p)
10011{
10012    struct tstorm_eth_function_common_config tcfg = { 0 };
10013
10014    if (CHIP_IS_E1x(sc)) {
10015        storm_memset_func_cfg(sc, &tcfg, p->func_id);
10016    }
10017
10018    /* Enable the function in the FW */
10019    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10020    storm_memset_func_en(sc, p->func_id, 1);
10021
10022    /* spq */
10023    if (p->func_flgs & FUNC_FLG_SPQ) {
10024        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10025        REG_WR(sc,
10026               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10027               p->spq_prod);
10028    }
10029}
10030
10031/*
10032 * Calculates the sum of vn_min_rates.
10033 * It's needed for further normalizing of the min_rates.
10034 * Returns:
10035 *   sum of vn_min_rates.
10036 *     or
10037 *   0 - if all the min_rates are 0.
10038 * In the latter case the fairness algorithm should be deactivated.
10039 * If not all min rates are zero, then those that are zero will be set to 1.
10040 */
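/*
 * (The computed per-VN values are stored in input->vnic_min_rate[] rather
 * than returned. As a worked example, assuming the MIN_BW field decodes to a
 * percentage: a configured value of 25 yields vnic_min_rate[vn] = 2500, a
 * value of 0 is bumped to DEF_MIN_RATE, and a hidden VN is forced to 0.)
 */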
10041static void
10042bxe_calc_vn_min(struct bxe_softc       *sc,
10043                struct cmng_init_input *input)
10044{
10045    uint32_t vn_cfg;
10046    uint32_t vn_min_rate;
10047    int all_zero = 1;
10048    int vn;
10049
10050    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10051        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10052        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10053                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10054
10055        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10056            /* skip hidden VNs */
10057            vn_min_rate = 0;
10058        } else if (!vn_min_rate) {
10059            /* If min rate is zero - set it to 100 */
10060            vn_min_rate = DEF_MIN_RATE;
10061        } else {
10062            all_zero = 0;
10063        }
10064
10065        input->vnic_min_rate[vn] = vn_min_rate;
10066    }
10067
10068    /* if ETS or all min rates are zeros - disable fairness */
10069    if (BXE_IS_ETS_ENABLED(sc)) {
10070        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10071        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10072    } else if (all_zero) {
10073        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10074        BLOGD(sc, DBG_LOAD,
10075              "Fairness disabled (all MIN values are zeroes)\n");
10076    } else {
10077        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10078    }
10079}
10080
10081static inline uint16_t
10082bxe_extract_max_cfg(struct bxe_softc *sc,
10083                    uint32_t         mf_cfg)
10084{
10085    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10086                        FUNC_MF_CFG_MAX_BW_SHIFT);
10087
10088    if (!max_cfg) {
10089        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10090        max_cfg = 100;
10091    }
10092
10093    return (max_cfg);
10094}
10095
10096static void
10097bxe_calc_vn_max(struct bxe_softc       *sc,
10098                int                    vn,
10099                struct cmng_init_input *input)
10100{
10101    uint16_t vn_max_rate;
10102    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10103    uint32_t max_cfg;
10104
10105    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10106        vn_max_rate = 0;
10107    } else {
10108        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10109
10110        if (IS_MF_SI(sc)) {
10111            /* max_cfg in percents of linkspeed */
10112            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10113        } else { /* SD modes */
10114            /* max_cfg is absolute in 100Mb units */
10115            vn_max_rate = (max_cfg * 100);
10116        }
10117    }
10118
10119    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10120
10121    input->vnic_max_rate[vn] = vn_max_rate;
10122}
10123
10124static void
10125bxe_cmng_fns_init(struct bxe_softc *sc,
10126                  uint8_t          read_cfg,
10127                  uint8_t          cmng_type)
10128{
10129    struct cmng_init_input input;
10130    int vn;
10131
10132    memset(&input, 0, sizeof(struct cmng_init_input));
10133
10134    input.port_rate = sc->link_vars.line_speed;
10135
10136    if (cmng_type == CMNG_FNS_MINMAX) {
10137        /* read mf conf from shmem */
10138        if (read_cfg) {
10139            bxe_read_mf_cfg(sc);
10140        }
10141
10142        /* get VN min rate and enable fairness if not 0 */
10143        bxe_calc_vn_min(sc, &input);
10144
10145        /* get VN max rate */
10146        if (sc->port.pmf) {
10147            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10148                bxe_calc_vn_max(sc, vn, &input);
10149            }
10150        }
10151
10152        /* always enable rate shaping and fairness */
10153        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10154
10155        ecore_init_cmng(&input, &sc->cmng);
10156        return;
10157    }
10158
10159    /* rate shaping and fairness are disabled */
10160    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10161}
10162
10163static int
10164bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10165{
10166    if (CHIP_REV_IS_SLOW(sc)) {
10167        return (CMNG_FNS_NONE);
10168    }
10169
10170    if (IS_MF(sc)) {
10171        return (CMNG_FNS_MINMAX);
10172    }
10173
10174    return (CMNG_FNS_NONE);
10175}
10176
10177static void
10178storm_memset_cmng(struct bxe_softc *sc,
10179                  struct cmng_init *cmng,
10180                  uint8_t          port)
10181{
10182    int vn;
10183    int func;
10184    uint32_t addr;
10185    size_t size;
10186
10187    addr = (BAR_XSTRORM_INTMEM +
10188            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10189    size = sizeof(struct cmng_struct_per_port);
10190    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10191
10192    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10193        func = func_by_vn(sc, vn);
10194
10195        addr = (BAR_XSTRORM_INTMEM +
10196                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10197        size = sizeof(struct rate_shaping_vars_per_vn);
10198        ecore_storm_memset_struct(sc, addr, size,
10199                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10200
10201        addr = (BAR_XSTRORM_INTMEM +
10202                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10203        size = sizeof(struct fairness_vars_per_vn);
10204        ecore_storm_memset_struct(sc, addr, size,
10205                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10206    }
10207}
10208
10209static void
10210bxe_pf_init(struct bxe_softc *sc)
10211{
10212    struct bxe_func_init_params func_init = { 0 };
10213    struct event_ring_data eq_data = { { 0 } };
10214    uint16_t flags;
10215
10216    if (!CHIP_IS_E1x(sc)) {
10217        /* reset IGU PF statistics: MSIX + ATTN */
10218        /* PF */
10219        REG_WR(sc,
10220               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10221                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10222                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10223               0);
10224        /* ATTN */
10225        REG_WR(sc,
10226               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10227                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10228                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10229                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10230               0);
10231    }
10232
10233    /* function setup flags */
10234    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10235
10236    /*
10237     * This flag is relevant for E1x only.
10238     * E2 doesn't have a TPA configuration at the function level.
10239     */
10240    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10241
10242    func_init.func_flgs = flags;
10243    func_init.pf_id     = SC_FUNC(sc);
10244    func_init.func_id   = SC_FUNC(sc);
10245    func_init.spq_map   = sc->spq_dma.paddr;
10246    func_init.spq_prod  = sc->spq_prod_idx;
10247
10248    bxe_func_init(sc, &func_init);
10249
10250    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10251
10252    /*
10253     * Congestion management values depend on the link rate.
10254     * There is no active link so initial link rate is set to 10Gbps.
10255     * When the link comes up the congestion management values are
10256     * re-calculated according to the actual link rate.
10257     */
10258    sc->link_vars.line_speed = SPEED_10000;
10259    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10260
10261    /* Only the PMF sets the HW */
10262    if (sc->port.pmf) {
10263        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10264    }
10265
10266    /* init Event Queue - PCI bus guarantees correct endianness */
10267    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10268    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10269    eq_data.producer     = sc->eq_prod;
10270    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10271    eq_data.sb_id        = DEF_SB_ID;
10272    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10273}
10274
10275static void
10276bxe_hc_int_enable(struct bxe_softc *sc)
10277{
10278    int port = SC_PORT(sc);
10279    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10280    uint32_t val = REG_RD(sc, addr);
10281    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10282    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10283                           (sc->intr_count == 1)) ? TRUE : FALSE;
10284    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10285
10286    if (msix) {
10287        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10288                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10289        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10290                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10291        if (single_msix) {
10292            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10293        }
10294    } else if (msi) {
10295        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10296        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10297                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10298                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10299    } else {
10300        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10301                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10302                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10303                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10304
10305        if (!CHIP_IS_E1(sc)) {
10306            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10307                  val, port, addr);
10308
10309            REG_WR(sc, addr, val);
10310
10311            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10312        }
10313    }
10314
10315    if (CHIP_IS_E1(sc)) {
10316        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10317    }
10318
10319    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10320          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10321
10322    REG_WR(sc, addr, val);
10323
10324    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10325    mb();
10326
10327    if (!CHIP_IS_E1(sc)) {
10328        /* init leading/trailing edge */
10329        if (IS_MF(sc)) {
10330            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10331            if (sc->port.pmf) {
10332                /* enable nig and gpio3 attention */
10333                val |= 0x1100;
10334            }
10335        } else {
10336            val = 0xffff;
10337        }
10338
10339        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10340        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10341    }
10342
10343    /* make sure that interrupts are indeed enabled from here on */
10344    mb();
10345}
10346
10347static void
10348bxe_igu_int_enable(struct bxe_softc *sc)
10349{
10350    uint32_t val;
10351    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10352    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10353                           (sc->intr_count == 1)) ? TRUE : FALSE;
10354    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10355
10356    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10357
10358    if (msix) {
10359        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10360                 IGU_PF_CONF_SINGLE_ISR_EN);
10361        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10362                IGU_PF_CONF_ATTN_BIT_EN);
10363        if (single_msix) {
10364            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10365        }
10366    } else if (msi) {
10367        val &= ~IGU_PF_CONF_INT_LINE_EN;
10368        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10369                IGU_PF_CONF_ATTN_BIT_EN |
10370                IGU_PF_CONF_SINGLE_ISR_EN);
10371    } else {
10372        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10373        val |= (IGU_PF_CONF_INT_LINE_EN |
10374                IGU_PF_CONF_ATTN_BIT_EN |
10375                IGU_PF_CONF_SINGLE_ISR_EN);
10376    }
10377
10378    /* clean previous status - need to configure IGU prior to ack */
10379    if ((!msix) || single_msix) {
10380        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10381        bxe_ack_int(sc);
10382    }
10383
10384    val |= IGU_PF_CONF_FUNC_EN;
10385
10386    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10387          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10388
10389    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10390
10391    mb();
10392
10393    /* init leading/trailing edge */
10394    if (IS_MF(sc)) {
10395        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10396        if (sc->port.pmf) {
10397            /* enable nig and gpio3 attention */
10398            val |= 0x1100;
10399        }
10400    } else {
10401        val = 0xffff;
10402    }
10403
10404    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10405    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10406
10407    /* make sure that interrupts are indeed enabled from here on */
10408    mb();
10409}
10410
10411static void
10412bxe_int_enable(struct bxe_softc *sc)
10413{
10414    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10415        bxe_hc_int_enable(sc);
10416    } else {
10417        bxe_igu_int_enable(sc);
10418    }
10419}
10420
10421static void
10422bxe_hc_int_disable(struct bxe_softc *sc)
10423{
10424    int port = SC_PORT(sc);
10425    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10426    uint32_t val = REG_RD(sc, addr);
10427
10428    /*
10429     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10430     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10431     * HC block.
10432     */
10433    if (CHIP_IS_E1(sc)) {
10434        /*
10435         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10436         * to prevent the HC from sending interrupts after we exit the function.
10437         */
10438        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10439
10440        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10441                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10442                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10443    } else {
10444        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10445                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10446                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10447                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10448    }
10449
10450    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10451
10452    /* flush all outstanding writes */
10453    mb();
10454
10455    REG_WR(sc, addr, val);
10456    if (REG_RD(sc, addr) != val) {
10457        BLOGE(sc, "proper val not read from HC IGU!\n");
10458    }
10459}
10460
10461static void
10462bxe_igu_int_disable(struct bxe_softc *sc)
10463{
10464    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10465
10466    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10467             IGU_PF_CONF_INT_LINE_EN |
10468             IGU_PF_CONF_ATTN_BIT_EN);
10469
10470    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10471
10472    /* flush all outstanding writes */
10473    mb();
10474
10475    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10476    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10477        BLOGE(sc, "proper val not read from IGU!\n");
10478    }
10479}
10480
10481static void
10482bxe_int_disable(struct bxe_softc *sc)
10483{
10484    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10485        bxe_hc_int_disable(sc);
10486    } else {
10487        bxe_igu_int_disable(sc);
10488    }
10489}
10490
10491static void
10492bxe_nic_init(struct bxe_softc *sc,
10493             int              load_code)
10494{
10495    int i;
10496
10497    for (i = 0; i < sc->num_queues; i++) {
10498        bxe_init_eth_fp(sc, i);
10499    }
10500
10501    rmb(); /* ensure status block indices were read */
10502
10503    bxe_init_rx_rings(sc);
10504    bxe_init_tx_rings(sc);
10505
10506    if (IS_VF(sc)) {
10507        return;
10508    }
10509
10510    /* initialize MOD_ABS interrupts */
10511    elink_init_mod_abs_int(sc, &sc->link_vars,
10512                           sc->devinfo.chip_id,
10513                           sc->devinfo.shmem_base,
10514                           sc->devinfo.shmem2_base,
10515                           SC_PORT(sc));
10516
10517    bxe_init_def_sb(sc);
10518    bxe_update_dsb_idx(sc);
10519    bxe_init_sp_ring(sc);
10520    bxe_init_eq_ring(sc);
10521    bxe_init_internal(sc, load_code);
10522    bxe_pf_init(sc);
10523    bxe_stats_init(sc);
10524
10525    /* flush all before enabling interrupts */
10526    mb();
10527
10528    bxe_int_enable(sc);
10529
10530    /* check for SPIO5 */
10531    bxe_attn_int_deasserted0(sc,
10532                             REG_RD(sc,
10533                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10534                                     SC_PORT(sc)*4)) &
10535                             AEU_INPUTS_ATTN_BITS_SPIO5);
10536}
10537
10538static inline void
10539bxe_init_objs(struct bxe_softc *sc)
10540{
10541    /* mcast rules must be added to tx if tx switching is enabled */
10542    ecore_obj_type o_type =
10543        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10544                                         ECORE_OBJ_TYPE_RX;
10545
10546    /* RX_MODE controlling object */
10547    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10548
10549    /* multicast configuration controlling object */
10550    ecore_init_mcast_obj(sc,
10551                         &sc->mcast_obj,
10552                         sc->fp[0].cl_id,
10553                         sc->fp[0].index,
10554                         SC_FUNC(sc),
10555                         SC_FUNC(sc),
10556                         BXE_SP(sc, mcast_rdata),
10557                         BXE_SP_MAPPING(sc, mcast_rdata),
10558                         ECORE_FILTER_MCAST_PENDING,
10559                         &sc->sp_state,
10560                         o_type);
10561
10562    /* Setup CAM credit pools */
10563    ecore_init_mac_credit_pool(sc,
10564                               &sc->macs_pool,
10565                               SC_FUNC(sc),
10566                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10567                                                 VNICS_PER_PATH(sc));
10568
10569    ecore_init_vlan_credit_pool(sc,
10570                                &sc->vlans_pool,
10571                                SC_ABS_FUNC(sc) >> 1,
10572                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10573                                                  VNICS_PER_PATH(sc));
10574
10575    /* RSS configuration object */
10576    ecore_init_rss_config_obj(sc,
10577                              &sc->rss_conf_obj,
10578                              sc->fp[0].cl_id,
10579                              sc->fp[0].index,
10580                              SC_FUNC(sc),
10581                              SC_FUNC(sc),
10582                              BXE_SP(sc, rss_rdata),
10583                              BXE_SP_MAPPING(sc, rss_rdata),
10584                              ECORE_FILTER_RSS_CONF_PENDING,
10585                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10586}
10587
10588/*
10589 * Initialize the function. This must be called before sending CLIENT_SETUP
10590 * for the first client.
10591 */
10592static inline int
10593bxe_func_start(struct bxe_softc *sc)
10594{
10595    struct ecore_func_state_params func_params = { NULL };
10596    struct ecore_func_start_params *start_params = &func_params.params.start;
10597
10598    /* Prepare parameters for function state transitions */
10599    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10600
10601    func_params.f_obj = &sc->func_obj;
10602    func_params.cmd = ECORE_F_CMD_START;
10603
10604    /* Function parameters */
10605    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10606    start_params->sd_vlan_tag = OVLAN(sc);
10607
10608    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10609        start_params->network_cos_mode = STATIC_COS;
10610    } else { /* CHIP_IS_E1X */
10611        start_params->network_cos_mode = FW_WRR;
10612    }
10613
10614    //start_params->gre_tunnel_mode = 0;
10615    //start_params->gre_tunnel_rss  = 0;
10616
10617    return (ecore_func_state_change(sc, &func_params));
10618}
10619
10620static int
10621bxe_set_power_state(struct bxe_softc *sc,
10622                    uint8_t          state)
10623{
10624    uint16_t pmcsr;
10625
10626    /* If there is no power capability, silently succeed */
10627    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10628        BLOGW(sc, "No power capability\n");
10629        return (0);
10630    }
10631
10632    pmcsr = pci_read_config(sc->dev,
10633                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10634                            2);
10635
10636    switch (state) {
10637    case PCI_PM_D0:
10638        pci_write_config(sc->dev,
10639                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10640                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10641
10642        if (pmcsr & PCIM_PSTAT_DMASK) {
10643            /* delay required during transition out of D3hot */
10644            DELAY(20000);
10645        }
10646
10647        break;
10648
10649    case PCI_PM_D3hot:
10650        /* XXX if there are other clients above don't shut down the power */
10651
10652        /* don't shut down the power for emulation and FPGA */
10653        if (CHIP_REV_IS_SLOW(sc)) {
10654            return (0);
10655        }
10656
10657        pmcsr &= ~PCIM_PSTAT_DMASK;
10658        pmcsr |= PCIM_PSTAT_D3;
10659
10660        if (sc->wol) {
10661            pmcsr |= PCIM_PSTAT_PMEENABLE;
10662        }
10663
10664        pci_write_config(sc->dev,
10665                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10666                         pmcsr, 4);
10667
10668        /*
10669         * No more memory access after this point until device is brought back
10670         * to D0 state.
10671         */
10672        break;
10673
10674    default:
10675        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10676            state, pmcsr);
10677        return (-1);
10678    }
10679
10680    return (0);
10681}
10682
10683
10684/* return true if the lock was acquired successfully */
10685static uint8_t
10686bxe_trylock_hw_lock(struct bxe_softc *sc,
10687                    uint32_t         resource)
10688{
10689    uint32_t lock_status;
10690    uint32_t resource_bit = (1 << resource);
10691    int func = SC_FUNC(sc);
10692    uint32_t hw_lock_control_reg;
10693
10694    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10695
10696    /* Validating that the resource is within range */
10697    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10698        BLOGD(sc, DBG_LOAD,
10699              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10700              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10701        return (FALSE);
10702    }
10703
10704    if (func <= 5) {
10705        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10706    } else {
10707        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10708    }
10709
10710    /* try to acquire the lock */
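    /*
     * The register at hw_lock_control_reg + 4 appears to act as the "set"
     * companion of the lock register: writing the resource bit requests
     * ownership, and reading the lock register back tells us whether this
     * function now holds that bit.
     */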
10711    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10712    lock_status = REG_RD(sc, hw_lock_control_reg);
10713    if (lock_status & resource_bit) {
10714        return (TRUE);
10715    }
10716
10717    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10718        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10719        lock_status, resource_bit);
10720
10721    return (FALSE);
10722}
10723
10724/*
10725 * Get the recovery leader resource id according to the engine this function
10726 * belongs to. Currently only 2 engines are supported.
10727 */
10728static int
10729bxe_get_leader_lock_resource(struct bxe_softc *sc)
10730{
10731    if (SC_PATH(sc)) {
10732        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10733    } else {
10734        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10735    }
10736}
10737
10738/* try to acquire a leader lock for current engine */
10739static uint8_t
10740bxe_trylock_leader_lock(struct bxe_softc *sc)
10741{
10742    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10743}
10744
10745static int
10746bxe_release_leader_lock(struct bxe_softc *sc)
10747{
10748    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10749}
10750
10751/* close gates #2, #3 and #4 */
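/*
 * Based on the register writes below: gate #2 discards host internal writes
 * and gate #4 discards doorbells (both in PXP), while gate #3 blocks
 * interrupt generation in the HC (E1x) or IGU. bxe_process_kill() closes
 * these gates to isolate the chip from the host during recovery and reopens
 * them once the reset is complete.
 */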
10752static void
10753bxe_set_234_gates(struct bxe_softc *sc,
10754                  uint8_t          close)
10755{
10756    uint32_t val;
10757
10758    /* gates #2 and #4a are closed/opened for "not E1" only */
10759    if (!CHIP_IS_E1(sc)) {
10760        /* #4 */
10761        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10762        /* #2 */
10763        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10764    }
10765
10766    /* #3 */
10767    if (CHIP_IS_E1x(sc)) {
10768        /* prevent interrupts from HC on both ports */
10769        val = REG_RD(sc, HC_REG_CONFIG_1);
10770        REG_WR(sc, HC_REG_CONFIG_1,
10771               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10772               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10773
10774        val = REG_RD(sc, HC_REG_CONFIG_0);
10775        REG_WR(sc, HC_REG_CONFIG_0,
10776               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10777               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10778    } else {
10779        /* Prevent incoming interrupts in IGU */
10780        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10781
10782        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10783               (!close) ?
10784               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10785               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10786    }
10787
10788    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10789          close ? "closing" : "opening");
10790
10791    wmb();
10792}
10793
10794/* poll the pending writes bit; it should get cleared in no more than 1s */
10795static int
10796bxe_er_poll_igu_vq(struct bxe_softc *sc)
10797{
10798    uint32_t cnt = 1000;
10799    uint32_t pend_bits = 0;
10800
10801    do {
10802        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10803
10804        if (pend_bits == 0) {
10805            break;
10806        }
10807
10808        DELAY(1000);
10809    } while (--cnt > 0);
10810
10811    if (cnt == 0) {
10812        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10813        return (-1);
10814    }
10815
10816    return (0);
10817}
10818
10819#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10820
10821static void
10822bxe_clp_reset_prep(struct bxe_softc *sc,
10823                   uint32_t         *magic_val)
10824{
10825    /* Do some magic... */
10826    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10827    *magic_val = val & SHARED_MF_CLP_MAGIC;
10828    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10829}
10830
10831/* restore the value of the 'magic' bit */
10832static void
10833bxe_clp_reset_done(struct bxe_softc *sc,
10834                   uint32_t         magic_val)
10835{
10836    /* Restore the 'magic' bit value... */
10837    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10838    MFCFG_WR(sc, shared_mf_config.clp_mb,
10839              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10840}
10841
10842/* prepare for MCP reset, takes care of CLP configurations */
10843static void
10844bxe_reset_mcp_prep(struct bxe_softc *sc,
10845                   uint32_t         *magic_val)
10846{
10847    uint32_t shmem;
10848    uint32_t validity_offset;
10849
10850    /* set `magic' bit in order to save MF config */
10851    if (!CHIP_IS_E1(sc)) {
10852        bxe_clp_reset_prep(sc, magic_val);
10853    }
10854
10855    /* get shmem offset */
10856    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10857    validity_offset =
10858        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10859
10860    /* Clear validity map flags */
10861    if (shmem > 0) {
10862        REG_WR(sc, shmem + validity_offset, 0);
10863    }
10864}
10865
10866#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10867#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10868
10869static void
10870bxe_mcp_wait_one(struct bxe_softc *sc)
10871{
10872    /* special handling for emulation and FPGA (10 times longer) */
10873    if (CHIP_REV_IS_SLOW(sc)) {
10874        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10875    } else {
10876        DELAY((MCP_ONE_TIMEOUT) * 1000);
10877    }
10878}
10879
10880/* initialize shmem_base and wait for the validity signature to appear */
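/*
 * (Polls roughly every MCP_ONE_TIMEOUT ms for up to MCP_TIMEOUT ms, i.e.
 * about 50 attempts; bxe_mcp_wait_one() waits 10x longer per attempt on
 * emulation/FPGA platforms.)
 */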
10881static int
10882bxe_init_shmem(struct bxe_softc *sc)
10883{
10884    int cnt = 0;
10885    uint32_t val = 0;
10886
10887    do {
10888        sc->devinfo.shmem_base     =
10889        sc->link_params.shmem_base =
10890            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10891
10892        if (sc->devinfo.shmem_base) {
10893            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10894            if (val & SHR_MEM_VALIDITY_MB)
10895                return (0);
10896        }
10897
10898        bxe_mcp_wait_one(sc);
10899
10900    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10901
10902    BLOGE(sc, "BAD MCP validity signature\n");
10903
10904    return (-1);
10905}
10906
10907static int
10908bxe_reset_mcp_comp(struct bxe_softc *sc,
10909                   uint32_t         magic_val)
10910{
10911    int rc = bxe_init_shmem(sc);
10912
10913    /* Restore the `magic' bit value */
10914    if (!CHIP_IS_E1(sc)) {
10915        bxe_clp_reset_done(sc, magic_val);
10916    }
10917
10918    return (rc);
10919}
10920
10921static void
10922bxe_pxp_prep(struct bxe_softc *sc)
10923{
10924    if (!CHIP_IS_E1(sc)) {
10925        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10926        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10927        wmb();
10928    }
10929}
10930
10931/*
10932 * Reset the whole chip except for:
10933 *      - PCIE core
10934 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10935 *      - IGU
10936 *      - MISC (including AEU)
10937 *      - GRC
10938 *      - RBCN, RBCP
10939 */
10940static void
10941bxe_process_kill_chip_reset(struct bxe_softc *sc,
10942                            uint8_t          global)
10943{
10944    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10945    uint32_t global_bits2, stay_reset2;
10946
10947    /*
10948     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10949     * (per chip) blocks.
10950     */
10951    global_bits2 =
10952        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10953        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10954
10955    /*
10956     * Don't reset the following blocks.
10957     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10958     *            reset, as in a 4-port device they might still be owned
10959     *            by the MCP (there is only one leader per path).
10960     */
10961    not_reset_mask1 =
10962        MISC_REGISTERS_RESET_REG_1_RST_HC |
10963        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10964        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10965
10966    not_reset_mask2 =
10967        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10968        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10969        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10970        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10971        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10972        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10973        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10974        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10975        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10976        MISC_REGISTERS_RESET_REG_2_PGLC |
10977        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10978        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10979        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10980        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10981        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10982        MISC_REGISTERS_RESET_REG_2_UMAC1;
10983
10984    /*
10985     * Keep the following blocks in reset:
10986     *  - all xxMACs are handled by the elink code.
10987     */
10988    stay_reset2 =
10989        MISC_REGISTERS_RESET_REG_2_XMAC |
10990        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10991
10992    /* Full reset masks according to the chip */
10993    reset_mask1 = 0xffffffff;
10994
10995    if (CHIP_IS_E1(sc))
10996        reset_mask2 = 0xffff;
10997    else if (CHIP_IS_E1H(sc))
10998        reset_mask2 = 0x1ffff;
10999    else if (CHIP_IS_E2(sc))
11000        reset_mask2 = 0xfffff;
11001    else /* CHIP_IS_E3 */
11002        reset_mask2 = 0x3ffffff;
11003
11004    /* Don't reset global blocks unless we need to */
11005    if (!global)
11006        reset_mask2 &= ~global_bits2;
11007
11008    /*
11009     * In case of attention in the QM, we need to reset PXP
11010     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11011     * because otherwise QM reset would release 'close the gates' shortly
11012     * before resetting the PXP, then the PSWRQ would send a write
11013     * request to PGLUE. Then when PXP is reset, PGLUE would try to
11014     * read the payload data from PSWWR, but PSWWR would not
11015     * respond. The write queue in PGLUE would stuck, dmae commands
11016     * would not return. Therefore it's important to reset the second
11017     * reset register (containing the
11018     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11019     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11020     * bit).
11021     */
11022    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11023           reset_mask2 & (~not_reset_mask2));
11024
11025    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11026           reset_mask1 & (~not_reset_mask1));
11027
11028    mb();
11029    wmb();
11030
11031    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11032           reset_mask2 & (~stay_reset2));
11033
11034    mb();
11035    wmb();
11036
11037    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11038    wmb();
11039}
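/*
 * Note on the sequence above: the RESET_REG_*_CLEAR writes put the selected
 * blocks into reset and the later RESET_REG_*_SET writes release them, which
 * is why the stay_reset2 blocks, excluded from the SET, remain held in reset
 * as the comment above describes.
 */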
11040
11041static int
11042bxe_process_kill(struct bxe_softc *sc,
11043                 uint8_t          global)
11044{
11045    int cnt = 1000;
11046    uint32_t val = 0;
11047    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11048    uint32_t tags_63_32 = 0;
11049
11050    /* Empty the Tetris buffer, wait for 1s */
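    /*
     * Poll once per millisecond, up to ~1000 iterations, until the
     * PXP2/PGLUE counters reach the expected idle values checked below.
     */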
11051    do {
11052        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11053        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11054        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11055        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11056        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11057        if (CHIP_IS_E3(sc)) {
11058            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11059        }
11060
11061        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11062            ((port_is_idle_0 & 0x1) == 0x1) &&
11063            ((port_is_idle_1 & 0x1) == 0x1) &&
11064            (pgl_exp_rom2 == 0xffffffff) &&
11065            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11066            break;
11067        DELAY(1000);
11068    } while (cnt-- > 0);
11069
11070    if (cnt <= 0) {
11071        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11072                  "are still outstanding read requests after 1s! "
11073                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11074                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11075              sr_cnt, blk_cnt, port_is_idle_0,
11076              port_is_idle_1, pgl_exp_rom2);
11077        return (-1);
11078    }
11079
11080    mb();
11081
11082    /* Close gates #2, #3 and #4 */
11083    bxe_set_234_gates(sc, TRUE);
11084
11085    /* Poll for IGU VQs for 57712 and newer chips */
11086    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11087        return (-1);
11088    }
11089
11090    /* XXX indicate that "process kill" is in progress to MCP */
11091
11092    /* clear "unprepared" bit */
11093    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11094    mb();
11095
11096    /* Make sure all is written to the chip before the reset */
11097    wmb();
11098
11099    /*
11100     * Wait for 1ms to empty GLUE and PCI-E core queues,
11101     * PSWHST, GRC and PSWRD Tetris buffer.
11102     */
11103    DELAY(1000);
11104
11105    /* Prepare for chip reset: */
11106    /* MCP */
11107    if (global) {
11108        bxe_reset_mcp_prep(sc, &val);
11109    }
11110
11111    /* PXP */
11112    bxe_pxp_prep(sc);
11113    mb();
11114
11115    /* reset the chip */
11116    bxe_process_kill_chip_reset(sc, global);
11117    mb();
11118
11119    /* clear errors in PGB */
11120    if (!CHIP_IS_E1(sc))
11121        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11122
11123    /* Recover after reset: */
11124    /* MCP */
11125    if (global && bxe_reset_mcp_comp(sc, val)) {
11126        return (-1);
11127    }
11128
11129    /* XXX add resetting the NO_MCP mode DB here */
11130
11131    /* Open the gates #2, #3 and #4 */
11132    bxe_set_234_gates(sc, FALSE);
11133
11134    /* XXX
11135     * IGU/AEU preparation: bring the AEU/IGU back to a reset state,
11136     * then re-enable attentions
11137     */
11138
11139    return (0);
11140}
11141
11142static int
11143bxe_leader_reset(struct bxe_softc *sc)
11144{
11145    int rc = 0;
11146    uint8_t global = bxe_reset_is_global(sc);
11147    uint32_t load_code;
11148
11149    /*
11150     * If not going to reset MCP, load "fake" driver to reset HW while
11151     * driver is owner of the HW.
11152     */
11153    if (!global && !BXE_NOMCP(sc)) {
11154        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11155                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11156        if (!load_code) {
11157            BLOGE(sc, "MCP response failure, aborting\n");
11158            rc = -1;
11159            goto exit_leader_reset;
11160        }
11161
11162        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11163            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11164            BLOGE(sc, "MCP unexpected response, aborting\n");
11165            rc = -1;
11166            goto exit_leader_reset2;
11167        }
11168
11169        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11170        if (!load_code) {
11171            BLOGE(sc, "MCP response failure, aborting\n");
11172            rc = -1;
11173            goto exit_leader_reset2;
11174        }
11175    }
11176
11177    /* try to recover after the failure */
11178    if (bxe_process_kill(sc, global)) {
11179        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11180        rc = -1;
11181        goto exit_leader_reset2;
11182    }
11183
11184    /*
11185     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11186     * state.
11187     */
11188    bxe_set_reset_done(sc);
11189    if (global) {
11190        bxe_clear_reset_global(sc);
11191    }
11192
11193exit_leader_reset2:
11194
11195    /* unload "fake driver" if it was loaded */
11196    if (!global && !BXE_NOMCP(sc)) {
11197        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11198        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11199    }
11200
11201exit_leader_reset:
11202
11203    sc->is_leader = 0;
11204    bxe_release_leader_lock(sc);
11205
11206    mb();
11207    return (rc);
11208}
11209
11210/*
11211 * prepare INIT transition, parameters configured:
11212 *   - HC configuration
11213 *   - Queue's CDU context
11214 */
11215static void
11216bxe_pf_q_prep_init(struct bxe_softc               *sc,
11217                   struct bxe_fastpath            *fp,
11218                   struct ecore_queue_init_params *init_params)
11219{
11220    uint8_t cos;
11221    int cxt_index, cxt_offset;
11222
11223    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11224    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11225
11226    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11227    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11228
11229    /* HC rate */
11230    init_params->rx.hc_rate =
11231        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11232    init_params->tx.hc_rate =
11233        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
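    /*
     * hc_rx_ticks/hc_tx_ticks are coalescing periods in usecs, so the HC
     * rate is simply events per second.  For example (hypothetical value),
     * hc_rx_ticks = 25 usecs gives 1000000 / 25 = 40000 interrupts/sec;
     * a tick value of 0 simply produces an hc_rate of 0.
     */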
11234
11235    /* FW SB ID */
11236    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11237
11238    /* CQ index among the SB indices */
11239    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11240    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11241
11242    /* set maximum number of COSs supported by this queue */
11243    init_params->max_cos = sc->max_cos;
11244
11245    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11246          fp->index, init_params->max_cos);
11247
11248    /* set the context pointers queue object */
11249    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11250        /* XXX change index/cid here if we ever support multiple TX CoS */
11251        /* fp->txdata[cos]->cid */
11252        cxt_index = fp->index / ILT_PAGE_CIDS;
11253        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
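        /*
         * Illustration only: fp->index is split into an ILT page (cxt_index)
         * and an offset within that page (cxt_offset).  If ILT_PAGE_CIDS
         * were, say, 128, an fp->index of 130 would give page 1, offset 2.
         * The actual ILT_PAGE_CIDS value depends on the chip/page size.
         */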
11254        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11255    }
11256}
11257
11258/* set flags that are common for the Tx-only and not normal connections */
11259static unsigned long
11260bxe_get_common_flags(struct bxe_softc    *sc,
11261                     struct bxe_fastpath *fp,
11262                     uint8_t             zero_stats)
11263{
11264    unsigned long flags = 0;
11265
11266    /* PF driver will always initialize the Queue to an ACTIVE state */
11267    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11268
11269    /*
11270     * tx only connections collect statistics (on the same index as the
11271     * parent connection). The statistics are zeroed when the parent
11272     * connection is initialized.
11273     */
11274
11275    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11276    if (zero_stats) {
11277        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11278    }
11279
11280    /*
11281     * TX-only connections can support TX switching, though their CoS
11282     * assignment does not survive the loopback.
11283     */
11284    if (sc->flags & BXE_TX_SWITCHING) {
11285        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11286    }
11287
11288    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11289
11290    return (flags);
11291}
11292
11293static unsigned long
11294bxe_get_q_flags(struct bxe_softc    *sc,
11295                struct bxe_fastpath *fp,
11296                uint8_t             leading)
11297{
11298    unsigned long flags = 0;
11299
11300    if (IS_MF_SD(sc)) {
11301        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11302    }
11303
11304    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11305        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11306        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11307    }
11308
11309    if (leading) {
11310        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11311        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11312    }
11313
11314    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11315
11316    /* merge with common flags */
11317    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11318}
11319
11320static void
11321bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11322                      struct bxe_fastpath               *fp,
11323                      struct ecore_general_setup_params *gen_init,
11324                      uint8_t                           cos)
11325{
11326    gen_init->stat_id = bxe_stats_id(fp);
11327    gen_init->spcl_id = fp->cl_id;
11328    gen_init->mtu = sc->mtu;
11329    gen_init->cos = cos;
11330}
11331
11332static void
11333bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11334                 struct bxe_fastpath           *fp,
11335                 struct rxq_pause_params       *pause,
11336                 struct ecore_rxq_setup_params *rxq_init)
11337{
11338    uint8_t max_sge = 0;
11339    uint16_t sge_sz = 0;
11340    uint16_t tpa_agg_size = 0;
11341
11342    pause->sge_th_lo = SGE_TH_LO(sc);
11343    pause->sge_th_hi = SGE_TH_HI(sc);
11344
11345    /* validate the SGE ring has enough entries to cross the high threshold */
11346    if (sc->dropless_fc &&
11347            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11348            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11349        BLOGW(sc, "sge ring threshold limit\n");
11350    }
11351
11352    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11353    tpa_agg_size = (2 * sc->mtu);
11354    if (tpa_agg_size < sc->max_aggregation_size) {
11355        tpa_agg_size = sc->max_aggregation_size;
11356    }
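    /*
     * i.e. tpa_agg_size = max(2 * MTU, max_aggregation_size); with a
     * standard 1500-byte MTU the floor is 3000 bytes.
     */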
11357
11358    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11359    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11360                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11361    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
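    /*
     * Worked example (illustrative values only): assuming 4KB SGE pages and
     * PAGES_PER_SGE == 2, a 9000-byte MTU aligns to 3 pages, is rounded up
     * to 4, and therefore needs max_sge = 2 SGE entries per packet.
     */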
11362
11363    /* pause - not for e1 */
11364    if (!CHIP_IS_E1(sc)) {
11365        pause->bd_th_lo = BD_TH_LO(sc);
11366        pause->bd_th_hi = BD_TH_HI(sc);
11367
11368        pause->rcq_th_lo = RCQ_TH_LO(sc);
11369        pause->rcq_th_hi = RCQ_TH_HI(sc);
11370
11371        /* validate rings have enough entries to cross high thresholds */
11372        if (sc->dropless_fc &&
11373            pause->bd_th_hi + FW_PREFETCH_CNT >
11374            sc->rx_ring_size) {
11375            BLOGW(sc, "rx bd ring threshold limit\n");
11376        }
11377
11378        if (sc->dropless_fc &&
11379            pause->rcq_th_hi + FW_PREFETCH_CNT >
11380            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11381            BLOGW(sc, "rcq ring threshold limit\n");
11382        }
11383
11384        pause->pri_map = 1;
11385    }
11386
11387    /* rxq setup */
11388    rxq_init->dscr_map   = fp->rx_dma.paddr;
11389    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11390    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11391    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11392
11393    /*
11394     * This should be the maximum number of data bytes that may be
11395     * placed on the BD (not including padding).
11396     */
11397    rxq_init->buf_sz = (fp->rx_buf_size -
11398                        IP_HEADER_ALIGNMENT_PADDING);
11399
11400    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11401    rxq_init->tpa_agg_sz      = tpa_agg_size;
11402    rxq_init->sge_buf_sz      = sge_sz;
11403    rxq_init->max_sges_pkt    = max_sge;
11404    rxq_init->rss_engine_id   = SC_FUNC(sc);
11405    rxq_init->mcast_engine_id = SC_FUNC(sc);
11406
11407    /*
11408     * Maximum number of simultaneous TPA aggregations for this Queue.
11409     * For PF Clients it should be the maximum available number.
11410     * VF driver(s) may want to define it to a smaller value.
11411     */
11412    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11413
11414    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11415    rxq_init->fw_sb_id = fp->fw_sb_id;
11416
11417    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11418
11419    /*
11420     * configure silent vlan removal
11421     * if multi function mode is afex, then mask default vlan
11422     */
11423    if (IS_MF_AFEX(sc)) {
11424        rxq_init->silent_removal_value =
11425            sc->devinfo.mf_info.afex_def_vlan_tag;
11426        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11427    }
11428}
11429
11430static void
11431bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11432                 struct bxe_fastpath           *fp,
11433                 struct ecore_txq_setup_params *txq_init,
11434                 uint8_t                       cos)
11435{
11436    /*
11437     * XXX If multiple CoS queues are ever supported, each fastpath structure
11438     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11439     * fp->txdata[cos]->tx_dma.paddr;
11440     */
11441    txq_init->dscr_map     = fp->tx_dma.paddr;
11442    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11443    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11444    txq_init->fw_sb_id     = fp->fw_sb_id;
11445
11446    /*
11447     * set the TSS leading client id for TX classification to the
11448     * leading RSS client id
11449     */
11450    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11451}
11452
11453/*
11454 * This function performs 2 steps in a queue state machine:
11455 *   1) RESET->INIT
11456 *   2) INIT->SETUP
11457 */
11458static int
11459bxe_setup_queue(struct bxe_softc    *sc,
11460                struct bxe_fastpath *fp,
11461                uint8_t             leading)
11462{
11463    struct ecore_queue_state_params q_params = { NULL };
11464    struct ecore_queue_setup_params *setup_params =
11465                        &q_params.params.setup;
11466    int rc;
11467
11468    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11469
11470    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11471
11472    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11473
11474    /* we want to wait for completion in this context */
11475    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11476
11477    /* prepare the INIT parameters */
11478    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11479
11480    /* Set the command */
11481    q_params.cmd = ECORE_Q_CMD_INIT;
11482
11483    /* Change the state to INIT */
11484    rc = ecore_queue_state_change(sc, &q_params);
11485    if (rc) {
11486        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11487        return (rc);
11488    }
11489
11490    BLOGD(sc, DBG_LOAD, "init complete\n");
11491
11492    /* now move the Queue to the SETUP state */
11493    memset(setup_params, 0, sizeof(*setup_params));
11494
11495    /* set Queue flags */
11496    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11497
11498    /* set general SETUP parameters */
11499    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11500                          FIRST_TX_COS_INDEX);
11501
11502    bxe_pf_rx_q_prep(sc, fp,
11503                     &setup_params->pause_params,
11504                     &setup_params->rxq_params);
11505
11506    bxe_pf_tx_q_prep(sc, fp,
11507                     &setup_params->txq_params,
11508                     FIRST_TX_COS_INDEX);
11509
11510    /* Set the command */
11511    q_params.cmd = ECORE_Q_CMD_SETUP;
11512
11513    /* change the state to SETUP */
11514    rc = ecore_queue_state_change(sc, &q_params);
11515    if (rc) {
11516        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11517        return (rc);
11518    }
11519
11520    return (rc);
11521}
11522
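/*
 * The "leading" queue is fp[0].  Per bxe_get_q_flags() above, it additionally
 * carries ECORE_Q_FLG_LEADING_RSS and ECORE_Q_FLG_MCAST, so RSS and multicast
 * configuration are anchored to this queue.
 */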
11523static int
11524bxe_setup_leading(struct bxe_softc *sc)
11525{
11526    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11527}
11528
11529static int
11530bxe_config_rss_pf(struct bxe_softc            *sc,
11531                  struct ecore_rss_config_obj *rss_obj,
11532                  uint8_t                     config_hash)
11533{
11534    struct ecore_config_rss_params params = { NULL };
11535    int i;
11536
11537    /*
11538     * Although RSS is meaningless when there is a single HW queue, we
11539     * still need it enabled in order to have the HW RX hash generated.
11540     */
11541
11542    params.rss_obj = rss_obj;
11543
11544    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11545
11546    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11547
11548    /* RSS configuration */
11549    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11550    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11551    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11552    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11553    if (rss_obj->udp_rss_v4) {
11554        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11555    }
11556    if (rss_obj->udp_rss_v6) {
11557        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11558    }
11559
11560    /* Hash bits */
11561    params.rss_result_mask = MULTI_MASK;
11562
11563    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11564
11565    if (config_hash) {
11566        /* RSS keys */
11567        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11568            params.rss_key[i] = arc4random();
11569        }
11570
11571        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11572    }
11573
11574    return (ecore_config_rss(sc, &params));
11575}
11576
11577static int
11578bxe_config_rss_eth(struct bxe_softc *sc,
11579                   uint8_t          config_hash)
11580{
11581    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11582}
11583
11584static int
11585bxe_init_rss_pf(struct bxe_softc *sc)
11586{
11587    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11588    int i;
11589
11590    /*
11591     * Prepare the initial contents of the indirection table if
11592     * RSS is enabled
11593     */
11594    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11595        sc->rss_conf_obj.ind_table[i] =
11596            (sc->fp->cl_id + (i % num_eth_queues));
11597    }
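    /*
     * This spreads the indirection table entries round-robin across the ETH
     * queues.  For example (hypothetical values), with 4 queues and a base
     * cl_id of 0 the table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
     */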
11598
11599    if (sc->udp_rss) {
11600        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11601    }
11602
11603    /*
11604     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11605     * per-port, so if explicit configuration is needed, do it only
11606     * for a PMF.
11607     *
11608     * For 57712 and newer it's a per-function configuration.
11609     */
11610    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11611}
11612
11613static int
11614bxe_set_mac_one(struct bxe_softc          *sc,
11615                uint8_t                   *mac,
11616                struct ecore_vlan_mac_obj *obj,
11617                uint8_t                   set,
11618                int                       mac_type,
11619                unsigned long             *ramrod_flags)
11620{
11621    struct ecore_vlan_mac_ramrod_params ramrod_param;
11622    int rc;
11623
11624    memset(&ramrod_param, 0, sizeof(ramrod_param));
11625
11626    /* fill in general parameters */
11627    ramrod_param.vlan_mac_obj = obj;
11628    ramrod_param.ramrod_flags = *ramrod_flags;
11629
11630    /* fill a user request section if needed */
11631    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11632        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11633
11634        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11635
11636        /* Set the command: ADD or DEL */
11637        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11638                                            ECORE_VLAN_MAC_DEL;
11639    }
11640
11641    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11642
11643    if (rc == ECORE_EXISTS) {
11644        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11645        /* do not treat adding same MAC as error */
11646        rc = 0;
11647    } else if (rc < 0) {
11648        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11649    }
11650
11651    return (rc);
11652}
11653
11654static int
11655bxe_set_eth_mac(struct bxe_softc *sc,
11656                uint8_t          set)
11657{
11658    unsigned long ramrod_flags = 0;
11659
11660    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11661
11662    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11663
11664    /* Eth MAC is set on RSS leading client (fp[0]) */
11665    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11666                            &sc->sp_objs->mac_obj,
11667                            set, ECORE_ETH_MAC, &ramrod_flags));
11668}
11669
11670static int
11671bxe_get_cur_phy_idx(struct bxe_softc *sc)
11672{
11673    uint32_t sel_phy_idx = 0;
11674
11675    if (sc->link_params.num_phys <= 1) {
11676        return (ELINK_INT_PHY);
11677    }
11678
11679    if (sc->link_vars.link_up) {
11680        sel_phy_idx = ELINK_EXT_PHY1;
11681        /* If the link is SERDES, check whether ELINK_EXT_PHY2 is the active PHY */
11682        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11683            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11684             ELINK_SUPPORTED_FIBRE))
11685            sel_phy_idx = ELINK_EXT_PHY2;
11686    } else {
11687        switch (elink_phy_selection(&sc->link_params)) {
11688        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11689        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11690        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11691               sel_phy_idx = ELINK_EXT_PHY1;
11692               break;
11693        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11694        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11695               sel_phy_idx = ELINK_EXT_PHY2;
11696               break;
11697        }
11698    }
11699
11700    return (sel_phy_idx);
11701}
11702
11703static int
11704bxe_get_link_cfg_idx(struct bxe_softc *sc)
11705{
11706    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11707
11708    /*
11709     * The selected/activated PHY index is always the post-swap index (when
11710     * PHY swapping is enabled), so in that case we must reverse the mapping
11711     * to recover the configuration index.
11712     */
11713
11714    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11715        if (sel_phy_idx == ELINK_EXT_PHY1)
11716            sel_phy_idx = ELINK_EXT_PHY2;
11717        else if (sel_phy_idx == ELINK_EXT_PHY2)
11718            sel_phy_idx = ELINK_EXT_PHY1;
11719    }
11720
11721    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11722}
11723
11724static void
11725bxe_set_requested_fc(struct bxe_softc *sc)
11726{
11727    /*
11728     * Initialize the link parameters structure variables.
11729     * It is recommended to turn off RX flow control for jumbo frames
11730     * for better performance.
11731     */
11732    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11733        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11734    } else {
11735        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11736    }
11737}
11738
11739static void
11740bxe_calc_fc_adv(struct bxe_softc *sc)
11741{
11742    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11743    switch (sc->link_vars.ieee_fc &
11744            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11745    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11746    default:
11747        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11748                                           ADVERTISED_Pause);
11749        break;
11750
11751    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11752        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11753                                          ADVERTISED_Pause);
11754        break;
11755
11756    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11757        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11758        break;
11759    }
11760}
11761
11762static uint16_t
11763bxe_get_mf_speed(struct bxe_softc *sc)
11764{
11765    uint16_t line_speed = sc->link_vars.line_speed;
11766    if (IS_MF(sc)) {
11767        uint16_t maxCfg =
11768            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11769
11770        /* calculate the current MAX line speed limit for the MF devices */
11771        if (IS_MF_SI(sc)) {
11772            line_speed = (line_speed * maxCfg) / 100;
11773        } else { /* SD mode */
11774            uint16_t vn_max_rate = maxCfg * 100;
11775
11776            if (vn_max_rate < line_speed) {
11777                line_speed = vn_max_rate;
11778            }
11779        }
11780    }
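    /*
     * Example with illustrative numbers: with maxCfg = 40 and a 10000 Mbps
     * physical line speed, SI mode reports 10000 * 40 / 100 = 4000 Mbps,
     * while SD mode caps the speed at 40 * 100 = 4000 Mbps.
     */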
11781
11782    return (line_speed);
11783}
11784
11785static void
11786bxe_fill_report_data(struct bxe_softc            *sc,
11787                     struct bxe_link_report_data *data)
11788{
11789    uint16_t line_speed = bxe_get_mf_speed(sc);
11790
11791    memset(data, 0, sizeof(*data));
11792
11793    /* fill the report data with the effective line speed */
11794    data->line_speed = line_speed;
11795
11796    /* Link is down */
11797    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11798        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11799    }
11800
11801    /* Full DUPLEX */
11802    if (sc->link_vars.duplex == DUPLEX_FULL) {
11803        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11804    }
11805
11806    /* Rx Flow Control is ON */
11807    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11808        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11809    }
11810
11811    /* Tx Flow Control is ON */
11812    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11813        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11814    }
11815}
11816
11817/* report link status to OS, should be called under phy_lock */
11818static void
11819bxe_link_report_locked(struct bxe_softc *sc)
11820{
11821    struct bxe_link_report_data cur_data;
11822
11823    /* reread mf_cfg */
11824    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11825        bxe_read_mf_cfg(sc);
11826    }
11827
11828    /* Read the current link report info */
11829    bxe_fill_report_data(sc, &cur_data);
11830
11831    /* Don't report link down or exactly the same link status twice */
11832    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11833        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11834                      &sc->last_reported_link.link_report_flags) &&
11835         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11836                      &cur_data.link_report_flags))) {
11837        return;
11838    }
11839
11840    sc->link_cnt++;
11841
11842    /* report new link params and remember the state for the next time */
11843    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11844
11845    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11846                     &cur_data.link_report_flags)) {
11847        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11848        BLOGI(sc, "NIC Link is Down\n");
11849    } else {
11850        const char *duplex;
11851        const char *flow;
11852
11853        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11854                                   &cur_data.link_report_flags)) {
11855            duplex = "full";
11856        } else {
11857            duplex = "half";
11858        }
11859
11860        /*
11861         * Handle flow control last so that only the FC flags can still be
11862         * set at this point.  This makes it easy to check whether FC is
11863         * enabled at all.
11864         */
11865        if (cur_data.link_report_flags) {
11866            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11867                             &cur_data.link_report_flags) &&
11868                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11869                             &cur_data.link_report_flags)) {
11870                flow = "ON - receive & transmit";
11871            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11872                                    &cur_data.link_report_flags) &&
11873                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11874                                     &cur_data.link_report_flags)) {
11875                flow = "ON - receive";
11876            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11877                                     &cur_data.link_report_flags) &&
11878                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11879                                    &cur_data.link_report_flags)) {
11880                flow = "ON - transmit";
11881            } else {
11882                flow = "none"; /* possible? */
11883            }
11884        } else {
11885            flow = "none";
11886        }
11887
11888        if_link_state_change(sc->ifp, LINK_STATE_UP);
11889        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11890              cur_data.line_speed, duplex, flow);
11891    }
11892}
11893
11894static void
11895bxe_link_report(struct bxe_softc *sc)
11896{
11897    bxe_acquire_phy_lock(sc);
11898    bxe_link_report_locked(sc);
11899    bxe_release_phy_lock(sc);
11900}
11901
11902static void
11903bxe_link_status_update(struct bxe_softc *sc)
11904{
11905    if (sc->state != BXE_STATE_OPEN) {
11906        return;
11907    }
11908
11909    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11910        elink_link_status_update(&sc->link_params, &sc->link_vars);
11911    } else {
11912        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11913                                  ELINK_SUPPORTED_10baseT_Full |
11914                                  ELINK_SUPPORTED_100baseT_Half |
11915                                  ELINK_SUPPORTED_100baseT_Full |
11916                                  ELINK_SUPPORTED_1000baseT_Full |
11917                                  ELINK_SUPPORTED_2500baseX_Full |
11918                                  ELINK_SUPPORTED_10000baseT_Full |
11919                                  ELINK_SUPPORTED_TP |
11920                                  ELINK_SUPPORTED_FIBRE |
11921                                  ELINK_SUPPORTED_Autoneg |
11922                                  ELINK_SUPPORTED_Pause |
11923                                  ELINK_SUPPORTED_Asym_Pause);
11924        sc->port.advertising[0] = sc->port.supported[0];
11925
11926        sc->link_params.sc                = sc;
11927        sc->link_params.port              = SC_PORT(sc);
11928        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11929        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11930        sc->link_params.req_line_speed[0] = SPEED_10000;
11931        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11932        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11933
11934        if (CHIP_REV_IS_FPGA(sc)) {
11935            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11936            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11937            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11938                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11939        } else {
11940            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11941            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11942            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11943                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11944        }
11945
11946        sc->link_vars.link_up = 1;
11947
11948        sc->link_vars.duplex    = DUPLEX_FULL;
11949        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11950
11951        if (IS_PF(sc)) {
11952            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11953            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11954            bxe_link_report(sc);
11955        }
11956    }
11957
11958    if (IS_PF(sc)) {
11959        if (sc->link_vars.link_up) {
11960            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11961        } else {
11962            bxe_stats_handle(sc, STATS_EVENT_STOP);
11963        }
11964        bxe_link_report(sc);
11965    } else {
11966        bxe_link_report(sc);
11967        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11968    }
11969}
11970
11971static int
11972bxe_initial_phy_init(struct bxe_softc *sc,
11973                     int              load_mode)
11974{
11975    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11976    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11977    struct elink_params *lp = &sc->link_params;
11978
11979    bxe_set_requested_fc(sc);
11980
11981    if (CHIP_REV_IS_SLOW(sc)) {
11982        uint32_t bond = CHIP_BOND_ID(sc);
11983        uint32_t feat = 0;
11984
11985        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11986            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11987        } else if (bond & 0x4) {
11988            if (CHIP_IS_E3(sc)) {
11989                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11990            } else {
11991                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11992            }
11993        } else if (bond & 0x8) {
11994            if (CHIP_IS_E3(sc)) {
11995                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11996            } else {
11997                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11998            }
11999        }
12000
12001        /* disable EMAC for E3 and above */
12002        if (bond & 0x2) {
12003            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12004        }
12005
12006        sc->link_params.feature_config_flags |= feat;
12007    }
12008
12009    bxe_acquire_phy_lock(sc);
12010
12011    if (load_mode == LOAD_DIAG) {
12012        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12013        /* Prefer doing PHY loopback at 10G speed, if possible */
12014        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12015            if (lp->speed_cap_mask[cfg_idx] &
12016                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12017                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12018            } else {
12019                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12020            }
12021        }
12022    }
12023
12024    if (load_mode == LOAD_LOOPBACK_EXT) {
12025        lp->loopback_mode = ELINK_LOOPBACK_EXT;
12026    }
12027
12028    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12029
12030    bxe_release_phy_lock(sc);
12031
12032    bxe_calc_fc_adv(sc);
12033
12034    if (sc->link_vars.link_up) {
12035        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12036        bxe_link_report(sc);
12037    }
12038
12039    if (!CHIP_REV_IS_SLOW(sc)) {
12040        bxe_periodic_start(sc);
12041    }
12042
12043    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12044    return (rc);
12045}
12046
12047/* must be called under IF_ADDR_LOCK */
12048
12049static int
12050bxe_set_mc_list(struct bxe_softc *sc)
12051{
12052    struct ecore_mcast_ramrod_params rparam = { NULL };
12053    int rc = 0;
12054    int mc_count = 0;
12055    int mcnt, i;
12056    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
12057    unsigned char *mta;
12058    if_t ifp = sc->ifp;
12059
12060    mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */
12061    if (!mc_count)
12062        return (0);
12063
12064    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12065            mc_count, M_DEVBUF, M_NOWAIT);
12066
12067    if (mta == NULL) {
12068        BLOGE(sc, "Failed to allocate temp mcast list\n");
12069        return (-1);
12070    }
12071    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
12072
12073    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
12074    mc_mac_start = mc_mac;
12075
12076    if (!mc_mac) {
12077        free(mta, M_DEVBUF);
12078        BLOGE(sc, "Failed to allocate temp mcast list\n");
12079        return (-1);
12080    }
12081    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12082
12083    /* mta and mcnt are not expected to differ */
12084    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
12085
12086
12087    rparam.mcast_obj = &sc->mcast_obj;
12088    ECORE_LIST_INIT(&rparam.mcast_list);
12089
12090    for (i = 0; i < mcnt; i++) {
12091
12092        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
12093        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
12094
12095        BLOGD(sc, DBG_LOAD,
12096              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12097              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12098              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12099
12100        mc_mac++;
12101    }
12102    rparam.mcast_list_len = mc_count;
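    /*
     * The ramrods below are told the list holds mc_count entries; as noted
     * above, the driver assumes mcnt returned by if_multiaddr_array()
     * matches mc_count.
     */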
12103
12104    BXE_MCAST_LOCK(sc);
12105
12106    /* first, clear all configured multicast MACs */
12107    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12108    if (rc < 0) {
12109        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12110        BXE_MCAST_UNLOCK(sc);
12111        free(mc_mac_start, M_DEVBUF);
12112        free(mta, M_DEVBUF);
12113        return (rc);
12114    }
12115
12116    /* Now add the new MACs */
12117    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12118    if (rc < 0) {
12119        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12120    }
12121
12122    BXE_MCAST_UNLOCK(sc);
12123
12124    free(mc_mac_start, M_DEVBUF);
12125    free(mta, M_DEVBUF);
12126
12127    return (rc);
12128}
12129
12130static int
12131bxe_set_uc_list(struct bxe_softc *sc)
12132{
12133    if_t ifp = sc->ifp;
12134    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12135    struct ifaddr *ifa;
12136    unsigned long ramrod_flags = 0;
12137    int rc;
12138
12139#if __FreeBSD_version < 800000
12140    IF_ADDR_LOCK(ifp);
12141#else
12142    if_addr_rlock(ifp);
12143#endif
12144
12145    /* first schedule a cleanup of the old configuration */
12146    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12147    if (rc < 0) {
12148        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12149#if __FreeBSD_version < 800000
12150        IF_ADDR_UNLOCK(ifp);
12151#else
12152        if_addr_runlock(ifp);
12153#endif
12154        return (rc);
12155    }
12156
12157    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12158    while (ifa) {
12159        if (ifa->ifa_addr->sa_family != AF_LINK) {
12160            ifa = TAILQ_NEXT(ifa, ifa_link);
12161            continue;
12162        }
12163
12164        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12165                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12166        if (rc == -EEXIST) {
12167            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12168            /* do not treat adding same MAC as an error */
12169            rc = 0;
12170        } else if (rc < 0) {
12171            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12172#if __FreeBSD_version < 800000
12173            IF_ADDR_UNLOCK(ifp);
12174#else
12175            if_addr_runlock(ifp);
12176#endif
12177            return (rc);
12178        }
12179
12180        ifa = TAILQ_NEXT(ifa, ifa_link);
12181    }
12182
12183#if __FreeBSD_version < 800000
12184    IF_ADDR_UNLOCK(ifp);
12185#else
12186    if_addr_runlock(ifp);
12187#endif
12188
12189    /* Execute the pending commands */
12190    bit_set(&ramrod_flags, RAMROD_CONT);
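    /*
     * With RAMROD_CONT set, bxe_set_mac_one() skips the user-request section
     * (see above), so the NULL mac and the "set" argument are effectively
     * ignored; this call only kicks processing of the commands queued above.
     */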
12191    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12192                            ECORE_UC_LIST_MAC, &ramrod_flags));
12193}
12194
12195static void
12196bxe_set_rx_mode(struct bxe_softc *sc)
12197{
12198    if_t ifp = sc->ifp;
12199    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12200
12201    if (sc->state != BXE_STATE_OPEN) {
12202        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12203        return;
12204    }
12205
12206    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12207
12208    if (if_getflags(ifp) & IFF_PROMISC) {
12209        rx_mode = BXE_RX_MODE_PROMISC;
12210    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12211               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12212                CHIP_IS_E1(sc))) {
12213        rx_mode = BXE_RX_MODE_ALLMULTI;
12214    } else {
12215        if (IS_PF(sc)) {
12216            /* some multicasts */
12217            if (bxe_set_mc_list(sc) < 0) {
12218                rx_mode = BXE_RX_MODE_ALLMULTI;
12219            }
12220            if (bxe_set_uc_list(sc) < 0) {
12221                rx_mode = BXE_RX_MODE_PROMISC;
12222            }
12223        }
12224    }
12225
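    /*
     * Summary of the ladder above: IFF_PROMISC forces promiscuous mode,
     * IFF_ALLMULTI (or too many multicast groups on E1) forces all-multi,
     * and otherwise the multicast/unicast lists are programmed, falling back
     * to all-multi or promiscuous respectively if programming fails.
     */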
12226    sc->rx_mode = rx_mode;
12227
12228    /* schedule the rx_mode command */
12229    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12230        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12231        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12232        return;
12233    }
12234
12235    if (IS_PF(sc)) {
12236        bxe_set_storm_rx_mode(sc);
12237    }
12238}
12239
12240
12241/* update flags in shmem */
12242static void
12243bxe_update_drv_flags(struct bxe_softc *sc,
12244                     uint32_t         flags,
12245                     uint32_t         set)
12246{
12247    uint32_t drv_flags;
12248
12249    if (SHMEM2_HAS(sc, drv_flags)) {
12250        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12251        drv_flags = SHMEM2_RD(sc, drv_flags);
12252
12253        if (set) {
12254            SET_FLAGS(drv_flags, flags);
12255        } else {
12256            RESET_FLAGS(drv_flags, flags);
12257        }
12258
12259        SHMEM2_WR(sc, drv_flags, drv_flags);
12260        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12261
12262        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12263    }
12264}
12265
12266/* periodic timer callout routine, only runs when the interface is up */
12267
12268static void
12269bxe_periodic_callout_func(void *xsc)
12270{
12271    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12272    int i;
12273
12274    if (!BXE_CORE_TRYLOCK(sc)) {
12275        /* just bail and try again next time */
12276
12277        if ((sc->state == BXE_STATE_OPEN) &&
12278            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12279            /* schedule the next periodic callout */
12280            callout_reset(&sc->periodic_callout, hz,
12281                          bxe_periodic_callout_func, sc);
12282        }
12283
12284        return;
12285    }
12286
12287    if ((sc->state != BXE_STATE_OPEN) ||
12288        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12289        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12290        BXE_CORE_UNLOCK(sc);
12291        return;
12292    }
12293
12294    /* Check for TX timeouts on any fastpath. */
12295    FOR_EACH_QUEUE(sc, i) {
12296        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12297            /* Ruh-Roh, chip was reset! */
12298            break;
12299        }
12300    }
12301
12302    if (!CHIP_REV_IS_SLOW(sc)) {
12303        /*
12304         * This barrier is needed to ensure ordering between the write to
12305         * sc->port.pmf in bxe_nic_load() or bxe_pmf_update() and the read
12306         * here.
12307         */
12308        mb();
12309        if (sc->port.pmf) {
12310            bxe_acquire_phy_lock(sc);
12311            elink_period_func(&sc->link_params, &sc->link_vars);
12312            bxe_release_phy_lock(sc);
12313        }
12314    }
12315
12316    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12317        int mb_idx = SC_FW_MB_IDX(sc);
12318        uint32_t drv_pulse;
12319        uint32_t mcp_pulse;
12320
12321        ++sc->fw_drv_pulse_wr_seq;
12322        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12323
12324        drv_pulse = sc->fw_drv_pulse_wr_seq;
12325        bxe_drv_pulse(sc);
12326
12327        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12328                     MCP_PULSE_SEQ_MASK);
12329
12330        /*
12331         * The delta between driver pulse and mcp response should
12332         * be 1 (before mcp response) or 0 (after mcp response).
12333         */
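        /*
         * Concretely: if drv_pulse is 0x0010, then an mcp_pulse of 0x0010
         * (already acked) or 0x000f (ack still pending) is healthy; anything
         * else means a missed heartbeat.
         */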
12334        if ((drv_pulse != mcp_pulse) &&
12335            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12336            /* someone lost a heartbeat... */
12337            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12338                  drv_pulse, mcp_pulse);
12339        }
12340    }
12341
12342    /* state is BXE_STATE_OPEN */
12343    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12344
12345    BXE_CORE_UNLOCK(sc);
12346
12347    if ((sc->state == BXE_STATE_OPEN) &&
12348        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12349        /* schedule the next periodic callout */
12350        callout_reset(&sc->periodic_callout, hz,
12351                      bxe_periodic_callout_func, sc);
12352    }
12353}
12354
12355static void
12356bxe_periodic_start(struct bxe_softc *sc)
12357{
12358    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12359    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12360}
12361
12362static void
12363bxe_periodic_stop(struct bxe_softc *sc)
12364{
12365    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12366    callout_drain(&sc->periodic_callout);
12367}
12368
12369/* start the controller */
12370static __noinline int
12371bxe_nic_load(struct bxe_softc *sc,
12372             int              load_mode)
12373{
12374    uint32_t val;
12375    int load_code = 0;
12376    int i, rc = 0;
12377
12378    BXE_CORE_LOCK_ASSERT(sc);
12379
12380    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12381
12382    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12383
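    /*
     * Rough load sequence from here (the labels at the bottom form the
     * unwind path): allocate fastpath/ILT/stats memory, negotiate the load
     * with the MCP (or fake it when there is no MCP), init the HW, attach
     * interrupts, bring up the leading and non-default queues, configure
     * RSS, program the primary MAC, bring up the PHY (for the PMF), set the
     * RX mode and finally start the periodic callout.
     */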
12384    if (IS_PF(sc)) {
12385        /* must be called before memory allocation and HW init */
12386        bxe_ilt_set_info(sc);
12387    }
12388
12389    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12390
12391    bxe_set_fp_rx_buf_size(sc);
12392
12393    if (bxe_alloc_fp_buffers(sc) != 0) {
12394        BLOGE(sc, "Failed to allocate fastpath memory\n");
12395        sc->state = BXE_STATE_CLOSED;
12396        rc = ENOMEM;
12397        goto bxe_nic_load_error0;
12398    }
12399
12400    if (bxe_alloc_mem(sc) != 0) {
12401        sc->state = BXE_STATE_CLOSED;
12402        rc = ENOMEM;
12403        goto bxe_nic_load_error0;
12404    }
12405
12406    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12407        sc->state = BXE_STATE_CLOSED;
12408        rc = ENOMEM;
12409        goto bxe_nic_load_error0;
12410    }
12411
12412    if (IS_PF(sc)) {
12413        /* set pf load just before approaching the MCP */
12414        bxe_set_pf_load(sc);
12415
12416        /* if the MCP exists, send a load request and analyze the response */
12417        if (!BXE_NOMCP(sc)) {
12418            /* attempt to load pf */
12419            if (bxe_nic_load_request(sc, &load_code) != 0) {
12420                sc->state = BXE_STATE_CLOSED;
12421                rc = ENXIO;
12422                goto bxe_nic_load_error1;
12423            }
12424
12425            /* what did the MCP say? */
12426            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12427                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12428                sc->state = BXE_STATE_CLOSED;
12429                rc = ENXIO;
12430                goto bxe_nic_load_error2;
12431            }
12432        } else {
12433            BLOGI(sc, "Device has no MCP!\n");
12434            load_code = bxe_nic_load_no_mcp(sc);
12435        }
12436
12437        /* mark PMF if applicable */
12438        bxe_nic_load_pmf(sc, load_code);
12439
12440        /* Init Function state controlling object */
12441        bxe_init_func_obj(sc);
12442
12443        /* Initialize HW */
12444        if (bxe_init_hw(sc, load_code) != 0) {
12445            BLOGE(sc, "HW init failed\n");
12446            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12447            sc->state = BXE_STATE_CLOSED;
12448            rc = ENXIO;
12449            goto bxe_nic_load_error2;
12450        }
12451    }
12452
12453    /* set ALWAYS_ALIVE bit in shmem */
12454    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12455    bxe_drv_pulse(sc);
12456    sc->flags |= BXE_NO_PULSE;
12457
12458    /* attach interrupts */
12459    if (bxe_interrupt_attach(sc) != 0) {
12460        sc->state = BXE_STATE_CLOSED;
12461        rc = ENXIO;
12462        goto bxe_nic_load_error2;
12463    }
12464
12465    bxe_nic_init(sc, load_code);
12466
12467    /* Init per-function objects */
12468    if (IS_PF(sc)) {
12469        bxe_init_objs(sc);
12470        // XXX bxe_iov_nic_init(sc);
12471
12472        /* set AFEX default VLAN tag to an invalid value */
12473        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12474        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12475
12476        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12477        rc = bxe_func_start(sc);
12478        if (rc) {
12479            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12480            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12481            sc->state = BXE_STATE_ERROR;
12482            goto bxe_nic_load_error3;
12483        }
12484
12485        /* send LOAD_DONE command to MCP */
12486        if (!BXE_NOMCP(sc)) {
12487            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12488            if (!load_code) {
12489                BLOGE(sc, "MCP response failure, aborting\n");
12490                sc->state = BXE_STATE_ERROR;
12491                rc = ENXIO;
12492                goto bxe_nic_load_error3;
12493            }
12494        }
12495
12496        rc = bxe_setup_leading(sc);
12497        if (rc) {
12498            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12499            sc->state = BXE_STATE_ERROR;
12500            goto bxe_nic_load_error3;
12501        }
12502
12503        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12504            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12505            if (rc) {
12506                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12507                sc->state = BXE_STATE_ERROR;
12508                goto bxe_nic_load_error3;
12509            }
12510        }
12511
12512        rc = bxe_init_rss_pf(sc);
12513        if (rc) {
12514            BLOGE(sc, "PF RSS init failed\n");
12515            sc->state = BXE_STATE_ERROR;
12516            goto bxe_nic_load_error3;
12517        }
12518    }
12519    /* XXX VF */
12520
12521    /* now that the Clients are configured we are ready to work */
12522    sc->state = BXE_STATE_OPEN;
12523
12524    /* Configure a ucast MAC */
12525    if (IS_PF(sc)) {
12526        rc = bxe_set_eth_mac(sc, TRUE);
12527    }
12528    if (rc) {
12529        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12530        sc->state = BXE_STATE_ERROR;
12531        goto bxe_nic_load_error3;
12532    }
12533
12534    if (sc->port.pmf) {
12535        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12536        if (rc) {
12537            sc->state = BXE_STATE_ERROR;
12538            goto bxe_nic_load_error3;
12539        }
12540    }
12541
12542    sc->link_params.feature_config_flags &=
12543        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12544
12545    /* start fast path */
12546
12547    /* Initialize Rx filter */
12548    bxe_set_rx_mode(sc);
12549
12550    /* start the Tx */
12551    switch (/* XXX load_mode */LOAD_OPEN) {
12552    case LOAD_NORMAL:
12553    case LOAD_OPEN:
12554        break;
12555
12556    case LOAD_DIAG:
12557    case LOAD_LOOPBACK_EXT:
12558        sc->state = BXE_STATE_DIAG;
12559        break;
12560
12561    default:
12562        break;
12563    }
12564
12565    if (sc->port.pmf) {
12566        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12567    } else {
12568        bxe_link_status_update(sc);
12569    }
12570
12571    /* start the periodic timer callout */
12572    bxe_periodic_start(sc);
12573
12574    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12575        /* mark driver is loaded in shmem2 */
12576        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12577        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12578                  (val |
12579                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12580                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12581    }
12582
12583    /* wait for all pending SP commands to complete */
12584    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12585        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12586        bxe_periodic_stop(sc);
12587        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12588        return (ENXIO);
12589    }
12590
12591    /* Tell the stack the driver is running! */
12592    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12593
12594    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12595
12596    return (0);
12597
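/*
 * Error unwind: each label below handles progressively earlier failures --
 * error3 disables/detaches interrupts (and squeezes queued objects for a PF),
 * error2 sends the MCP unload sequence, error1 clears the PF load count and
 * error0 frees the memory allocated at the top of this function.  Each label
 * falls through to the ones after it.
 */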
12598bxe_nic_load_error3:
12599
12600    if (IS_PF(sc)) {
12601        bxe_int_disable_sync(sc, 1);
12602
12603        /* clean out queued objects */
12604        bxe_squeeze_objects(sc);
12605    }
12606
12607    bxe_interrupt_detach(sc);
12608
12609bxe_nic_load_error2:
12610
12611    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12612        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12613        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12614    }
12615
12616    sc->port.pmf = 0;
12617
12618bxe_nic_load_error1:
12619
12620    /* clear pf_load status, as it was already set */
12621    if (IS_PF(sc)) {
12622        bxe_clear_pf_load(sc);
12623    }
12624
12625bxe_nic_load_error0:
12626
12627    bxe_free_fw_stats_mem(sc);
12628    bxe_free_fp_buffers(sc);
12629    bxe_free_mem(sc);
12630
12631    return (rc);
12632}
12633
12634static int
12635bxe_init_locked(struct bxe_softc *sc)
12636{
12637    int other_engine = SC_PATH(sc) ? 0 : 1;
12638    uint8_t other_load_status, load_status;
12639    uint8_t global = FALSE;
12640    int rc;
12641
12642    BXE_CORE_LOCK_ASSERT(sc);
12643
12644    /* check if the driver is already running */
12645    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12646        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12647        return (0);
12648    }
12649
12650    bxe_set_power_state(sc, PCI_PM_D0);
12651
12652    /*
12653     * If parity occurred during the unload, then attentions and/or
12654     * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
12655     * loaded on the current engine to complete the recovery. Parity recovery
12656     * is only relevant for the PF driver.
12657     */
12658    if (IS_PF(sc)) {
12659        other_load_status = bxe_get_load_status(sc, other_engine);
12660        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12661
12662        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12663            bxe_chk_parity_attn(sc, &global, TRUE)) {
12664            do {
12665                /*
12666                 * If there are attentions and they are in global blocks, set
12667                 * the GLOBAL_RESET bit regardless of whether it will be this
12668                 * function that will complete the recovery or not.
12669                 */
12670                if (global) {
12671                    bxe_set_reset_global(sc);
12672                }
12673
12674                /*
12675                 * Only the first function on the current engine should try
12676                 * to recover in open. In case of attentions in global blocks
12677                 * only the first in the chip should try to recover.
12678                 */
12679                if ((!load_status && (!global || !other_load_status)) &&
12680                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12681                    BLOGI(sc, "Recovered during init\n");
12682                    break;
12683                }
12684
12685                /* recovery has failed... */
12686                bxe_set_power_state(sc, PCI_PM_D3hot);
12687                sc->recovery_state = BXE_RECOVERY_FAILED;
12688
12689                BLOGE(sc, "Recovery flow hasn't properly "
12690                          "completed yet, try again later. "
12691                          "If you still see this message after a "
12692                          "few retries then a power cycle is required.\n");
12693
12694                rc = ENXIO;
12695                goto bxe_init_locked_done;
12696            } while (0);
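            /*
             * Note: the do { ... } while (0) above is just a one-shot block
             * that lets the successful-recovery path "break" out early.
             */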
12697        }
12698    }
12699
12700    sc->recovery_state = BXE_RECOVERY_DONE;
12701
12702    rc = bxe_nic_load(sc, LOAD_OPEN);
12703
12704bxe_init_locked_done:
12705
12706    if (rc) {
12707        /* Tell the stack the driver is NOT running! */
12708        BLOGE(sc, "Initialization failed, "
12709                  "stack notified driver is NOT running!\n");
12710        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12711    }
12712
12713    return (rc);
12714}
12715
12716static int
12717bxe_stop_locked(struct bxe_softc *sc)
12718{
12719    BXE_CORE_LOCK_ASSERT(sc);
12720    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12721}
12722
12723/*
12724 * Handles controller initialization when called from an unlocked routine.
12725 * ifconfig calls this function.
12726 *
12727 * Returns:
12728 *   void
12729 */
12730static void
12731bxe_init(void *xsc)
12732{
12733    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12734
12735    BXE_CORE_LOCK(sc);
12736    bxe_init_locked(sc);
12737    BXE_CORE_UNLOCK(sc);
12738}
12739
12740static int
12741bxe_init_ifnet(struct bxe_softc *sc)
12742{
12743    if_t ifp;
12744    int capabilities;
12745
12746    /* ifconfig entrypoint for media type/status reporting */
12747    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12748                 bxe_ifmedia_update,
12749                 bxe_ifmedia_status);
12750
12751    /* set the default interface values */
12752    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12753    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12754    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12755
12756    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12757
12758    /* allocate the ifnet structure */
12759    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12760        BLOGE(sc, "Interface allocation failed!\n");
12761        return (ENXIO);
12762    }
12763
12764    if_setsoftc(ifp, sc);
12765    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12766    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12767    if_setioctlfn(ifp, bxe_ioctl);
12768    if_setstartfn(ifp, bxe_tx_start);
12769    if_setgetcounterfn(ifp, bxe_get_counter);
12770#if __FreeBSD_version >= 800000
12771    if_settransmitfn(ifp, bxe_tx_mq_start);
12772    if_setqflushfn(ifp, bxe_mq_flush);
12773#endif
12774#ifdef FreeBSD8_0
12775    if_settimer(ifp, 0);
12776#endif
12777    if_setinitfn(ifp, bxe_init);
12778    if_setmtu(ifp, sc->mtu);
12779    if_sethwassist(ifp, (CSUM_IP      |
12780                        CSUM_TCP      |
12781                        CSUM_UDP      |
12782                        CSUM_TSO      |
12783                        CSUM_TCP_IPV6 |
12784                        CSUM_UDP_IPV6));
12785
12786    capabilities =
12787#if __FreeBSD_version < 700000
12788        (IFCAP_VLAN_MTU       |
12789         IFCAP_VLAN_HWTAGGING |
12790         IFCAP_HWCSUM         |
12791         IFCAP_JUMBO_MTU      |
12792         IFCAP_LRO);
12793#else
12794        (IFCAP_VLAN_MTU       |
12795         IFCAP_VLAN_HWTAGGING |
12796         IFCAP_VLAN_HWTSO     |
12797         IFCAP_VLAN_HWFILTER  |
12798         IFCAP_VLAN_HWCSUM    |
12799         IFCAP_HWCSUM         |
12800         IFCAP_JUMBO_MTU      |
12801         IFCAP_LRO            |
12802         IFCAP_TSO4           |
12803         IFCAP_TSO6           |
12804         IFCAP_WOL_MAGIC);
12805#endif
12806    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12807    if_setbaudrate(ifp, IF_Gbps(10));
12808/* XXX */
12809    if_setsendqlen(ifp, sc->tx_ring_size);
12810    if_setsendqready(ifp);
12811/* XXX */
12812
12813    sc->ifp = ifp;
12814
12815    /* attach to the Ethernet interface list */
12816    ether_ifattach(ifp, sc->link_params.mac_addr);
12817
12818    return (0);
12819}
12820
12821static void
12822bxe_deallocate_bars(struct bxe_softc *sc)
12823{
12824    int i;
12825
12826    for (i = 0; i < MAX_BARS; i++) {
12827        if (sc->bar[i].resource != NULL) {
12828            bus_release_resource(sc->dev,
12829                                 SYS_RES_MEMORY,
12830                                 sc->bar[i].rid,
12831                                 sc->bar[i].resource);
12832            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12833                  i, PCIR_BAR(i));
12834        }
12835    }
12836}
12837
12838static int
12839bxe_allocate_bars(struct bxe_softc *sc)
12840{
12841    u_int flags;
12842    int i;
12843
12844    memset(sc->bar, 0, sizeof(sc->bar));
12845
12846    for (i = 0; i < MAX_BARS; i++) {
12847
12848        /* memory resources reside at BARs 0, 2, 4 */
12849        /* Run `pciconf -lb` to see mappings */
12850        if ((i != 0) && (i != 2) && (i != 4)) {
12851            continue;
12852        }
12853
12854        sc->bar[i].rid = PCIR_BAR(i);
12855
12856        flags = RF_ACTIVE;
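        /*
         * Note: only BAR0 (the main register window) is requested with
         * RF_SHAREABLE below; the remaining memory BARs are mapped for
         * exclusive use by this function.
         */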
12857        if (i == 0) {
12858            flags |= RF_SHAREABLE;
12859        }
12860
12861        if ((sc->bar[i].resource =
12862             bus_alloc_resource_any(sc->dev,
12863                                    SYS_RES_MEMORY,
12864                                    &sc->bar[i].rid,
12865                                    flags)) == NULL) {
12866            return (-1); /* allocation failed; let the caller bail out */
12867        }
12868
12869        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12870        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12871        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12872
12873        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12874              i, PCIR_BAR(i),
12875              (void *)rman_get_start(sc->bar[i].resource),
12876              (void *)rman_get_end(sc->bar[i].resource),
12877              rman_get_size(sc->bar[i].resource),
12878              (void *)sc->bar[i].kva);
12879    }
12880
12881    return (0);
12882}
12883
12884static void
12885bxe_get_function_num(struct bxe_softc *sc)
12886{
12887    uint32_t val = 0;
12888
12889    /*
12890     * Read the ME register to get the function number. The ME register
12891     * holds the relative-function number and absolute-function number. The
12892     * absolute-function number appears only in E2 and above. Before that
12893     * these bits always contained zero, therefore we cannot blindly use them.
12894     */
12895
12896    val = REG_RD(sc, BAR_ME_REGISTER);
12897
12898    sc->pfunc_rel =
12899        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12900    sc->path_id =
12901        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12902
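    /*
     * Derive the absolute function number. Per the formulas below, in
     * 4-port mode the path id occupies the bit below the relative function
     * number, so absolute functions interleave between the two paths; in
     * 2-port mode the relative number already lines up with the absolute
     * numbering and only the path bit is merged in.
     */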
12903    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12904        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12905    } else {
12906        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12907    }
12908
12909    BLOGD(sc, DBG_LOAD,
12910          "Relative function %d, Absolute function %d, Path %d\n",
12911          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12912}
12913
12914static uint32_t
12915bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12916{
12917    uint32_t shmem2_size;
12918    uint32_t offset;
12919    uint32_t mf_cfg_offset_value;
12920
12921    /* Non 57712 */
12922    offset = (SHMEM_RD(sc, func_mb) +
12923              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12924
12925    /* 57712 plus */
12926    if (sc->devinfo.shmem2_base != 0) {
12927        shmem2_size = SHMEM2_RD(sc, size);
12928        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12929            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12930            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12931                offset = mf_cfg_offset_value;
12932            }
12933        }
12934    }
12935
12936    return (offset);
12937}
12938
12939static uint32_t
12940bxe_pcie_capability_read(struct bxe_softc *sc,
12941                         int    reg,
12942                         int    width)
12943{
12944    int pcie_reg;
12945
12946    /* ensure PCIe capability is enabled */
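    /*
     * pci_find_cap() returns 0 when the capability exists and stores its
     * config space offset in pcie_reg.
     */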
12947    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12948        if (pcie_reg != 0) {
12949            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12950            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12951        }
12952    }
12953
12954    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12955
12956    return (0);
12957}
12958
12959static uint8_t
12960bxe_is_pcie_pending(struct bxe_softc *sc)
12961{
12962    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12963            PCIM_EXP_STA_TRANSACTION_PND);
12964}
12965
12966/*
12967 * Walk the PCI capabilities list for the device to find what features are
12968 * supported. These capabilities may be enabled/disabled by firmware so it's
12969 * best to walk the list rather than make assumptions.
12970 */
12971static void
12972bxe_probe_pci_caps(struct bxe_softc *sc)
12973{
12974    uint16_t link_status;
12975    int reg;
12976
12977    /* check if PCI Power Management is enabled */
12978    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12979        if (reg != 0) {
12980            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12981
12982            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12983            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12984        }
12985    }
12986
12987    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12988
12989    /* handle PCIe 2.0 workarounds for 57710 */
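    /*
     * Per the errata workarounds below, E1 infers the link speed from
     * bit 24 of register 0x3d04 (Gen2 vs Gen1) instead of the link status
     * register, and halves the reported link width when running at the
     * Gen2 rate.
     */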
12990    if (CHIP_IS_E1(sc)) {
12991        /* workaround for 57710 errata E4_57710_27462 */
12992        sc->devinfo.pcie_link_speed =
12993            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12994
12995        /* workaround for 57710 errata E4_57710_27488 */
12996        sc->devinfo.pcie_link_width =
12997            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12998        if (sc->devinfo.pcie_link_speed > 1) {
12999            sc->devinfo.pcie_link_width =
13000                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13001        }
13002    } else {
13003        sc->devinfo.pcie_link_speed =
13004            (link_status & PCIM_LINK_STA_SPEED);
13005        sc->devinfo.pcie_link_width =
13006            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13007    }
13008
13009    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13010          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13011
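    /*
     * XXX Note that 'reg' below still holds the result of the PM capability
     * lookup above (or is undefined if that lookup failed); the PCIe
     * capability offset is not re-queried here.
     */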
13012    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13013    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13014
13015    /* check if MSI capability is enabled */
13016    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13017        if (reg != 0) {
13018            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13019
13020            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13021            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13022        }
13023    }
13024
13025    /* check if MSI-X capability is enabled */
13026    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13027        if (reg != 0) {
13028            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13029
13030            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13031            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13032        }
13033    }
13034}
13035
13036static int
13037bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13038{
13039    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13040    uint32_t val;
13041
13042    /* get the outer vlan if we're in switch-dependent mode */
13043
13044    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13045    mf_info->ext_id = (uint16_t)val;
13046
13047    mf_info->multi_vnics_mode = 1;
13048
13049    if (!VALID_OVLAN(mf_info->ext_id)) {
13050        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13051        return (1);
13052    }
13053
13054    /* get the capabilities */
13055    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13056        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13057        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13058    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13059               FUNC_MF_CFG_PROTOCOL_FCOE) {
13060        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13061    } else {
13062        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13063    }
13064
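    /*
     * A path exposes up to MAX_FUNC_NUM functions; in 4-port mode they are
     * spread across four ports (2 vnics per port), in 2-port mode across
     * two ports (4 vnics per port).
     */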
13065    mf_info->vnics_per_port =
13066        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13067
13068    return (0);
13069}
13070
13071static uint32_t
13072bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13073{
13074    uint32_t retval = 0;
13075    uint32_t val;
13076
13077    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13078
13079    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13080        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13081            retval |= MF_PROTO_SUPPORT_ETHERNET;
13082        }
13083        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13084            retval |= MF_PROTO_SUPPORT_ISCSI;
13085        }
13086        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13087            retval |= MF_PROTO_SUPPORT_FCOE;
13088        }
13089    }
13090
13091    return (retval);
13092}
13093
13094static int
13095bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13096{
13097    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13098    uint32_t val;
13099
13100    /*
13101     * There is no outer vlan if we're in switch-independent mode.
13102     * If the mac is valid then assume multi-function.
13103     */
13104
13105    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13106
13107    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13108
13109    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13110
13111    mf_info->vnics_per_port =
13112        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13113
13114    return (0);
13115}
13116
13117static int
13118bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13119{
13120    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13121    uint32_t e1hov_tag;
13122    uint32_t func_config;
13123    uint32_t niv_config;
13124
13125    mf_info->multi_vnics_mode = 1;
13126
13127    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13128    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13129    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13130
13131    mf_info->ext_id =
13132        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13133                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13134
13135    mf_info->default_vlan =
13136        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13137                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13138
13139    mf_info->niv_allowed_priorities =
13140        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13141                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13142
13143    mf_info->niv_default_cos =
13144        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13145                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13146
13147    mf_info->afex_vlan_mode =
13148        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13149         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13150
13151    mf_info->niv_mba_enabled =
13152        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13153         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13154
13155    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13156
13157    mf_info->vnics_per_port =
13158        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13159
13160    return (0);
13161}
13162
13163static int
13164bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13165{
13166    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13167    uint32_t mf_cfg1;
13168    uint32_t mf_cfg2;
13169    uint32_t ovlan1;
13170    uint32_t ovlan2;
13171    uint8_t i, j;
13172
13173    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13174          SC_PORT(sc));
13175    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13176          mf_info->mf_config[SC_VN(sc)]);
13177    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13178          mf_info->multi_vnics_mode);
13179    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13180          mf_info->vnics_per_port);
13181    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13182          mf_info->ext_id);
13183    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13184          mf_info->min_bw[0], mf_info->min_bw[1],
13185          mf_info->min_bw[2], mf_info->min_bw[3]);
13186    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13187          mf_info->max_bw[0], mf_info->max_bw[1],
13188          mf_info->max_bw[2], mf_info->max_bw[3]);
13189    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13190          sc->mac_addr_str);
13191
13192    /* various MF mode sanity checks... */
13193
13194    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13195        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13196              SC_PORT(sc));
13197        return (1);
13198    }
13199
13200    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13201        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13202              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13203        return (1);
13204    }
13205
13206    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13207        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13208        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13209            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13210                  SC_VN(sc), OVLAN(sc));
13211            return (1);
13212        }
13213
13214        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13215            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13216                  mf_info->multi_vnics_mode, OVLAN(sc));
13217            return (1);
13218        }
13219
13220        /*
13221         * Verify all functions are either MF or SF mode. If MF, make
13222         * sure that all non-hidden functions have a valid ovlan. If SF,
13223         * make sure that all non-hidden functions have an invalid ovlan.
13224         */
13225        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13226            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13227            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13228            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13229                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13230                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13231                BLOGE(sc, "mf_mode=SD function %d MF config "
13232                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13233                      i, mf_info->multi_vnics_mode, ovlan1);
13234                return (1);
13235            }
13236        }
13237
13238        /* Verify all funcs on the same port each have a different ovlan. */
13239        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13240            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13241            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13242            /* iterate from the next function on the port to the max func */
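            /* same-port functions occupy every other absolute function number */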
13243            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13244                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13245                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13246                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13247                    VALID_OVLAN(ovlan1) &&
13248                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13249                    VALID_OVLAN(ovlan2) &&
13250                    (ovlan1 == ovlan2)) {
13251                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13252                              "have the same ovlan (%d)\n",
13253                          i, j, ovlan1);
13254                    return (1);
13255                }
13256            }
13257        }
13258    } /* MULTI_FUNCTION_SD */
13259
13260    return (0);
13261}
13262
13263static int
13264bxe_get_mf_cfg_info(struct bxe_softc *sc)
13265{
13266    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13267    uint32_t val, mac_upper;
13268    uint8_t i, vnic;
13269
13270    /* initialize mf_info defaults */
13271    mf_info->vnics_per_port   = 1;
13272    mf_info->multi_vnics_mode = FALSE;
13273    mf_info->path_has_ovlan   = FALSE;
13274    mf_info->mf_mode          = SINGLE_FUNCTION;
13275
13276    if (!CHIP_IS_MF_CAP(sc)) {
13277        return (0);
13278    }
13279
13280    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13281        BLOGE(sc, "Invalid mf_cfg_base!\n");
13282        return (1);
13283    }
13284
13285    /* get the MF mode (switch dependent / independent / single-function) */
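    /*
     * The multi-function flavors distinguished here are, roughly:
     *   switch dependent (SD):   functions separated by an outer VLAN tag
     *   switch independent (SI): functions separated by MAC address (NPAR)
     *   AFEX/NIV:                switch-managed partitioning with a default
     *                            VLAN per function
     * Anything else means the device runs as a single function.
     */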
13286
13287    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13288
13289    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13290    {
13291    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13292
13293        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13294
13295        /* check for legal upper mac bytes */
13296        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13297            mf_info->mf_mode = MULTI_FUNCTION_SI;
13298        } else {
13299            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13300        }
13301
13302        break;
13303
13304    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13305    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13306
13307        /* get outer vlan configuration */
13308        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13309
13310        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13311            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13312            mf_info->mf_mode = MULTI_FUNCTION_SD;
13313        } else {
13314            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13315        }
13316
13317        break;
13318
13319    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13320
13321        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13322        return (0);
13323
13324    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13325
13326        /*
13327         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13328         * and the MAC address is valid.
13329         */
13330        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13331
13332        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13333            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13334            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13335        } else {
13336            BLOGE(sc, "Invalid config for AFEX mode\n");
13337        }
13338
13339        break;
13340
13341    default:
13342
13343        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13344              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13345
13346        return (1);
13347    }
13348
13349    /* set path mf_mode (which could be different than function mf_mode) */
13350    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13351        mf_info->path_has_ovlan = TRUE;
13352    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13353        /*
13354         * Decide on the path multi-vnics mode. When we're not in MF mode
13355         * and the chip is in 4-port mode, checking vnic-0 of the other
13356         * port on the same path is sufficient.
13357         */
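        /*
         * In 4-port mode vnic-0 of the other port on this path sits at
         * absolute function (path + 2 * other_port), which is what the
         * computation below reads the e1hov tag from.
         */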
13358        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13359            uint8_t other_port = !(PORT_ID(sc) & 1);
13360            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13361
13362            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13363
13364            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13365        }
13366    }
13367
13368    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13369        /* invalid MF config */
13370        if (SC_VN(sc) >= 1) {
13371            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13372            return (1);
13373        }
13374
13375        return (0);
13376    }
13377
13378    /* get the MF configuration */
13379    mf_info->mf_config[SC_VN(sc)] =
13380        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13381
13382    switch(mf_info->mf_mode)
13383    {
13384    case MULTI_FUNCTION_SD:
13385
13386        bxe_get_shmem_mf_cfg_info_sd(sc);
13387        break;
13388
13389    case MULTI_FUNCTION_SI:
13390
13391        bxe_get_shmem_mf_cfg_info_si(sc);
13392        break;
13393
13394    case MULTI_FUNCTION_AFEX:
13395
13396        bxe_get_shmem_mf_cfg_info_niv(sc);
13397        break;
13398
13399    default:
13400
13401        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13402              mf_info->mf_mode);
13403        return (1);
13404    }
13405
13406    /* get the congestion management parameters */
13407
13408    vnic = 0;
13409    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13410        /* get min/max bw */
13411        val = MFCFG_RD(sc, func_mf_config[i].config);
13412        mf_info->min_bw[vnic] =
13413            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13414        mf_info->max_bw[vnic] =
13415            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13416        vnic++;
13417    }
13418
13419    return (bxe_check_valid_mf_cfg(sc));
13420}
13421
13422static int
13423bxe_get_shmem_info(struct bxe_softc *sc)
13424{
13425    int port;
13426    uint32_t mac_hi, mac_lo, val;
13427
13428    port = SC_PORT(sc);
13429    mac_hi = mac_lo = 0;
13430
13431    sc->link_params.sc   = sc;
13432    sc->link_params.port = port;
13433
13434    /* get the hardware config info */
13435    sc->devinfo.hw_config =
13436        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13437    sc->devinfo.hw_config2 =
13438        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13439
13440    sc->link_params.hw_led_mode =
13441        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13442         SHARED_HW_CFG_LED_MODE_SHIFT);
13443
13444    /* get the port feature config */
13445    sc->port.config =
13446        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13447
13448    /* get the link params */
13449    sc->link_params.speed_cap_mask[0] =
13450        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13451    sc->link_params.speed_cap_mask[1] =
13452        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13453
13454    /* get the lane config */
13455    sc->link_params.lane_config =
13456        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13457
13458    /* get the link config */
13459    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13460    sc->port.link_config[ELINK_INT_PHY] = val;
13461    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13462    sc->port.link_config[ELINK_EXT_PHY1] =
13463        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13464
13465    /* get the override preemphasis flag and enable it or turn it off */
13466    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13467    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13468        sc->link_params.feature_config_flags |=
13469            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13470    } else {
13471        sc->link_params.feature_config_flags &=
13472            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13473    }
13474
13475    /* get the initial value of the link params */
13476    sc->link_params.multi_phy_config =
13477        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13478
13479    /* get external phy info */
13480    sc->port.ext_phy_config =
13481        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13482
13483    /* get the multifunction configuration */
13484    bxe_get_mf_cfg_info(sc);
13485
13486    /* get the mac address */
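    /*
     * The MAC address lives in shmem as two 32-bit words: mac_upper holds
     * the two most significant bytes and mac_lower the remaining four,
     * assembled below most significant byte first.
     */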
13487    if (IS_MF(sc)) {
13488        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13489        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13490    } else {
13491        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13492        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13493    }
13494
13495    if ((mac_lo == 0) && (mac_hi == 0)) {
13496        *sc->mac_addr_str = 0;
13497        BLOGE(sc, "No Ethernet address programmed!\n");
13498    } else {
13499        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13500        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13501        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13502        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13503        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13504        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13505        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13506                 "%02x:%02x:%02x:%02x:%02x:%02x",
13507                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13508                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13509                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13510        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13511    }
13512
13513    return (0);
13514}
13515
13516static void
13517bxe_get_tunable_params(struct bxe_softc *sc)
13518{
13519    /* sanity checks */
13520
13521    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13522        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13523        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13524        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13525        bxe_interrupt_mode = INTR_MODE_MSIX;
13526    }
13527
13528    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13529        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13530        bxe_queue_count = 0;
13531    }
13532
13533    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13534        if (bxe_max_rx_bufs == 0) {
13535            bxe_max_rx_bufs = RX_BD_USABLE;
13536        } else {
13537            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13538            bxe_max_rx_bufs = 2048;
13539        }
13540    }
13541
13542    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13543        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13544        bxe_hc_rx_ticks = 25;
13545    }
13546
13547    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13548        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13549        bxe_hc_tx_ticks = 50;
13550    }
13551
13552    if (bxe_max_aggregation_size == 0) {
13553        bxe_max_aggregation_size = TPA_AGG_SIZE;
13554    }
13555
13556    if (bxe_max_aggregation_size > 0xffff) {
13557        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13558              bxe_max_aggregation_size);
13559        bxe_max_aggregation_size = TPA_AGG_SIZE;
13560    }
13561
13562    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13563        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13564        bxe_mrrs = -1;
13565    }
13566
13567    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13568        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13569        bxe_autogreeen = 0;
13570    }
13571
13572    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13573        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13574        bxe_udp_rss = 0;
13575    }
13576
13577    /* pull in user settings */
13578
13579    sc->interrupt_mode       = bxe_interrupt_mode;
13580    sc->max_rx_bufs          = bxe_max_rx_bufs;
13581    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13582    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13583    sc->max_aggregation_size = bxe_max_aggregation_size;
13584    sc->mrrs                 = bxe_mrrs;
13585    sc->autogreeen           = bxe_autogreeen;
13586    sc->udp_rss              = bxe_udp_rss;
13587
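    /*
     * Queue count: INTx is limited to a single queue. With MSI/MSI-X the
     * default is one queue per CPU, and a non-zero queue_count tunable
     * overrides that default; either way the result is clipped to
     * MAX_RSS_CHAINS and to the number of CPUs.
     */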
13588    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13589        sc->num_queues = 1;
13590    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13591        sc->num_queues =
13592            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13593                MAX_RSS_CHAINS);
13594        if (sc->num_queues > mp_ncpus) {
13595            sc->num_queues = mp_ncpus;
13596        }
13597    }
13598
13599    BLOGD(sc, DBG_LOAD,
13600          "User Config: "
13601          "debug=0x%lx "
13602          "interrupt_mode=%d "
13603          "queue_count=%d "
13604          "hc_rx_ticks=%d "
13605          "hc_tx_ticks=%d "
13606          "rx_budget=%d "
13607          "max_aggregation_size=%d "
13608          "mrrs=%d "
13609          "autogreeen=%d "
13610          "udp_rss=%d\n",
13611          bxe_debug,
13612          sc->interrupt_mode,
13613          sc->num_queues,
13614          sc->hc_rx_ticks,
13615          sc->hc_tx_ticks,
13616          bxe_rx_budget,
13617          sc->max_aggregation_size,
13618          sc->mrrs,
13619          sc->autogreeen,
13620          sc->udp_rss);
13621}
13622
13623static void
13624bxe_media_detect(struct bxe_softc *sc)
13625{
13626    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13627    switch (sc->link_params.phy[phy_idx].media_type) {
13628    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13629    case ELINK_ETH_PHY_XFP_FIBER:
13630        BLOGI(sc, "Found 10Gb Fiber media.\n");
13631        sc->media = IFM_10G_SR;
13632        break;
13633    case ELINK_ETH_PHY_SFP_1G_FIBER:
13634        BLOGI(sc, "Found 1Gb Fiber media.\n");
13635        sc->media = IFM_1000_SX;
13636        break;
13637    case ELINK_ETH_PHY_KR:
13638    case ELINK_ETH_PHY_CX4:
13639        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13640        sc->media = IFM_10G_CX4;
13641        break;
13642    case ELINK_ETH_PHY_DA_TWINAX:
13643        BLOGI(sc, "Found 10Gb Twinax media.\n");
13644        sc->media = IFM_10G_TWINAX;
13645        break;
13646    case ELINK_ETH_PHY_BASE_T:
13647        if (sc->link_params.speed_cap_mask[0] &
13648            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13649            BLOGI(sc, "Found 10GBase-T media.\n");
13650            sc->media = IFM_10G_T;
13651        } else {
13652            BLOGI(sc, "Found 1000Base-T media.\n");
13653            sc->media = IFM_1000_T;
13654        }
13655        break;
13656    case ELINK_ETH_PHY_NOT_PRESENT:
13657        BLOGI(sc, "Media not present.\n");
13658        sc->media = 0;
13659        break;
13660    case ELINK_ETH_PHY_UNSPECIFIED:
13661    default:
13662        BLOGI(sc, "Unknown media!\n");
13663        sc->media = 0;
13664        break;
13665    }
13666}
13667
13668#define GET_FIELD(value, fname)                     \
13669    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13670#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13671#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13672
13673static int
13674bxe_get_igu_cam_info(struct bxe_softc *sc)
13675{
13676    int pfid = SC_FUNC(sc);
13677    int igu_sb_id;
13678    uint32_t val;
13679    uint8_t fid, igu_sb_cnt = 0;
13680
13681    sc->igu_base_sb = 0xff;
13682
13683    if (CHIP_INT_MODE_IS_BC(sc)) {
13684        int vn = SC_VN(sc);
13685        igu_sb_cnt = sc->igu_sb_cnt;
13686        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13687                           FP_SB_MAX_E1x);
13688        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13689                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13690        return (0);
13691    }
13692
13693    /* IGU in normal mode - read CAM */
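    /*
     * Each valid CAM entry encodes a function id (with a flag marking PF
     * entries) and a vector number within that function; vector 0 is the
     * function's default status block, the remaining vectors are counted
     * as the function's non-default status blocks.
     */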
13694    for (igu_sb_id = 0;
13695         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13696         igu_sb_id++) {
13697        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13698        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13699            continue;
13700        }
13701        fid = IGU_FID(val);
13702        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13703            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13704                continue;
13705            }
13706            if (IGU_VEC(val) == 0) {
13707                /* default status block */
13708                sc->igu_dsb_id = igu_sb_id;
13709            } else {
13710                if (sc->igu_base_sb == 0xff) {
13711                    sc->igu_base_sb = igu_sb_id;
13712                }
13713                igu_sb_cnt++;
13714            }
13715        }
13716    }
13717
13718    /*
13719     * With the new PF resource allocation in MFW T7.4 and above, the number
13720     * of CAM entries may not match the value advertised in PCI config space.
13721     * The driver should use the minimum of the two as the actual status
13722     * block count.
13723     */
13724    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13725
13726    if (igu_sb_cnt == 0) {
13727        BLOGE(sc, "CAM configuration error\n");
13728        return (-1);
13729    }
13730
13731    return (0);
13732}
13733
13734/*
13735 * Gather various information from the device config space, the device itself,
13736 * shmem, and the user input.
13737 */
13738static int
13739bxe_get_device_info(struct bxe_softc *sc)
13740{
13741    uint32_t val;
13742    int rc;
13743
13744    /* Get the data for the device */
13745    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13746    sc->devinfo.device_id    = pci_get_device(sc->dev);
13747    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13748    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13749
13750    /* get the chip revision (chip metal comes from pci config space) */
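    /*
     * chip_id layout (see the debug decode below): bits 31-16 chip number,
     * 15-12 revision, 11-4 metal, 3-0 bond id.
     */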
13751    sc->devinfo.chip_id     =
13752    sc->link_params.chip_id =
13753        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13754         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13755         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13756         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13757
13758    /* force 57811 according to MISC register */
13759    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13760        if (CHIP_IS_57810(sc)) {
13761            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13762                                   (sc->devinfo.chip_id & 0x0000ffff));
13763        } else if (CHIP_IS_57810_MF(sc)) {
13764            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13765                                   (sc->devinfo.chip_id & 0x0000ffff));
13766        }
13767        sc->devinfo.chip_id |= 0x1;
13768    }
13769
13770    BLOGD(sc, DBG_LOAD,
13771          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13772          sc->devinfo.chip_id,
13773          ((sc->devinfo.chip_id >> 16) & 0xffff),
13774          ((sc->devinfo.chip_id >> 12) & 0xf),
13775          ((sc->devinfo.chip_id >>  4) & 0xff),
13776          ((sc->devinfo.chip_id >>  0) & 0xf));
13777
13778    val = (REG_RD(sc, 0x2874) & 0x55);
13779    if ((sc->devinfo.chip_id & 0x1) ||
13780        (CHIP_IS_E1(sc) && val) ||
13781        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13782        sc->flags |= BXE_ONE_PORT_FLAG;
13783        BLOGD(sc, DBG_LOAD, "single port device\n");
13784    }
13785
13786    /* set the doorbell size */
13787    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13788
13789    /* determine whether the device is in 2 port or 4 port mode */
13790    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13791    if (CHIP_IS_E2E3(sc)) {
13792        /*
13793         * Read port4mode_en_ovwr[0]:
13794         *   If 1, four port mode is in port4mode_en_ovwr[1].
13795         *   If 0, four port mode is in port4mode_en[0].
13796         */
13797        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13798        if (val & 1) {
13799            val = ((val >> 1) & 1);
13800        } else {
13801            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13802        }
13803
13804        sc->devinfo.chip_port_mode =
13805            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13806
13807        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13808    }
13809
13810    /* get the function and path info for the device */
13811    bxe_get_function_num(sc);
13812
13813    /* get the shared memory base address */
13814    sc->devinfo.shmem_base     =
13815    sc->link_params.shmem_base =
13816        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13817    sc->devinfo.shmem2_base =
13818        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13819                                  MISC_REG_GENERIC_CR_0));
13820
13821    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13822          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13823
13824    if (!sc->devinfo.shmem_base) {
13825        /* this should ONLY prevent upcoming shmem reads */
13826        BLOGI(sc, "MCP not active\n");
13827        sc->flags |= BXE_NO_MCP_FLAG;
13828        return (0);
13829    }
13830
13831    /* make sure the shared memory contents are valid */
13832    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13833    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13834        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13835        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13836        return (0);
13837    }
13838    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13839
13840    /* get the bootcode version */
13841    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13842    snprintf(sc->devinfo.bc_ver_str,
13843             sizeof(sc->devinfo.bc_ver_str),
13844             "%d.%d.%d",
13845             ((sc->devinfo.bc_ver >> 24) & 0xff),
13846             ((sc->devinfo.bc_ver >> 16) & 0xff),
13847             ((sc->devinfo.bc_ver >>  8) & 0xff));
13848    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13849
13850    /* get the bootcode shmem address */
13851    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13852    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13853
13854    /* clean indirect addresses as they're not used */
13855    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13856    if (IS_PF(sc)) {
13857        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13858        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13859        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13860        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13861        if (CHIP_IS_E1x(sc)) {
13862            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13863            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13864            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13865            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13866        }
13867
13868        /*
13869         * Enable internal target-read (in case we are probed after PF
13870         * FLR). Must be done prior to any BAR read access. Only for
13871         * 57712 and up
13872         */
13873        if (!CHIP_IS_E1x(sc)) {
13874            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13875        }
13876    }
13877
13878    /* get the nvram size */
13879    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13880    sc->devinfo.flash_size =
13881        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13882    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13883
13884    /* get PCI capabilities */
13885    bxe_probe_pci_caps(sc);
13886
13887    bxe_set_power_state(sc, PCI_PM_D0);
13888
13889    /* get various configuration parameters from shmem */
13890    bxe_get_shmem_info(sc);
13891
13892    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13893        val = pci_read_config(sc->dev,
13894                              (sc->devinfo.pcie_msix_cap_reg +
13895                               PCIR_MSIX_CTRL),
13896                              2);
13897        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13898    } else {
13899        sc->igu_sb_cnt = 1;
13900    }
13901
13902    sc->igu_base_addr = BAR_IGU_INTMEM;
13903
13904    /* initialize IGU parameters */
13905    if (CHIP_IS_E1x(sc)) {
13906        sc->devinfo.int_block = INT_BLOCK_HC;
13907        sc->igu_dsb_id = DEF_SB_IGU_ID;
13908        sc->igu_base_sb = 0;
13909    } else {
13910        sc->devinfo.int_block = INT_BLOCK_IGU;
13911
13912        /* do not allow device reset during IGU info processing */
13913        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13914
13915        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13916
13917        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13918            int tout = 5000;
13919
13920            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13921
13922            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13923            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13924            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13925
13926            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13927                tout--;
13928                DELAY(1000);
13929            }
13930
13931            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13932                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13933                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13934                return (-1);
13935            }
13936        }
13937
13938        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13939            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13940            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13941        } else {
13942            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13943        }
13944
13945        rc = bxe_get_igu_cam_info(sc);
13946
13947        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13948
13949        if (rc) {
13950            return (rc);
13951        }
13952    }
13953
13954    /*
13955     * Get base FW non-default (fast path) status block ID. This value is
13956     * used to initialize the fw_sb_id saved on the fp/queue structure to
13957     * determine the id used by the FW.
13958     */
13959    if (CHIP_IS_E1x(sc)) {
13960        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13961    } else {
13962        /*
13963         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13964         * the same queue are indicated on the same IGU SB). So we prefer
13965         * FW and IGU SBs to be the same value.
13966         */
13967        sc->base_fw_ndsb = sc->igu_base_sb;
13968    }
13969
13970    BLOGD(sc, DBG_LOAD,
13971          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13972          sc->igu_dsb_id, sc->igu_base_sb,
13973          sc->igu_sb_cnt, sc->base_fw_ndsb);
13974
13975    elink_phy_probe(&sc->link_params);
13976
13977    return (0);
13978}
13979
13980static void
13981bxe_link_settings_supported(struct bxe_softc *sc,
13982                            uint32_t         switch_cfg)
13983{
13984    uint32_t cfg_size = 0;
13985    uint32_t idx;
13986    uint8_t port = SC_PORT(sc);
13987
13988    /* aggregation of supported attributes of all external phys */
13989    sc->port.supported[0] = 0;
13990    sc->port.supported[1] = 0;
13991
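    /*
     * With a single external PHY only configuration 0 is used. With two
     * external PHYs there are two link configurations, and the
     * PORT_HW_CFG_PHY_SWAPPED_ENABLED flag selects which external PHY
     * backs configuration 0.
     */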
13992    switch (sc->link_params.num_phys) {
13993    case 1:
13994        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13995        cfg_size = 1;
13996        break;
13997    case 2:
13998        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13999        cfg_size = 1;
14000        break;
14001    case 3:
14002        if (sc->link_params.multi_phy_config &
14003            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14004            sc->port.supported[1] =
14005                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14006            sc->port.supported[0] =
14007                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14008        } else {
14009            sc->port.supported[0] =
14010                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14011            sc->port.supported[1] =
14012                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14013        }
14014        cfg_size = 2;
14015        break;
14016    }
14017
14018    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14019        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14020              SHMEM_RD(sc,
14021                       dev_info.port_hw_config[port].external_phy_config),
14022              SHMEM_RD(sc,
14023                       dev_info.port_hw_config[port].external_phy_config2));
14024        return;
14025    }
14026
14027    if (CHIP_IS_E3(sc)) {
14028        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14029    } else {
14030        switch (switch_cfg) {
14031        case ELINK_SWITCH_CFG_1G:
14032            sc->port.phy_addr =
14033                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14034            break;
14035        case ELINK_SWITCH_CFG_10G:
14036            sc->port.phy_addr =
14037                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14038            break;
14039        default:
14040            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14041                  sc->port.link_config[0]);
14042            return;
14043        }
14044    }
14045
14046    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14047
14048    /* mask what we support according to speed_cap_mask per configuration */
14049    for (idx = 0; idx < cfg_size; idx++) {
14050        if (!(sc->link_params.speed_cap_mask[idx] &
14051              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14052            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14053        }
14054
14055        if (!(sc->link_params.speed_cap_mask[idx] &
14056              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14057            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14058        }
14059
14060        if (!(sc->link_params.speed_cap_mask[idx] &
14061              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14062            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14063        }
14064
14065        if (!(sc->link_params.speed_cap_mask[idx] &
14066              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14067            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14068        }
14069
14070        if (!(sc->link_params.speed_cap_mask[idx] &
14071              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14072            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14073        }
14074
14075        if (!(sc->link_params.speed_cap_mask[idx] &
14076              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14077            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14078        }
14079
14080        if (!(sc->link_params.speed_cap_mask[idx] &
14081              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14082            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14083        }
14084
14085        if (!(sc->link_params.speed_cap_mask[idx] &
14086              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14087            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14088        }
14089    }
14090
14091    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14092          sc->port.supported[0], sc->port.supported[1]);
14093}
14094
14095static void
14096bxe_link_settings_requested(struct bxe_softc *sc)
14097{
14098    uint32_t link_config;
14099    uint32_t idx;
14100    uint32_t cfg_size = 0;
14101
14102    sc->port.advertising[0] = 0;
14103    sc->port.advertising[1] = 0;
14104
14105    switch (sc->link_params.num_phys) {
14106    case 1:
14107    case 2:
14108        cfg_size = 1;
14109        break;
14110    case 3:
14111        cfg_size = 2;
14112        break;
14113    }
14114
14115    for (idx = 0; idx < cfg_size; idx++) {
14116        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14117        link_config = sc->port.link_config[idx];
14118
14119        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14120        case PORT_FEATURE_LINK_SPEED_AUTO:
14121            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14122                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14123                sc->port.advertising[idx] |= sc->port.supported[idx];
14124                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14125                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14126                    sc->port.advertising[idx] |=
14127                        (ELINK_SUPPORTED_100baseT_Half |
14128                         ELINK_SUPPORTED_100baseT_Full);
14129            } else {
14130                /* force 10G, no AN */
14131                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14132                sc->port.advertising[idx] |=
14133                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14134                continue;
14135            }
14136            break;
14137
14138        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14139            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14140                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14141                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14142                                              ADVERTISED_TP);
14143            } else {
14144                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14145                          "speed_cap_mask=0x%08x\n",
14146                      link_config, sc->link_params.speed_cap_mask[idx]);
14147                return;
14148            }
14149            break;
14150
14151        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14152            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14153                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14154                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14155                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14156                                              ADVERTISED_TP);
14157            } else {
14158                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14159                          "speed_cap_mask=0x%08x\n",
14160                      link_config, sc->link_params.speed_cap_mask[idx]);
14161                return;
14162            }
14163            break;
14164
14165        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14166            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14167                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14168                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14169                                              ADVERTISED_TP);
14170            } else {
14171                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14172                          "speed_cap_mask=0x%08x\n",
14173                      link_config, sc->link_params.speed_cap_mask[idx]);
14174                return;
14175            }
14176            break;
14177
14178        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14179            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14180                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14181                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14182                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14183                                              ADVERTISED_TP);
14184            } else {
14185                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14186                          "speed_cap_mask=0x%08x\n",
14187                      link_config, sc->link_params.speed_cap_mask[idx]);
14188                return;
14189            }
14190            break;
14191
14192        case PORT_FEATURE_LINK_SPEED_1G:
14193            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14194                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14195                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14196                                              ADVERTISED_TP);
14197            } else {
14198                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14199                          "speed_cap_mask=0x%08x\n",
14200                      link_config, sc->link_params.speed_cap_mask[idx]);
14201                return;
14202            }
14203            break;
14204
14205        case PORT_FEATURE_LINK_SPEED_2_5G:
14206            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14207                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14208                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14209                                              ADVERTISED_TP);
14210            } else {
14211                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14212                          "speed_cap_mask=0x%08x\n",
14213                      link_config, sc->link_params.speed_cap_mask[idx]);
14214                return;
14215            }
14216            break;
14217
14218        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14219            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14220                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14221                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14222                                              ADVERTISED_FIBRE);
14223            } else {
14224                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14225                          "speed_cap_mask=0x%08x\n",
14226                      link_config, sc->link_params.speed_cap_mask[idx]);
14227                return;
14228            }
14229            break;
14230
14231        case PORT_FEATURE_LINK_SPEED_20G:
14232            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14233            break;
14234
14235        default:
14236            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14237                      "speed_cap_mask=0x%08x\n",
14238                  link_config, sc->link_params.speed_cap_mask[idx]);
14239            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14240            sc->port.advertising[idx] = sc->port.supported[idx];
14241            break;
14242        }
14243
14244        sc->link_params.req_flow_ctrl[idx] =
14245            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14246
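        /*
         * Flow control requested as AUTO can only be honored when the PHY
         * supports autonegotiation; otherwise fall back to no flow control.
         */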
14247        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14248            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14249                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14250            } else {
14251                bxe_set_requested_fc(sc);
14252            }
14253        }
14254
14255        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14256                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14257              sc->link_params.req_line_speed[idx],
14258              sc->link_params.req_duplex[idx],
14259              sc->link_params.req_flow_ctrl[idx],
14260              sc->port.advertising[idx]);
14261    }
14262}
14263
14264static void
14265bxe_get_phy_info(struct bxe_softc *sc)
14266{
14267    uint8_t port = SC_PORT(sc);
14268    uint32_t config = sc->port.config;
14269    uint32_t eee_mode;
14270
14271    /* shmem data already read in bxe_get_shmem_info() */
14272
14273    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14274                        "link_config0=0x%08x\n",
14275               sc->link_params.lane_config,
14276               sc->link_params.speed_cap_mask[0],
14277               sc->port.link_config[0]);
14278
14279    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14280    bxe_link_settings_requested(sc);
14281
14282    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14283        sc->link_params.feature_config_flags |=
14284            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14285    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14286        sc->link_params.feature_config_flags &=
14287            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14288    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14289        sc->link_params.feature_config_flags |=
14290            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14291    }
14292
14293    /* configure link feature according to nvram value */
14294    eee_mode =
14295        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14296          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14297         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14298    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14299        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14300                                    ELINK_EEE_MODE_ENABLE_LPI |
14301                                    ELINK_EEE_MODE_OUTPUT_TIME);
14302    } else {
14303        sc->link_params.eee_mode = 0;
14304    }
14305
14306    /* get the media type */
14307    bxe_media_detect(sc);
14308}
14309
14310static void
14311bxe_get_params(struct bxe_softc *sc)
14312{
14313    /* get user tunable params */
14314    bxe_get_tunable_params(sc);
14315
14316    /* select the RX and TX ring sizes */
14317    sc->tx_ring_size = TX_BD_USABLE;
14318    sc->rx_ring_size = RX_BD_USABLE;
14319
14320    /* XXX disable WoL */
14321    sc->wol = 0;
14322}
14323
14324static void
14325bxe_set_modes_bitmap(struct bxe_softc *sc)
14326{
14327    uint32_t flags = 0;
14328
14329    if (CHIP_REV_IS_FPGA(sc)) {
14330        SET_FLAGS(flags, MODE_FPGA);
14331    } else if (CHIP_REV_IS_EMUL(sc)) {
14332        SET_FLAGS(flags, MODE_EMUL);
14333    } else {
14334        SET_FLAGS(flags, MODE_ASIC);
14335    }
14336
14337    if (CHIP_IS_MODE_4_PORT(sc)) {
14338        SET_FLAGS(flags, MODE_PORT4);
14339    } else {
14340        SET_FLAGS(flags, MODE_PORT2);
14341    }
14342
14343    if (CHIP_IS_E2(sc)) {
14344        SET_FLAGS(flags, MODE_E2);
14345    } else if (CHIP_IS_E3(sc)) {
14346        SET_FLAGS(flags, MODE_E3);
14347        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14348            SET_FLAGS(flags, MODE_E3_A0);
14349        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14350            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14351        }
14352    }
14353
14354    if (IS_MF(sc)) {
14355        SET_FLAGS(flags, MODE_MF);
14356        switch (sc->devinfo.mf_info.mf_mode) {
14357        case MULTI_FUNCTION_SD:
14358            SET_FLAGS(flags, MODE_MF_SD);
14359            break;
14360        case MULTI_FUNCTION_SI:
14361            SET_FLAGS(flags, MODE_MF_SI);
14362            break;
14363        case MULTI_FUNCTION_AFEX:
14364            SET_FLAGS(flags, MODE_MF_AFEX);
14365            break;
14366        }
14367    } else {
14368        SET_FLAGS(flags, MODE_SF);
14369    }
14370
14371#if defined(__LITTLE_ENDIAN)
14372    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14373#else /* __BIG_ENDIAN */
14374    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14375#endif
14376
14377    INIT_MODE_FLAGS(sc) = flags;
14378}
14379
14380static int
14381bxe_alloc_hsi_mem(struct bxe_softc *sc)
14382{
14383    struct bxe_fastpath *fp;
14384    bus_addr_t busaddr;
14385    int max_agg_queues;
14386    int max_segments;
14387    bus_size_t max_size;
14388    bus_size_t max_seg_size;
14389    char buf[32];
14390    int rc;
14391    int i, j;
14392
14393    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14394
14395    /* allocate the parent bus DMA tag */
14396    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14397                            1,                        /* alignment */
14398                            0,                        /* boundary limit */
14399                            BUS_SPACE_MAXADDR,        /* restricted low */
14400                            BUS_SPACE_MAXADDR,        /* restricted hi */
14401                            NULL,                     /* addr filter() */
14402                            NULL,                     /* addr filter() arg */
14403                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14404                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14405                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14406                            0,                        /* flags */
14407                            NULL,                     /* lock() */
14408                            NULL,                     /* lock() arg */
14409                            &sc->parent_dma_tag);     /* returned dma tag */
14410    if (rc != 0) {
14411        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14412        return (1);
14413    }
14414
14415    /************************/
14416    /* DEFAULT STATUS BLOCK */
14417    /************************/
14418
14419    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14420                      &sc->def_sb_dma, "default status block") != 0) {
14421        /* XXX */
14422        bus_dma_tag_destroy(sc->parent_dma_tag);
14423        return (1);
14424    }
14425
14426    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14427
14428    /***************/
14429    /* EVENT QUEUE */
14430    /***************/
14431
14432    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14433                      &sc->eq_dma, "event queue") != 0) {
14434        /* XXX */
14435        bxe_dma_free(sc, &sc->def_sb_dma);
14436        sc->def_sb = NULL;
14437        bus_dma_tag_destroy(sc->parent_dma_tag);
14438        return (1);
14439    }
14440
14441    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14442
14443    /*************/
14444    /* SLOW PATH */
14445    /*************/
14446
14447    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14448                      &sc->sp_dma, "slow path") != 0) {
14449        /* XXX */
14450        bxe_dma_free(sc, &sc->eq_dma);
14451        sc->eq = NULL;
14452        bxe_dma_free(sc, &sc->def_sb_dma);
14453        sc->def_sb = NULL;
14454        bus_dma_tag_destroy(sc->parent_dma_tag);
14455        return (1);
14456    }
14457
14458    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14459
14460    /*******************/
14461    /* SLOW PATH QUEUE */
14462    /*******************/
14463
14464    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14465                      &sc->spq_dma, "slow path queue") != 0) {
14466        /* XXX */
14467        bxe_dma_free(sc, &sc->sp_dma);
14468        sc->sp = NULL;
14469        bxe_dma_free(sc, &sc->eq_dma);
14470        sc->eq = NULL;
14471        bxe_dma_free(sc, &sc->def_sb_dma);
14472        sc->def_sb = NULL;
14473        bus_dma_tag_destroy(sc->parent_dma_tag);
14474        return (1);
14475    }
14476
14477    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14478
14479    /***************************/
14480    /* FW DECOMPRESSION BUFFER */
14481    /***************************/
14482
14483    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14484                      "fw decompression buffer") != 0) {
14485        /* XXX */
14486        bxe_dma_free(sc, &sc->spq_dma);
14487        sc->spq = NULL;
14488        bxe_dma_free(sc, &sc->sp_dma);
14489        sc->sp = NULL;
14490        bxe_dma_free(sc, &sc->eq_dma);
14491        sc->eq = NULL;
14492        bxe_dma_free(sc, &sc->def_sb_dma);
14493        sc->def_sb = NULL;
14494        bus_dma_tag_destroy(sc->parent_dma_tag);
14495        return (1);
14496    }
14497
14498    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14499
14500    if ((sc->gz_strm =
14501         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14502        /* XXX */
14503        bxe_dma_free(sc, &sc->gz_buf_dma);
14504        sc->gz_buf = NULL;
14505        bxe_dma_free(sc, &sc->spq_dma);
14506        sc->spq = NULL;
14507        bxe_dma_free(sc, &sc->sp_dma);
14508        sc->sp = NULL;
14509        bxe_dma_free(sc, &sc->eq_dma);
14510        sc->eq = NULL;
14511        bxe_dma_free(sc, &sc->def_sb_dma);
14512        sc->def_sb = NULL;
14513        bus_dma_tag_destroy(sc->parent_dma_tag);
14514        return (1);
14515    }
14516
14517    /*************/
14518    /* FASTPATHS */
14519    /*************/
14520
14521    /* allocate DMA memory for each fastpath structure */
14522    for (i = 0; i < sc->num_queues; i++) {
14523        fp = &sc->fp[i];
14524        fp->sc    = sc;
14525        fp->index = i;
14526
14527        /*******************/
14528        /* FP STATUS BLOCK */
14529        /*******************/
14530
14531        snprintf(buf, sizeof(buf), "fp %d status block", i);
14532        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14533                          &fp->sb_dma, buf) != 0) {
14534            /* XXX unwind and free previous fastpath allocations */
14535            BLOGE(sc, "Failed to alloc %s\n", buf);
14536            return (1);
14537        } else {
14538            if (CHIP_IS_E2E3(sc)) {
14539                fp->status_block.e2_sb =
14540                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14541            } else {
14542                fp->status_block.e1x_sb =
14543                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14544            }
14545        }
14546
14547        /******************/
14548        /* FP TX BD CHAIN */
14549        /******************/
14550
14551        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14552        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14553                          &fp->tx_dma, buf) != 0) {
14554            /* XXX unwind and free previous fastpath allocations */
14555            BLOGE(sc, "Failed to alloc %s\n", buf);
14556            return (1);
14557        } else {
14558            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14559        }
14560
14561        /* link together the tx bd chain pages */
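        /*
         * Note: the last BD of each page holds the bus address of the
         * following page, and the last page wraps back to page 0
         * ((j % TX_BD_NUM_PAGES) is 0 when j == TX_BD_NUM_PAGES), so the
         * chain forms a ring. The RX BD, RCQ, and SGE chains below are
         * linked similarly.
         */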
14562        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14563            /* index into the tx bd chain array to last entry per page */
14564            struct eth_tx_next_bd *tx_next_bd =
14565                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14566            /* point to the next page and wrap from last page */
14567            busaddr = (fp->tx_dma.paddr +
14568                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14569            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14570            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14571        }
14572
14573        /******************/
14574        /* FP RX BD CHAIN */
14575        /******************/
14576
14577        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14578        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14579                          &fp->rx_dma, buf) != 0) {
14580            /* XXX unwind and free previous fastpath allocations */
14581            BLOGE(sc, "Failed to alloc %s\n", buf);
14582            return (1);
14583        } else {
14584            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14585        }
14586
14587        /* link together the rx bd chain pages */
14588        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14589            /* index into the rx bd chain array to last entry per page */
14590            struct eth_rx_bd *rx_bd =
14591                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14592            /* point to the next page and wrap from last page */
14593            busaddr = (fp->rx_dma.paddr +
14594                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14595            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14596            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14597        }
14598
14599        /*******************/
14600        /* FP RX RCQ CHAIN */
14601        /*******************/
14602
14603        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14604        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14605                          &fp->rcq_dma, buf) != 0) {
14606            /* XXX unwind and free previous fastpath allocations */
14607            BLOGE(sc, "Failed to alloc %s\n", buf);
14608            return (1);
14609        } else {
14610            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14611        }
14612
14613        /* link together the rcq chain pages */
14614        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14615            /* index into the rcq chain array to last entry per page */
14616            struct eth_rx_cqe_next_page *rx_cqe_next =
14617                (struct eth_rx_cqe_next_page *)
14618                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14619            /* point to the next page and wrap from last page */
14620            busaddr = (fp->rcq_dma.paddr +
14621                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14622            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14623            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14624        }
14625
14626        /*******************/
14627        /* FP RX SGE CHAIN */
14628        /*******************/
14629
14630        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14631        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14632                          &fp->rx_sge_dma, buf) != 0) {
14633            /* XXX unwind and free previous fastpath allocations */
14634            BLOGE(sc, "Failed to alloc %s\n", buf);
14635            return (1);
14636        } else {
14637            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14638        }
14639
14640        /* link together the sge chain pages */
14641        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14642            /* index into the sge chain array to last entry per page */
14643            struct eth_rx_sge *rx_sge =
14644                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14645            /* point to the next page and wrap from last page */
14646            busaddr = (fp->rx_sge_dma.paddr +
14647                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14648            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14649            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14650        }
14651
14652        /***********************/
14653        /* FP TX MBUF DMA MAPS */
14654        /***********************/
14655
14656        /* set required sizes before mapping to conserve resources */
14657        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14658            max_size     = BXE_TSO_MAX_SIZE;
14659            max_segments = BXE_TSO_MAX_SEGMENTS;
14660            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14661        } else {
14662            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14663            max_segments = BXE_MAX_SEGMENTS;
14664            max_seg_size = MCLBYTES;
14665        }
14666
14667        /* create a dma tag for the tx mbufs */
14668        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14669                                1,                  /* alignment */
14670                                0,                  /* boundary limit */
14671                                BUS_SPACE_MAXADDR,  /* restricted low */
14672                                BUS_SPACE_MAXADDR,  /* restricted hi */
14673                                NULL,               /* addr filter() */
14674                                NULL,               /* addr filter() arg */
14675                                max_size,           /* max map size */
14676                                max_segments,       /* num discontinuous */
14677                                max_seg_size,       /* max seg size */
14678                                0,                  /* flags */
14679                                NULL,               /* lock() */
14680                                NULL,               /* lock() arg */
14681                                &fp->tx_mbuf_tag);  /* returned dma tag */
14682        if (rc != 0) {
14683            /* XXX unwind and free previous fastpath allocations */
14684            BLOGE(sc, "Failed to create dma tag for "
14685                      "'fp %d tx mbufs' (%d)\n", i, rc);
14686            return (1);
14687        }
14688
14689        /* create dma maps for each of the tx mbuf clusters */
14690        for (j = 0; j < TX_BD_TOTAL; j++) {
14691            if (bus_dmamap_create(fp->tx_mbuf_tag,
14692                                  BUS_DMA_NOWAIT,
14693                                  &fp->tx_mbuf_chain[j].m_map)) {
14694                /* XXX unwind and free previous fastpath allocations */
14695                BLOGE(sc, "Failed to create dma map for "
14696                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14697                return (1);
14698            }
14699        }
14700
14701        /***********************/
14702        /* FP RX MBUF DMA MAPS */
14703        /***********************/
14704
14705        /* create a dma tag for the rx mbufs */
14706        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14707                                1,                  /* alignment */
14708                                0,                  /* boundary limit */
14709                                BUS_SPACE_MAXADDR,  /* restricted low */
14710                                BUS_SPACE_MAXADDR,  /* restricted hi */
14711                                NULL,               /* addr filter() */
14712                                NULL,               /* addr filter() arg */
14713                                MJUM9BYTES,         /* max map size */
14714                                1,                  /* num discontinuous */
14715                                MJUM9BYTES,         /* max seg size */
14716                                0,                  /* flags */
14717                                NULL,               /* lock() */
14718                                NULL,               /* lock() arg */
14719                                &fp->rx_mbuf_tag);  /* returned dma tag */
14720        if (rc != 0) {
14721            /* XXX unwind and free previous fastpath allocations */
14722            BLOGE(sc, "Failed to create dma tag for "
14723                      "'fp %d rx mbufs' (%d)\n", i, rc);
14724            return (1);
14725        }
14726
14727        /* create dma maps for each of the rx mbuf clusters */
14728        for (j = 0; j < RX_BD_TOTAL; j++) {
14729            if (bus_dmamap_create(fp->rx_mbuf_tag,
14730                                  BUS_DMA_NOWAIT,
14731                                  &fp->rx_mbuf_chain[j].m_map)) {
14732                /* XXX unwind and free previous fastpath allocations */
14733                BLOGE(sc, "Failed to create dma map for "
14734                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14735                return (1);
14736            }
14737        }
14738
14739        /* create dma map for the spare rx mbuf cluster */
14740        if (bus_dmamap_create(fp->rx_mbuf_tag,
14741                              BUS_DMA_NOWAIT,
14742                              &fp->rx_mbuf_spare_map)) {
14743            /* XXX unwind and free previous fastpath allocations */
14744            BLOGE(sc, "Failed to create dma map for "
14745                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14746            return (1);
14747        }
14748
14749        /***************************/
14750        /* FP RX SGE MBUF DMA MAPS */
14751        /***************************/
14752
14753        /* create a dma tag for the rx sge mbufs */
14754        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14755                                1,                  /* alignment */
14756                                0,                  /* boundary limit */
14757                                BUS_SPACE_MAXADDR,  /* restricted low */
14758                                BUS_SPACE_MAXADDR,  /* restricted hi */
14759                                NULL,               /* addr filter() */
14760                                NULL,               /* addr filter() arg */
14761                                BCM_PAGE_SIZE,      /* max map size */
14762                                1,                  /* num discontinuous */
14763                                BCM_PAGE_SIZE,      /* max seg size */
14764                                0,                  /* flags */
14765                                NULL,               /* lock() */
14766                                NULL,               /* lock() arg */
14767                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14768        if (rc != 0) {
14769            /* XXX unwind and free previous fastpath allocations */
14770            BLOGE(sc, "Failed to create dma tag for "
14771                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14772            return (1);
14773        }
14774
14775        /* create dma maps for the rx sge mbuf clusters */
14776        for (j = 0; j < RX_SGE_TOTAL; j++) {
14777            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14778                                  BUS_DMA_NOWAIT,
14779                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14780                /* XXX unwind and free previous fastpath allocations */
14781                BLOGE(sc, "Failed to create dma map for "
14782                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14783                return (1);
14784            }
14785        }
14786
14787        /* create dma map for the spare rx sge mbuf cluster */
14788        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14789                              BUS_DMA_NOWAIT,
14790                              &fp->rx_sge_mbuf_spare_map)) {
14791            /* XXX unwind and free previous fastpath allocations */
14792            BLOGE(sc, "Failed to create dma map for "
14793                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14794            return (1);
14795        }
14796
14797        /***************************/
14798        /* FP RX TPA MBUF DMA MAPS */
14799        /***************************/
14800
14801        /* create dma maps for the rx tpa mbuf clusters */
14802        max_agg_queues = MAX_AGG_QS(sc);
14803
14804        for (j = 0; j < max_agg_queues; j++) {
14805            if (bus_dmamap_create(fp->rx_mbuf_tag,
14806                                  BUS_DMA_NOWAIT,
14807                                  &fp->rx_tpa_info[j].bd.m_map)) {
14808                /* XXX unwind and free previous fastpath allocations */
14809                BLOGE(sc, "Failed to create dma map for "
14810                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14811                return (1);
14812            }
14813        }
14814
14815        /* create dma map for the spare rx tpa mbuf cluster */
14816        if (bus_dmamap_create(fp->rx_mbuf_tag,
14817                              BUS_DMA_NOWAIT,
14818                              &fp->rx_tpa_info_mbuf_spare_map)) {
14819            /* XXX unwind and free previous fastpath allocations */
14820            BLOGE(sc, "Failed to create dma map for "
14821                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14822            return (1);
14823        }
14824
14825        bxe_init_sge_ring_bit_mask(fp);
14826    }
14827
14828    return (0);
14829}
14830
14831static void
14832bxe_free_hsi_mem(struct bxe_softc *sc)
14833{
14834    struct bxe_fastpath *fp;
14835    int max_agg_queues;
14836    int i, j;
14837
14838    if (sc->parent_dma_tag == NULL) {
14839        return; /* assume nothing was allocated */
14840    }
14841
14842    for (i = 0; i < sc->num_queues; i++) {
14843        fp = &sc->fp[i];
14844
14845        /*******************/
14846        /* FP STATUS BLOCK */
14847        /*******************/
14848
14849        bxe_dma_free(sc, &fp->sb_dma);
14850        memset(&fp->status_block, 0, sizeof(fp->status_block));
14851
14852        /******************/
14853        /* FP TX BD CHAIN */
14854        /******************/
14855
14856        bxe_dma_free(sc, &fp->tx_dma);
14857        fp->tx_chain = NULL;
14858
14859        /******************/
14860        /* FP RX BD CHAIN */
14861        /******************/
14862
14863        bxe_dma_free(sc, &fp->rx_dma);
14864        fp->rx_chain = NULL;
14865
14866        /*******************/
14867        /* FP RX RCQ CHAIN */
14868        /*******************/
14869
14870        bxe_dma_free(sc, &fp->rcq_dma);
14871        fp->rcq_chain = NULL;
14872
14873        /*******************/
14874        /* FP RX SGE CHAIN */
14875        /*******************/
14876
14877        bxe_dma_free(sc, &fp->rx_sge_dma);
14878        fp->rx_sge_chain = NULL;
14879
14880        /***********************/
14881        /* FP TX MBUF DMA MAPS */
14882        /***********************/
14883
14884        if (fp->tx_mbuf_tag != NULL) {
14885            for (j = 0; j < TX_BD_TOTAL; j++) {
14886                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14887                    bus_dmamap_unload(fp->tx_mbuf_tag,
14888                                      fp->tx_mbuf_chain[j].m_map);
14889                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14890                                       fp->tx_mbuf_chain[j].m_map);
14891                }
14892            }
14893
14894            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14895            fp->tx_mbuf_tag = NULL;
14896        }
14897
14898        /***********************/
14899        /* FP RX MBUF DMA MAPS */
14900        /***********************/
14901
14902        if (fp->rx_mbuf_tag != NULL) {
14903            for (j = 0; j < RX_BD_TOTAL; j++) {
14904                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14905                    bus_dmamap_unload(fp->rx_mbuf_tag,
14906                                      fp->rx_mbuf_chain[j].m_map);
14907                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14908                                       fp->rx_mbuf_chain[j].m_map);
14909                }
14910            }
14911
14912            if (fp->rx_mbuf_spare_map != NULL) {
14913                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14914                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14915            }
14916
14917            /***************************/
14918            /* FP RX TPA MBUF DMA MAPS */
14919            /***************************/
14920
14921            max_agg_queues = MAX_AGG_QS(sc);
14922
14923            for (j = 0; j < max_agg_queues; j++) {
14924                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14925                    bus_dmamap_unload(fp->rx_mbuf_tag,
14926                                      fp->rx_tpa_info[j].bd.m_map);
14927                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14928                                       fp->rx_tpa_info[j].bd.m_map);
14929                }
14930            }
14931
14932            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14933                bus_dmamap_unload(fp->rx_mbuf_tag,
14934                                  fp->rx_tpa_info_mbuf_spare_map);
14935                bus_dmamap_destroy(fp->rx_mbuf_tag,
14936                                   fp->rx_tpa_info_mbuf_spare_map);
14937            }
14938
14939            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14940            fp->rx_mbuf_tag = NULL;
14941        }
14942
14943        /***************************/
14944        /* FP RX SGE MBUF DMA MAPS */
14945        /***************************/
14946
14947        if (fp->rx_sge_mbuf_tag != NULL) {
14948            for (j = 0; j < RX_SGE_TOTAL; j++) {
14949                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14950                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14951                                      fp->rx_sge_mbuf_chain[j].m_map);
14952                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14953                                       fp->rx_sge_mbuf_chain[j].m_map);
14954                }
14955            }
14956
14957            if (fp->rx_sge_mbuf_spare_map != NULL) {
14958                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14959                                  fp->rx_sge_mbuf_spare_map);
14960                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14961                                   fp->rx_sge_mbuf_spare_map);
14962            }
14963
14964            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14965            fp->rx_sge_mbuf_tag = NULL;
14966        }
14967    }
14968
14969    /***************************/
14970    /* FW DECOMPRESSION BUFFER */
14971    /***************************/
14972
14973    bxe_dma_free(sc, &sc->gz_buf_dma);
14974    sc->gz_buf = NULL;
14975    free(sc->gz_strm, M_DEVBUF);
14976    sc->gz_strm = NULL;
14977
14978    /*******************/
14979    /* SLOW PATH QUEUE */
14980    /*******************/
14981
14982    bxe_dma_free(sc, &sc->spq_dma);
14983    sc->spq = NULL;
14984
14985    /*************/
14986    /* SLOW PATH */
14987    /*************/
14988
14989    bxe_dma_free(sc, &sc->sp_dma);
14990    sc->sp = NULL;
14991
14992    /***************/
14993    /* EVENT QUEUE */
14994    /***************/
14995
14996    bxe_dma_free(sc, &sc->eq_dma);
14997    sc->eq = NULL;
14998
14999    /************************/
15000    /* DEFAULT STATUS BLOCK */
15001    /************************/
15002
15003    bxe_dma_free(sc, &sc->def_sb_dma);
15004    sc->def_sb = NULL;
15005
15006    bus_dma_tag_destroy(sc->parent_dma_tag);
15007    sc->parent_dma_tag = NULL;
15008}
15009
15010/*
15011 * A DMAE transaction from the previous driver may have been in flight when
15012 * the pre-boot stage ended and boot began. Its addresses would then be
15013 * invalid, setting the 'was-error' bit in the PCI glue block and causing all
15014 * HW-to-host PCIe transactions to time out. If this happened, clear the
15015 * pglueb interrupt that flagged it along with the was-done bit.
15016 */
15017static void
15018bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15019{
15020    uint32_t val;
15021
15022    if (!CHIP_IS_E1x(sc)) {
15023        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15024        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15025            BLOGD(sc, DBG_LOAD,
15026                  "Clearing 'was-error' bit that was set in pglueb\n");
15027            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15028        }
15029    }
15030}
15031
15032static int
15033bxe_prev_mcp_done(struct bxe_softc *sc)
15034{
15035    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15036                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15037    if (!rc) {
15038        BLOGE(sc, "MCP response failure, aborting\n");
15039        return (-1);
15040    }
15041
15042    return (0);
15043}
15044
15045static struct bxe_prev_list_node *
15046bxe_prev_path_get_entry(struct bxe_softc *sc)
15047{
15048    struct bxe_prev_list_node *tmp;
15049
15050    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15051        if ((sc->pcie_bus == tmp->bus) &&
15052            (sc->pcie_device == tmp->slot) &&
15053            (SC_PATH(sc) == tmp->path)) {
15054            return (tmp);
15055        }
15056    }
15057
15058    return (NULL);
15059}
15060
15061static uint8_t
15062bxe_prev_is_path_marked(struct bxe_softc *sc)
15063{
15064    struct bxe_prev_list_node *tmp;
15065    int rc = FALSE;
15066
15067    mtx_lock(&bxe_prev_mtx);
15068
15069    tmp = bxe_prev_path_get_entry(sc);
15070    if (tmp) {
15071        if (tmp->aer) {
15072            BLOGD(sc, DBG_LOAD,
15073                  "Path %d/%d/%d was marked by AER\n",
15074                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15075        } else {
15076            rc = TRUE;
15077            BLOGD(sc, DBG_LOAD,
15078                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15079                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15080        }
15081    }
15082
15083    mtx_unlock(&bxe_prev_mtx);
15084
15085    return (rc);
15086}
15087
15088static int
15089bxe_prev_mark_path(struct bxe_softc *sc,
15090                   uint8_t          after_undi)
15091{
15092    struct bxe_prev_list_node *tmp;
15093
15094    mtx_lock(&bxe_prev_mtx);
15095
15096    /* Check whether the entry for this path already exists */
15097    tmp = bxe_prev_path_get_entry(sc);
15098    if (tmp) {
15099        if (!tmp->aer) {
15100            BLOGD(sc, DBG_LOAD,
15101                  "Re-marking AER in path %d/%d/%d\n",
15102                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15103        } else {
15104            BLOGD(sc, DBG_LOAD,
15105                  "Removing AER indication from path %d/%d/%d\n",
15106                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15107            tmp->aer = 0;
15108        }
15109
15110        mtx_unlock(&bxe_prev_mtx);
15111        return (0);
15112    }
15113
15114    mtx_unlock(&bxe_prev_mtx);
15115
15116    /* Create an entry for this path and add it */
15117    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15118                 (M_NOWAIT | M_ZERO));
15119    if (!tmp) {
15120        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15121        return (-1);
15122    }
15123
15124    tmp->bus  = sc->pcie_bus;
15125    tmp->slot = sc->pcie_device;
15126    tmp->path = SC_PATH(sc);
15127    tmp->aer  = 0;
15128    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15129
15130    mtx_lock(&bxe_prev_mtx);
15131
15132    BLOGD(sc, DBG_LOAD,
15133          "Marked path %d/%d/%d - finished previous unload\n",
15134          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15135    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15136
15137    mtx_unlock(&bxe_prev_mtx);
15138
15139    return (0);
15140}
15141
15142static int
15143bxe_do_flr(struct bxe_softc *sc)
15144{
15145    int i;
15146
15147    /* only E2 and onwards support FLR */
15148    if (CHIP_IS_E1x(sc)) {
15149        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15150        return (-1);
15151    }
15152
15153    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
15154    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15155        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15156              sc->devinfo.bc_ver);
15157        return (-1);
15158    }
15159
15160    /* Wait for the Transaction Pending bit to clear */
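    /*
     * Poll up to four times with an increasing backoff (immediately, then
     * after 100ms, 200ms, and 400ms delays) before giving up and proceeding
     * with the reset anyway.
     */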
15161    for (i = 0; i < 4; i++) {
15162        if (i) {
15163            DELAY(((1 << (i - 1)) * 100) * 1000);
15164        }
15165
15166        if (!bxe_is_pcie_pending(sc)) {
15167            goto clear;
15168        }
15169    }
15170
15171    BLOGE(sc, "PCIE transaction is not cleared, "
15172              "proceeding with reset anyway\n");
15173
15174clear:
15175
15176    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15177    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15178
15179    return (0);
15180}
15181
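/*
 * Shadow copy of MAC control register addresses and their original values,
 * filled in by bxe_prev_unload_close_mac() and used to restore the registers
 * after the common reset. An address of zero means that register was left
 * untouched.
 */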
15182struct bxe_mac_vals {
15183    uint32_t xmac_addr;
15184    uint32_t xmac_val;
15185    uint32_t emac_addr;
15186    uint32_t emac_val;
15187    uint32_t umac_addr;
15188    uint32_t umac_val;
15189    uint32_t bmac_addr;
15190    uint32_t bmac_val[2];
15191};
15192
15193static void
15194bxe_prev_unload_close_mac(struct bxe_softc *sc,
15195                          struct bxe_mac_vals *vals)
15196{
15197    uint32_t val, base_addr, offset, mask, reset_reg;
15198    uint8_t mac_stopped = FALSE;
15199    uint8_t port = SC_PORT(sc);
15200    uint32_t wb_data[2];
15201
15202    /* reset addresses as they also mark which values were changed */
15203    vals->bmac_addr = 0;
15204    vals->umac_addr = 0;
15205    vals->xmac_addr = 0;
15206    vals->emac_addr = 0;
15207
15208    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15209
15210    if (!CHIP_IS_E3(sc)) {
15211        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15212        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15213        if ((mask & reset_reg) && val) {
15214            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15215            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15216                                    : NIG_REG_INGRESS_BMAC0_MEM;
15217            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15218                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15219
15220            /*
15221             * use rd/wr since we cannot use dmae. This is safe
15222             * since MCP won't access the bus due to the request
15223             * to unload, and no function on the path can be
15224             * loaded at this time.
15225             */
15226            wb_data[0] = REG_RD(sc, base_addr + offset);
15227            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15228            vals->bmac_addr = base_addr + offset;
15229            vals->bmac_val[0] = wb_data[0];
15230            vals->bmac_val[1] = wb_data[1];
15231            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15232            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15233            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15234        }
15235
15236        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15237        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15238        vals->emac_val = REG_RD(sc, vals->emac_addr);
15239        REG_WR(sc, vals->emac_addr, 0);
15240        mac_stopped = TRUE;
15241    } else {
15242        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15243            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15244            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15245            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15246            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15247            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15248            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15249            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15250            REG_WR(sc, vals->xmac_addr, 0);
15251            mac_stopped = TRUE;
15252        }
15253
15254        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15255        if (mask & reset_reg) {
15256            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15257            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15258            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15259            vals->umac_val = REG_RD(sc, vals->umac_addr);
15260            REG_WR(sc, vals->umac_addr, 0);
15261            mac_stopped = TRUE;
15262        }
15263    }
15264
15265    if (mac_stopped) {
15266        DELAY(20000);
15267    }
15268}
15269
15270#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15271#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15272#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15273#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
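/*
 * The UNDI producer for each port is a single 32-bit word in TSTORM internal
 * memory: the RCQ producer occupies the low 16 bits and the RX BD producer
 * the high 16 bits, as encoded/decoded by the macros above.
 */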
15274
15275static void
15276bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15277                         uint8_t          port,
15278                         uint8_t          inc)
15279{
15280    uint16_t rcq, bd;
15281    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15282
15283    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15284    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15285
15286    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15287    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15288
15289    BLOGD(sc, DBG_LOAD,
15290          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15291          port, bd, rcq);
15292}
15293
15294static int
15295bxe_prev_unload_common(struct bxe_softc *sc)
15296{
15297    uint32_t reset_reg, tmp_reg = 0, rc;
15298    uint8_t prev_undi = FALSE;
15299    struct bxe_mac_vals mac_vals;
15300    uint32_t timer_count = 1000;
15301    uint32_t prev_brb;
15302
15303    /*
15304     * It is possible that a previous function received the 'common' answer
15305     * but has not loaded yet, creating a scenario where multiple functions
15306     * receive 'common' on the same path.
15307     */
15308    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15309
15310    memset(&mac_vals, 0, sizeof(mac_vals));
15311
15312    if (bxe_prev_is_path_marked(sc)) {
15313        return (bxe_prev_mcp_done(sc));
15314    }
15315
15316    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15317
15318    /* Reset should be performed after BRB is emptied */
15319    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15320        /* Close the MAC Rx to prevent BRB from filling up */
15321        bxe_prev_unload_close_mac(sc, &mac_vals);
15322
15323        /* close LLH filters towards the BRB */
15324        elink_set_rx_filter(&sc->link_params, 0);
15325
15326        /*
15327         * Check if the UNDI driver was previously loaded.
15328         * The UNDI driver initializes the normal doorbell CID offset to 0x7.
15329         */
15330        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15331            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15332            if (tmp_reg == 0x7) {
15333                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15334                prev_undi = TRUE;
15335                /* clear the UNDI indication */
15336                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15337                /* clear possible idle check errors */
15338                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15339            }
15340        }
15341
15342        /* wait until BRB is empty */
15343        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15344        while (timer_count) {
15345            prev_brb = tmp_reg;
15346
15347            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15348            if (!tmp_reg) {
15349                break;
15350            }
15351
15352            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15353
15354            /* reset timer as long as BRB actually gets emptied */
15355            if (prev_brb > tmp_reg) {
15356                timer_count = 1000;
15357            } else {
15358                timer_count--;
15359            }
15360
15361            /* If UNDI resides in memory, manually advance its producers */
15362            if (prev_undi) {
15363                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15364            }
15365
15366            DELAY(10);
15367        }
15368
15369        if (!timer_count) {
15370            BLOGE(sc, "Failed to empty BRB\n");
15371        }
15372    }
15373
15374    /* No packets are in the pipeline, path is ready for reset */
15375    bxe_reset_common(sc);
15376
15377    if (mac_vals.xmac_addr) {
15378        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15379    }
15380    if (mac_vals.umac_addr) {
15381        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15382    }
15383    if (mac_vals.emac_addr) {
15384        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15385    }
15386    if (mac_vals.bmac_addr) {
15387        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15388        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15389    }
15390
15391    rc = bxe_prev_mark_path(sc, prev_undi);
15392    if (rc) {
15393        bxe_prev_mcp_done(sc);
15394        return (rc);
15395    }
15396
15397    return (bxe_prev_mcp_done(sc));
15398}
15399
15400static int
15401bxe_prev_unload_uncommon(struct bxe_softc *sc)
15402{
15403    int rc;
15404
15405    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15406
15407    /* Test if previous unload process was already finished for this path */
15408    if (bxe_prev_is_path_marked(sc)) {
15409        return (bxe_prev_mcp_done(sc));
15410    }
15411
15412    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15413
15414    /*
15415     * If the function has FLR capabilities and the existing FW version matches
15416     * the one required, then FLR is sufficient to clean any residue left by
15417     * the previous driver.
15418     */
15419    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15420    if (!rc) {
15421        /* fw version is good */
15422        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15423        rc = bxe_do_flr(sc);
15424    }
15425
15426    if (!rc) {
15427        /* FLR was performed */
15428        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15429        return (0);
15430    }
15431
15432    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15433
15434    /* Close the MCP request, return failure */
15435    rc = bxe_prev_mcp_done(sc);
15436    if (!rc) {
15437        rc = BXE_PREV_WAIT_NEEDED;
15438    }
15439
15440    return (rc);
15441}
15442
15443static int
15444bxe_prev_unload(struct bxe_softc *sc)
15445{
15446    int time_counter = 10;
15447    uint32_t fw, hw_lock_reg, hw_lock_val;
15448    uint32_t rc = 0;
15449
15450    /*
15451     * Clear any HW errors that may have resulted from an interrupted
15452     * DMAE transaction.
15453     */
15454    bxe_prev_interrupted_dmae(sc);
15455
15456    /* Release previously held locks */
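    /*
     * Each PF has its own driver-control register, spaced 8 bytes apart:
     * functions 0-5 start at MISC_REG_DRIVER_CONTROL_1, functions 6-7 at
     * MISC_REG_DRIVER_CONTROL_7.
     */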
15457    hw_lock_reg =
15458        (SC_FUNC(sc) <= 5) ?
15459            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15460            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15461
15462    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15463    if (hw_lock_val) {
15464        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15465            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15466            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15467                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15468        }
15469        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15470        REG_WR(sc, hw_lock_reg, 0xffffffff);
15471    } else {
15472        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15473    }
15474
15475    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15476        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15477        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15478    }
15479
15480    do {
15481        /* Lock MCP using an unload request */
15482        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15483        if (!fw) {
15484            BLOGE(sc, "MCP response failure, aborting\n");
15485            rc = -1;
15486            break;
15487        }
15488
15489        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15490            rc = bxe_prev_unload_common(sc);
15491            break;
15492        }
15493
15494        /* a non-common reply from the MCP might require looping */
15495        rc = bxe_prev_unload_uncommon(sc);
15496        if (rc != BXE_PREV_WAIT_NEEDED) {
15497            break;
15498        }
15499
15500        DELAY(20000);
15501    } while (--time_counter);
15502
15503    if (!time_counter || rc) {
15504        BLOGE(sc, "Failed to unload previous driver!"
15505            " time_counter %d rc %d\n", time_counter, rc);
15506        rc = -1;
15507    }
15508
15509    return (rc);
15510}
15511
15512void
15513bxe_dcbx_set_state(struct bxe_softc *sc,
15514                   uint8_t          dcb_on,
15515                   uint32_t         dcbx_enabled)
15516{
15517    if (!CHIP_IS_E1x(sc)) {
15518        sc->dcb_state = dcb_on;
15519        sc->dcbx_enabled = dcbx_enabled;
15520    } else {
15521        sc->dcb_state = FALSE;
15522        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15523    }
15524    BLOGD(sc, DBG_LOAD,
15525          "DCB state [%s:%s]\n",
15526          dcb_on ? "ON" : "OFF",
15527          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15528          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15529          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15530          "on-chip with negotiation" : "invalid");
15531}
15532
15533/* must be called after sriov-enable */
15534static int
15535bxe_set_qm_cid_count(struct bxe_softc *sc)
15536{
15537    int cid_count = BXE_L2_MAX_CID(sc);
15538
15539    if (IS_SRIOV(sc)) {
15540        cid_count += BXE_VF_CIDS;
15541    }
15542
15543    if (CNIC_SUPPORT(sc)) {
15544        cid_count += CNIC_CID_MAX;
15545    }
15546
15547    return (roundup(cid_count, QM_CID_ROUND));
15548}
15549
15550static void
15551bxe_init_multi_cos(struct bxe_softc *sc)
15552{
15553    int pri, cos;
15554
15555    uint32_t pri_map = 0; /* XXX change to user config */
15556
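    /*
     * pri_map packs one 4-bit CoS value per priority: bits [3:0] for
     * priority 0, bits [7:4] for priority 1, and so on up to
     * BXE_MAX_PRIORITY - 1.
     */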
15557    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15558        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15559        if (cos < sc->max_cos) {
15560            sc->prio_to_cos[pri] = cos;
15561        } else {
15562            BLOGW(sc, "Invalid COS %d for priority %d "
15563                      "(max COS is %d), setting to 0\n",
15564                  cos, pri, (sc->max_cos - 1));
15565            sc->prio_to_cos[pri] = 0;
15566        }
15567    }
15568}
15569
15570static int
15571bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15572{
15573    struct bxe_softc *sc;
15574    int error, result;
15575
15576    result = 0;
15577    error = sysctl_handle_int(oidp, &result, 0, req);
15578
15579    if (error || !req->newptr) {
15580        return (error);
15581    }
15582
15583    if (result == 1) {
15584        uint32_t  temp;
15585        sc = (struct bxe_softc *)arg1;
15586
15587        BLOGI(sc, "... dumping driver state ...\n");
15588        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15589        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15590    }
15591
15592    return (error);
15593}
15594
15595static int
15596bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS)
15597{
15598    struct bxe_softc *sc;
15599    int error, result;
15600
15601    result = 0;
15602    error = sysctl_handle_int(oidp, &result, 0, req);
15603
15604    if (error || !req->newptr) {
15605        return (error);
15606    }
15607
15608    if (result == 1) {
15609        sc = (struct bxe_softc *)arg1;
15610
15611        BLOGI(sc, "... grcdump start ...\n");
15612        bxe_grc_dump(sc);
15613        BLOGI(sc, "... grcdump done ...\n");
15614    }
15615
15616    return (error);
15617}
15618
15619static int
15620bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15621{
15622    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15623    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15624    uint32_t *offset;
15625    uint64_t value = 0;
15626    int index = (int)arg2;
15627
15628    if (index >= BXE_NUM_ETH_STATS) {
15629        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15630        return (-1);
15631    }
15632
15633    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15634
15635    switch (bxe_eth_stats_arr[index].size) {
15636    case 4:
15637        value = (uint64_t)*offset;
15638        break;
15639    case 8:
15640        value = HILO_U64(*offset, *(offset + 1));
15641        break;
15642    default:
15643        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15644              index, bxe_eth_stats_arr[index].size);
15645        return (-1);
15646    }
15647
15648    return (sysctl_handle_64(oidp, &value, 0, req));
15649}
15650
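/*
 * arg2 packs the fastpath index in the upper 16 bits and the queue stat
 * index in the lower 16 bits; see the q_stat encoding in bxe_add_sysctls().
 */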
15651static int
15652bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15653{
15654    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15655    uint32_t *eth_stats;
15656    uint32_t *offset;
15657    uint64_t value = 0;
15658    uint32_t q_stat = (uint32_t)arg2;
15659    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15660    uint32_t index = (q_stat & 0xffff);
15661
15662    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15663
15664    if (index >= BXE_NUM_ETH_Q_STATS) {
15665        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15666        return (-1);
15667    }
15668
15669    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15670
15671    switch (bxe_eth_q_stats_arr[index].size) {
15672    case 4:
15673        value = (uint64_t)*offset;
15674        break;
15675    case 8:
15676        value = HILO_U64(*offset, *(offset + 1));
15677        break;
15678    default:
15679        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15680              index, bxe_eth_q_stats_arr[index].size);
15681        return (-1);
15682    }
15683
15684    return (sysctl_handle_64(oidp, &value, 0, req));
15685}
15686
15687static void
15688bxe_add_sysctls(struct bxe_softc *sc)
15689{
15690    struct sysctl_ctx_list *ctx;
15691    struct sysctl_oid_list *children;
15692    struct sysctl_oid *queue_top, *queue;
15693    struct sysctl_oid_list *queue_top_children, *queue_children;
15694    char queue_num_buf[32];
15695    uint32_t q_stat;
15696    int i, j;
15697
15698    ctx = device_get_sysctl_ctx(sc->dev);
15699    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15700
15701    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15702                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15703                      "version");
15704
15705    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15706             BCM_5710_FW_MAJOR_VERSION,
15707             BCM_5710_FW_MINOR_VERSION,
15708             BCM_5710_FW_REVISION_VERSION,
15709             BCM_5710_FW_ENGINEERING_VERSION);
15710
15711    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15712        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15713         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15714         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15715         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15716                                                                "Unknown"));
15717    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15718                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15719                    "multifunction vnics per port");
15720
15721    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15722        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15723         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15724         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15725                                              "???GT/s"),
15726        sc->devinfo.pcie_link_width);
15727
15728    sc->debug = bxe_debug;
15729
15730#if __FreeBSD_version >= 900000
15731    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15732                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15733                      "bootcode version");
15734    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15735                      CTLFLAG_RD, sc->fw_ver_str, 0,
15736                      "firmware version");
15737    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15738                      CTLFLAG_RD, sc->mf_mode_str, 0,
15739                      "multifunction mode");
15740    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15741                      CTLFLAG_RD, sc->mac_addr_str, 0,
15742                      "mac address");
15743    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15744                      CTLFLAG_RD, sc->pci_link_str, 0,
15745                      "pci link status");
15746    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15747                    CTLFLAG_RW, &sc->debug,
15748                    "debug logging mode");
15749#else
15750    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15751                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15752                      "bootcode version");
15753    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15754                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15755                      "firmware version");
15756    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15757                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15758                      "multifunction mode");
15759    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15760                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15761                      "mac address");
15762    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15763                      CTLFLAG_RD, &sc->pci_link_str, 0,
15764                      "pci link status");
15765    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15766                    CTLFLAG_RW, &sc->debug, 0,
15767                    "debug logging mode");
15768#endif /* #if __FreeBSD_version >= 900000 */
15769
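    /*
     * Illustrative usage (unit 0 assumed): "sysctl dev.bxe.0.trigger_grcdump=1"
     * asks the driver to capture a GRC dump; "dev.bxe.0.grcdump_done" becomes
     * nonzero once the dump has completed.
     */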
15770    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump",
15771                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15772                    bxe_sysctl_trigger_grcdump, "IU",
15773                    "set by driver when a grcdump is needed");
15774
15775    sc->grcdump_done = 0;
15776    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15777                   CTLFLAG_RW, &sc->grcdump_done, 0,
15778                   "set by driver when grcdump is done");
15779
15780    sc->rx_budget = bxe_rx_budget;
15781    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15782                    CTLFLAG_RW, &sc->rx_budget, 0,
15783                    "rx processing budget");
15784
15785    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15786                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15787                    bxe_sysctl_state, "IU", "dump driver state");
15788
15789    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15790        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15791                        bxe_eth_stats_arr[i].string,
15792                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15793                        bxe_sysctl_eth_stat, "LU",
15794                        bxe_eth_stats_arr[i].string);
15795    }
15796
15797    /* add a new parent node for all queues "dev.bxe.#.queue" */
15798    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15799                                CTLFLAG_RD, NULL, "queue");
15800    queue_top_children = SYSCTL_CHILDREN(queue_top);
15801
15802    for (i = 0; i < sc->num_queues; i++) {
15803        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15804        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15805        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15806                                queue_num_buf, CTLFLAG_RD, NULL,
15807                                "single queue");
15808        queue_children = SYSCTL_CHILDREN(queue);
15809
15810        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
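            /* encode the queue index in the upper 16 bits and the stat
             * index in the lower 16 bits for bxe_sysctl_eth_q_stat
             */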
15811            q_stat = ((i << 16) | j);
15812            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15813                            bxe_eth_q_stats_arr[j].string,
15814                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15815                            bxe_sysctl_eth_q_stat, "LU",
15816                            bxe_eth_q_stats_arr[j].string);
15817        }
15818    }
15819}
15820
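/*
 * Allocate a buf_ring(9) TX ring for each fastpath queue (only available on
 * FreeBSD 8.0 and later). Returns 0 on success or -1 if any allocation fails.
 */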
15821static int
15822bxe_alloc_buf_rings(struct bxe_softc *sc)
15823{
15824#if __FreeBSD_version >= 800000
15825
15826    int i;
15827    struct bxe_fastpath *fp;
15828
15829    for (i = 0; i < sc->num_queues; i++) {
15830
15831        fp = &sc->fp[i];
15832
15833        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15834                                   M_NOWAIT, &fp->tx_mtx);
15835        if (fp->tx_br == NULL)
15836            return (-1);
15837    }
15838#endif
15839    return (0);
15840}
15841
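/*
 * Free the per-queue TX buf_rings previously allocated by
 * bxe_alloc_buf_rings().
 */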
15842static void
15843bxe_free_buf_rings(struct bxe_softc *sc)
15844{
15845#if __FreeBSD_version >= 800000
15846
15847    int i;
15848    struct bxe_fastpath *fp;
15849
15850    for (i = 0; i < sc->num_queues; i++) {
15851
15852        fp = &sc->fp[i];
15853
15854        if (fp->tx_br) {
15855            buf_ring_free(fp->tx_br, M_DEVBUF);
15856            fp->tx_br = NULL;
15857        }
15858    }
15859
15860#endif
15861}
15862
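/*
 * Initialize the per-fastpath TX and RX mutexes, one pair per queue.
 */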
15863static void
15864bxe_init_fp_mutexs(struct bxe_softc *sc)
15865{
15866    int i;
15867    struct bxe_fastpath *fp;
15868
15869    for (i = 0; i < sc->num_queues; i++) {
15870
15871        fp = &sc->fp[i];
15872
15873        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15874            "bxe%d_fp%d_tx_lock", sc->unit, i);
15875        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15876
15877        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15878            "bxe%d_fp%d_rx_lock", sc->unit, i);
15879        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15880    }
15881}
15882
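/*
 * Destroy the per-fastpath TX and RX mutexes, if they were initialized.
 */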
15883static void
15884bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15885{
15886    int i;
15887    struct bxe_fastpath *fp;
15888
15889    for (i = 0; i < sc->num_queues; i++) {
15890
15891        fp = &sc->fp[i];
15892
15893        if (mtx_initialized(&fp->tx_mtx)) {
15894            mtx_destroy(&fp->tx_mtx);
15895        }
15896
15897        if (mtx_initialized(&fp->rx_mtx)) {
15898            mtx_destroy(&fp->rx_mtx);
15899        }
15900    }
15901}
15902
15903
15904/*
15905 * Device attach function.
15906 *
15907 * Allocates device resources, performs secondary chip identification, and
15908 * initializes driver instance variables. This function is called from driver
15909 * load after a successful probe.
15910 *
15911 * Returns:
15912 *   0 = Success, >0 = Failure
15913 */
15914static int
15915bxe_attach(device_t dev)
15916{
15917    struct bxe_softc *sc;
15918
15919    sc = device_get_softc(dev);
15920
15921    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15922
15923    sc->state = BXE_STATE_CLOSED;
15924
15925    sc->dev  = dev;
15926    sc->unit = device_get_unit(dev);
15927
15928    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15929
15930    sc->pcie_bus    = pci_get_bus(dev);
15931    sc->pcie_device = pci_get_slot(dev);
15932    sc->pcie_func   = pci_get_function(dev);
15933
15934    /* enable bus master capability */
15935    pci_enable_busmaster(dev);
15936
15937    /* get the BARs */
15938    if (bxe_allocate_bars(sc) != 0) {
15939        return (ENXIO);
15940    }
15941
15942    /* initialize the mutexes */
15943    bxe_init_mutexes(sc);
15944
15945    /* prepare the periodic callout */
15946    callout_init(&sc->periodic_callout, 0);
15947
15948    /* prepare the chip taskqueue */
15949    sc->chip_tq_flags = CHIP_TQ_NONE;
15950    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15951             "bxe%d_chip_tq", sc->unit);
15952    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15953    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15954                                   taskqueue_thread_enqueue,
15955                                   &sc->chip_tq);
15956    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15957                            "%s", sc->chip_tq_name);
15958
15959    /* get device info and set params */
15960    if (bxe_get_device_info(sc) != 0) {
15961        BLOGE(sc, "failed to get device info\n");
15962        bxe_deallocate_bars(sc);
15963        pci_disable_busmaster(dev);
15964        return (ENXIO);
15965    }
15966
15967    /* get final misc params */
15968    bxe_get_params(sc);
15969
15970    /* set the default MTU (changed via ifconfig) */
15971    sc->mtu = ETHERMTU;
15972
15973    bxe_set_modes_bitmap(sc);
15974
15975    /* XXX
15976     * If in AFEX mode and the function is configured for FCoE
15977     * then bail... no L2 allowed.
15978     */
15979
15980    /* get phy settings from shmem and 'and' against admin settings */
15981    bxe_get_phy_info(sc);
15982
15983    /* initialize the FreeBSD ifnet interface */
15984    if (bxe_init_ifnet(sc) != 0) {
15985        bxe_release_mutexes(sc);
15986        bxe_deallocate_bars(sc);
15987        pci_disable_busmaster(dev);
15988        return (ENXIO);
15989    }
15990
15991    if (bxe_add_cdev(sc) != 0) {
15992        if (sc->ifp != NULL) {
15993            ether_ifdetach(sc->ifp);
15994        }
15995        ifmedia_removeall(&sc->ifmedia);
15996        bxe_release_mutexes(sc);
15997        bxe_deallocate_bars(sc);
15998        pci_disable_busmaster(dev);
15999        return (ENXIO);
16000    }
16001
16002    /* allocate device interrupts */
16003    if (bxe_interrupt_alloc(sc) != 0) {
16004        bxe_del_cdev(sc);
16005        if (sc->ifp != NULL) {
16006            ether_ifdetach(sc->ifp);
16007        }
16008        ifmedia_removeall(&sc->ifmedia);
16009        bxe_release_mutexes(sc);
16010        bxe_deallocate_bars(sc);
16011        pci_disable_busmaster(dev);
16012        return (ENXIO);
16013    }
16014
16015    bxe_init_fp_mutexs(sc);
16016
16017    if (bxe_alloc_buf_rings(sc) != 0) {
16018        bxe_free_buf_rings(sc);
16019        bxe_interrupt_free(sc);
16020        bxe_del_cdev(sc);
16021        if (sc->ifp != NULL) {
16022            ether_ifdetach(sc->ifp);
16023        }
16024        ifmedia_removeall(&sc->ifmedia);
16025        bxe_release_mutexes(sc);
16026        bxe_deallocate_bars(sc);
16027        pci_disable_busmaster(dev);
16028        return (ENXIO);
16029    }
16030
16031    /* allocate ilt */
16032    if (bxe_alloc_ilt_mem(sc) != 0) {
16033        bxe_free_buf_rings(sc);
16034        bxe_interrupt_free(sc);
16035        bxe_del_cdev(sc);
16036        if (sc->ifp != NULL) {
16037            ether_ifdetach(sc->ifp);
16038        }
16039        ifmedia_removeall(&sc->ifmedia);
16040        bxe_release_mutexes(sc);
16041        bxe_deallocate_bars(sc);
16042        pci_disable_busmaster(dev);
16043        return (ENXIO);
16044    }
16045
16046    /* allocate the host hardware/software hsi structures */
16047    if (bxe_alloc_hsi_mem(sc) != 0) {
16048        bxe_free_ilt_mem(sc);
16049        bxe_free_buf_rings(sc);
16050        bxe_interrupt_free(sc);
16051        bxe_del_cdev(sc);
16052        if (sc->ifp != NULL) {
16053            ether_ifdetach(sc->ifp);
16054        }
16055        ifmedia_removeall(&sc->ifmedia);
16056        bxe_release_mutexes(sc);
16057        bxe_deallocate_bars(sc);
16058        pci_disable_busmaster(dev);
16059        return (ENXIO);
16060    }
16061
16062    /* need to reset chip if UNDI was active */
16063    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16064        /* init fw_seq */
16065        sc->fw_seq =
16066            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16067             DRV_MSG_SEQ_NUMBER_MASK);
16068        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16069        bxe_prev_unload(sc);
16070    }
16071
16072#if 1
16073    /* XXX */
16074    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16075#else
16076    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16077        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16078        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16079        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16080        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16081        bxe_dcbx_init_params(sc);
16082    } else {
16083        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16084    }
16085#endif
16086
16087    /* calculate qm_cid_count */
16088    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16089    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16090
16091    sc->max_cos = 1;
16092    bxe_init_multi_cos(sc);
16093
16094    bxe_add_sysctls(sc);
16095
16096    return (0);
16097}
16098
16099/*
16100 * Device detach function.
16101 *
16102 * Stops the controller, resets the controller, and releases resources.
16103 *
16104 * Returns:
16105 *   0 = Success, >0 = Failure
16106 */
16107static int
16108bxe_detach(device_t dev)
16109{
16110    struct bxe_softc *sc;
16111    if_t ifp;
16112
16113    sc = device_get_softc(dev);
16114
16115    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16116
16117    ifp = sc->ifp;
16118    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16119        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16120        return (EBUSY);
16121    }
16122
16123    bxe_del_cdev(sc);
16124
16125    /* stop the periodic callout */
16126    bxe_periodic_stop(sc);
16127
16128    /* stop the chip taskqueue */
16129    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16130    if (sc->chip_tq) {
16131        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16132        taskqueue_free(sc->chip_tq);
16133        sc->chip_tq = NULL;
16134    }
16135
16136    /* stop and reset the controller if it was open */
16137    if (sc->state != BXE_STATE_CLOSED) {
16138        BXE_CORE_LOCK(sc);
16139        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16140        BXE_CORE_UNLOCK(sc);
16141    }
16142
16143    /* release the network interface */
16144    if (ifp != NULL) {
16145        ether_ifdetach(ifp);
16146    }
16147    ifmedia_removeall(&sc->ifmedia);
16148
16149    /* XXX do the following based on driver state... */
16150
16151    /* free the host hardware/software hsi structures */
16152    bxe_free_hsi_mem(sc);
16153
16154    /* free ilt */
16155    bxe_free_ilt_mem(sc);
16156
16157    bxe_free_buf_rings(sc);
16158
16159    /* release the interrupts */
16160    bxe_interrupt_free(sc);
16161
16162    /* Release the mutexes */
16163    bxe_destroy_fp_mutexs(sc);
16164    bxe_release_mutexes(sc);
16165
16166
16167    /* Release the PCIe BAR mapped memory */
16168    bxe_deallocate_bars(sc);
16169
16170    /* Release the FreeBSD interface. */
16171    if (sc->ifp != NULL) {
16172        if_free(sc->ifp);
16173    }
16174
16175    pci_disable_busmaster(dev);
16176
16177    return (0);
16178}
16179
16180/*
16181 * Device shutdown function.
16182 *
16183 * Stops and resets the controller.
16184 *
16185 * Returns:
16186 *   0 = Success
16187 */
16188static int
16189bxe_shutdown(device_t dev)
16190{
16191    struct bxe_softc *sc;
16192
16193    sc = device_get_softc(dev);
16194
16195    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16196
16197    /* stop the periodic callout */
16198    bxe_periodic_stop(sc);
16199
16200    BXE_CORE_LOCK(sc);
16201    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16202    BXE_CORE_UNLOCK(sc);
16203
16204    return (0);
16205}
16206
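/*
 * Acknowledge a status block through the IGU. The command address is the
 * per-SB interrupt ack register (8 bytes per status block) relative to the
 * IGU base; the actual write is performed by bxe_igu_ack_sb_gen().
 */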
16207void
16208bxe_igu_ack_sb(struct bxe_softc *sc,
16209               uint8_t          igu_sb_id,
16210               uint8_t          segment,
16211               uint16_t         index,
16212               uint8_t          op,
16213               uint8_t          update)
16214{
16215    uint32_t igu_addr = sc->igu_base_addr;
16216    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16217    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16218}
16219
16220static void
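/*
 * Issue an IGU status block cleanup command (not supported in BC mode).
 * The data and control words are written via GRC and then the CSTORM
 * cleanup bit for this SB is polled (up to 100 x 20ms) until it is set.
 */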
16221bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16222                     uint8_t          func,
16223                     uint8_t          idu_sb_id,
16224                     uint8_t          is_pf)
16225{
16226    uint32_t data, ctl, cnt = 100;
16227    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16228    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16229    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16230    uint32_t sb_bit =  1 << (idu_sb_id%32);
16231    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16232    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16233
16234    /* Not supported in BC mode */
16235    if (CHIP_INT_MODE_IS_BC(sc)) {
16236        return;
16237    }
16238
16239    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16240             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16241            IGU_REGULAR_CLEANUP_SET |
16242            IGU_REGULAR_BCLEANUP);
16243
16244    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16245           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16246           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16247
16248    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16249            data, igu_addr_data);
16250    REG_WR(sc, igu_addr_data, data);
16251
16252    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16253                      BUS_SPACE_BARRIER_WRITE);
16254    mb();
16255
16256    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16257            ctl, igu_addr_ctl);
16258    REG_WR(sc, igu_addr_ctl, ctl);
16259
16260    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16261                      BUS_SPACE_BARRIER_WRITE);
16262    mb();
16263
16264    /* wait for clean up to finish */
16265    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16266        DELAY(20000);
16267    }
16268
16269    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16270        BLOGD(sc, DBG_LOAD,
16271              "Unable to finish IGU cleanup: "
16272              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16273              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16274    }
16275}
16276
16277static void
16278bxe_igu_clear_sb(struct bxe_softc *sc,
16279                 uint8_t          idu_sb_id)
16280{
16281    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16282}
16283
16284
16285
16286
16287
16288
16289
16290/*******************/
16291/* ECORE CALLBACKS */
16292/*******************/
16293
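/*
 * Put the common blocks into reset by writing the RESET_REG_1/2 "clear"
 * registers; on E3 the MSTAT0/MSTAT1 blocks are included in the reset as
 * well.
 */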
16294static void
16295bxe_reset_common(struct bxe_softc *sc)
16296{
16297    uint32_t val = 0x1400;
16298
16299    /* reset_common */
16300    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16301
16302    if (CHIP_IS_E3(sc)) {
16303        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16304        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16305    }
16306
16307    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16308}
16309
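/*
 * Perform the common (path-wide) external PHY init through elink, passing
 * the shmem bases of both paths on non-E1x chips. This is skipped when the
 * management FW supports Link Flap Avoidance (LFA).
 */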
16310static void
16311bxe_common_init_phy(struct bxe_softc *sc)
16312{
16313    uint32_t shmem_base[2];
16314    uint32_t shmem2_base[2];
16315
16316    /* Avoid common init in case MFW supports LFA */
16317    if (SHMEM2_RD(sc, size) >
16318        (uint32_t)offsetof(struct shmem2_region,
16319                           lfa_host_addr[SC_PORT(sc)])) {
16320        return;
16321    }
16322
16323    shmem_base[0]  = sc->devinfo.shmem_base;
16324    shmem2_base[0] = sc->devinfo.shmem2_base;
16325
16326    if (!CHIP_IS_E1x(sc)) {
16327        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16328        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16329    }
16330
16331    bxe_acquire_phy_lock(sc);
16332    elink_common_init_phy(sc, shmem_base, shmem2_base,
16333                          sc->devinfo.chip_id, 0);
16334    bxe_release_phy_lock(sc);
16335}
16336
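/*
 * Disable this PF in hardware: clear the function enable bit in the IGU,
 * drop the PGLUE_B internal master enable, and clear the CFC weak enable.
 */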
16337static void
16338bxe_pf_disable(struct bxe_softc *sc)
16339{
16340    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16341
16342    val &= ~IGU_PF_CONF_FUNC_EN;
16343
16344    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16345    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16346    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16347}
16348
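/*
 * Program the PXP2 arbiter with read/write ordering derived from the PCIe
 * Device Control register (max payload and max read request sizes), unless
 * a read order was forced via the mrrs tunable.
 */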
16349static void
16350bxe_init_pxp(struct bxe_softc *sc)
16351{
16352    uint16_t devctl;
16353    int r_order, w_order;
16354
16355    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16356
16357    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16358
16359    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16360
16361    if (sc->mrrs == -1) {
16362        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16363    } else {
16364        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16365        r_order = sc->mrrs;
16366    }
16367
16368    ecore_init_pxp_arb(sc, r_order, w_order);
16369}
16370
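/*
 * Return the GRC "pretend function" register address for this function.
 * Each absolute function has its own pretend register, located at a fixed
 * stride from the F0 register.
 */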
16371static uint32_t
16372bxe_get_pretend_reg(struct bxe_softc *sc)
16373{
16374    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16375    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16376    return (base + (SC_ABS_FUNC(sc)) * stride);
16377}
16378
16379/*
16380 * Called only on E1H or E2.
16381 * When pretending to be PF, the pretend value is the function number 0..7.
16382 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16383 * combination.
16384 */
16385static int
16386bxe_pretend_func(struct bxe_softc *sc,
16387                 uint16_t         pretend_func_val)
16388{
16389    uint32_t pretend_reg;
16390
16391    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16392        return (-1);
16393    }
16394
16395    /* get my own pretend register */
16396    pretend_reg = bxe_get_pretend_reg(sc);
16397    REG_WR(sc, pretend_reg, pretend_func_val);
16398    REG_RD(sc, pretend_reg);
16399    return (0);
16400}
16401
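/* SR-IOV initialization hooks, currently empty stubs. */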
16402static void
16403bxe_iov_init_dmae(struct bxe_softc *sc)
16404{
16405    return;
16406}
16407
16408static void
16409bxe_iov_init_dq(struct bxe_softc *sc)
16410{
16411    return;
16412}
16413
16414/* send a NIG loopback debug packet */
16415static void
16416bxe_lb_pckt(struct bxe_softc *sc)
16417{
16418    uint32_t wb_write[3];
16419
16420    /* Ethernet source and destination addresses */
16421    wb_write[0] = 0x55555555;
16422    wb_write[1] = 0x55555555;
16423    wb_write[2] = 0x20;     /* SOP */
16424    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16425
16426    /* NON-IP protocol */
16427    wb_write[0] = 0x09000000;
16428    wb_write[1] = 0x55555555;
16429    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16430    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16431}
16432
16433/*
16434 * Some of the internal memories are not directly readable from the driver.
16435 * To test them we send debug packets.
16436 */
16437static int
16438bxe_int_mem_test(struct bxe_softc *sc)
16439{
16440    int factor;
16441    int count, i;
16442    uint32_t val = 0;
16443
16444    if (CHIP_REV_IS_FPGA(sc)) {
16445        factor = 120;
16446    } else if (CHIP_REV_IS_EMUL(sc)) {
16447        factor = 200;
16448    } else {
16449        factor = 1;
16450    }
16451
16452    /* disable inputs of parser neighbor blocks */
16453    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16454    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16455    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16456    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16457
16458    /*  write 0 to parser credits for CFC search request */
16459    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16460
16461    /* send Ethernet packet */
16462    bxe_lb_pckt(sc);
16463
16464    /* TODO: should the NIG statistics be reset here? */
16465    /* Wait until NIG register shows 1 packet of size 0x10 */
16466    count = 1000 * factor;
16467    while (count) {
16468        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16469        val = *BXE_SP(sc, wb_data[0]);
16470        if (val == 0x10) {
16471            break;
16472        }
16473
16474        DELAY(10000);
16475        count--;
16476    }
16477
16478    if (val != 0x10) {
16479        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16480        return (-1);
16481    }
16482
16483    /* wait until PRS register shows 1 packet */
16484    count = (1000 * factor);
16485    while (count) {
16486        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16487        if (val == 1) {
16488            break;
16489        }
16490
16491        DELAY(10000);
16492        count--;
16493    }
16494
16495    if (val != 0x1) {
16496        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16497        return (-2);
16498    }
16499
16500    /* Reset and init BRB, PRS */
16501    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16502    DELAY(50000);
16503    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16504    DELAY(50000);
16505    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16506    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16507
16508    /* Disable inputs of parser neighbor blocks */
16509    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16510    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16511    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16512    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16513
16514    /* Write 0 to parser credits for CFC search request */
16515    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16516
16517    /* send 10 Ethernet packets */
16518    for (i = 0; i < 10; i++) {
16519        bxe_lb_pckt(sc);
16520    }
16521
16522    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16523    count = (1000 * factor);
16524    while (count) {
16525        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16526        val = *BXE_SP(sc, wb_data[0]);
16527        if (val == 0xb0) {
16528            break;
16529        }
16530
16531        DELAY(10000);
16532        count--;
16533    }
16534
16535    if (val != 0xb0) {
16536        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16537        return (-3);
16538    }
16539
16540    /* Wait until PRS register shows 2 packets */
16541    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16542    if (val != 2) {
16543        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16544    }
16545
16546    /* Write 1 to parser credits for CFC search request */
16547    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16548
16549    /* Wait until PRS register shows 3 packets */
16550    DELAY(10000 * factor);
16551
16552    /* check that the PRS register now shows 3 packets */
16553    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16554    if (val != 3) {
16555        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16556    }
16557
16558    /* clear NIG EOP FIFO */
16559    for (i = 0; i < 11; i++) {
16560        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16561    }
16562
16563    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16564    if (val != 1) {
16565        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16566        return (-4);
16567    }
16568
16569    /* Reset and init BRB, PRS, NIG */
16570    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16571    DELAY(50000);
16572    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16573    DELAY(50000);
16574    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16575    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16576    if (!CNIC_SUPPORT(sc)) {
16577        /* set NIC mode */
16578        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16579    }
16580
16581    /* Enable inputs of parser neighbor blocks */
16582    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16583    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16584    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16585    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16586
16587    return (0);
16588}
16589
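/*
 * Enable fan failure detection via SPIO5 when the shared HW config (or the
 * PHY type, for boards that require it) indicates a fan is present.
 */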
16590static void
16591bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16592{
16593    int is_required;
16594    uint32_t val;
16595    int port;
16596
16597    is_required = 0;
16598    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16599           SHARED_HW_CFG_FAN_FAILURE_MASK);
16600
16601    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16602        is_required = 1;
16603    }
16604    /*
16605     * The fan failure mechanism is usually related to the PHY type since
16606     * the power consumption of the board is affected by the PHY. Currently,
16607     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16608     */
16609    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16610        for (port = PORT_0; port < PORT_MAX; port++) {
16611            is_required |= elink_fan_failure_det_req(sc,
16612                                                     sc->devinfo.shmem_base,
16613                                                     sc->devinfo.shmem2_base,
16614                                                     port);
16615        }
16616    }
16617
16618    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16619
16620    if (is_required == 0) {
16621        return;
16622    }
16623
16624    /* Fan failure is indicated by SPIO 5 */
16625    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16626
16627    /* set to active low mode */
16628    val = REG_RD(sc, MISC_REG_SPIO_INT);
16629    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16630    REG_WR(sc, MISC_REG_SPIO_INT, val);
16631
16632    /* enable interrupt to signal the IGU */
16633    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16634    val |= MISC_SPIO_SPIO5;
16635    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16636}
16637
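/*
 * Unmask attention interrupts in the individual HW blocks. A mask value of
 * zero leaves all attention sources enabled; the non-zero values keep
 * specific known-benign sources masked (see the inline comments below).
 */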
16638static void
16639bxe_enable_blocks_attention(struct bxe_softc *sc)
16640{
16641    uint32_t val;
16642
16643    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16644    if (!CHIP_IS_E1x(sc)) {
16645        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16646    } else {
16647        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16648    }
16649    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16650    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16651    /*
16652     * mask read length error interrupts in brb for parser
16653     * (parsing unit and 'checksum and crc' unit)
16654     * these errors are legal (PU reads fixed length and CAC can cause
16655     * read length error on truncated packets)
16656     */
16657    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16658    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16659    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16660    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16661    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16662    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16663/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16664/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16665    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16666    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16667    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16668/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16669/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16670    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16671    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16672    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16673    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16674/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16675/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16676
16677    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16678           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16679           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16680    if (!CHIP_IS_E1x(sc)) {
16681        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16682                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16683    }
16684    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16685
16686    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16687    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16688    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16689/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16690
16691    if (!CHIP_IS_E1x(sc)) {
16692        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16693        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16694    }
16695
16696    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16697    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16698/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16699    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16700}
16701
16702/**
16703 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16704 *
16705 * @sc:     driver handle
16706 */
16707static int
16708bxe_init_hw_common(struct bxe_softc *sc)
16709{
16710    uint8_t abs_func_id;
16711    uint32_t val;
16712
16713    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16714          SC_ABS_FUNC(sc));
16715
16716    /*
16717     * take the RESET lock to protect undi_unload flow from accessing
16718     * registers while we are resetting the chip
16719     */
16720    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16721
16722    bxe_reset_common(sc);
16723
16724    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16725
16726    val = 0xfffc;
16727    if (CHIP_IS_E3(sc)) {
16728        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16729        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16730    }
16731
16732    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16733
16734    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16735
16736    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16737    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16738
16739    if (!CHIP_IS_E1x(sc)) {
16740        /*
16741         * In 4-port or 2-port mode we need to turn off master-enable for
16742         * everyone. After that we turn it back on for ourselves. So, we
16743         * disregard multi-function and always disable all functions on the
16744         * given path; this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
16745         */
16746        for (abs_func_id = SC_PATH(sc);
16747             abs_func_id < (E2_FUNC_MAX * 2);
16748             abs_func_id += 2) {
16749            if (abs_func_id == SC_ABS_FUNC(sc)) {
16750                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16751                continue;
16752            }
16753
16754            bxe_pretend_func(sc, abs_func_id);
16755
16756            /* clear pf enable */
16757            bxe_pf_disable(sc);
16758
16759            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16760        }
16761    }
16762
16763    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16764
16765    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16766
16767    if (CHIP_IS_E1(sc)) {
16768        /*
16769         * enable HW interrupt from PXP on USDM overflow
16770         * bit 16 on INT_MASK_0
16771         */
16772        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16773    }
16774
16775    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16776    bxe_init_pxp(sc);
16777
16778#ifdef __BIG_ENDIAN
16779    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16780    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16781    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16782    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16783    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16784    /* make sure this value is 0 */
16785    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16786
16787    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16788    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16789    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16790    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16791    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16792#endif
16793
16794    ecore_ilt_init_page_size(sc, INITOP_SET);
16795
16796    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16797        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16798    }
16799
16800    /* let the HW do its magic... */
16801    DELAY(100000);
16802
16803    /* finish PXP init */
16804    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16805    if (val != 1) {
16806        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16807            val);
16808        return (-1);
16809    }
16810    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16811    if (val != 1) {
16812        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16813        return (-1);
16814    }
16815
16816    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16817
16818    /*
16819     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16820     * entries with value "0" and valid bit on. This needs to be done by the
16821     * first PF that is loaded in a path (i.e. common phase)
16822     */
16823    if (!CHIP_IS_E1x(sc)) {
16824/*
16825 * In E2 there is a bug in the timers block that can cause function 6 / 7
16826 * (i.e. vnic3) to start even if it is marked as "scan-off".
16827 * This occurs when a different function (func2,3) is being marked
16828 * as "scan-off". Real-life scenario for example: if a driver is being
16829 * load-unloaded while func6,7 are down. This will cause the timer to access
16830 * the ilt, translate to a logical address and send a request to read/write.
16831 * Since the ilt for the function that is down is not valid, this will cause
16832 * a translation error which is unrecoverable.
16833 * The Workaround is intended to make sure that when this happens nothing
16834 * fatal will occur. The workaround:
16835 *  1.  First PF driver which loads on a path will:
16836 *      a.  After taking the chip out of reset, by using pretend,
16837 *          it will write "0" to the following registers of
16838 *          the other vnics.
16839 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16840 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16841 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16842 *          And for itself it will write '1' to
16843 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16844 *          dmae-operations (writing to pram for example.)
16845 *          note: can be done for only function 6,7 but cleaner this
16846 *            way.
16847 *      b.  Write zero+valid to the entire ILT.
16848 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16849 *          VNIC3 (of that port). The range allocated will be the
16850 *          entire ILT. This is needed to prevent an ILT range error.
16851 *  2.  Any PF driver load flow:
16852 *      a.  ILT update with the physical addresses of the allocated
16853 *          logical pages.
16854 *      b.  Wait 20msec. - note that this timeout is needed to make
16855 *          sure there are no requests in one of the PXP internal
16856 *          queues with "old" ILT addresses.
16857 *      c.  PF enable in the PGLC.
16858 *      d.  Clear the was_error of the PF in the PGLC. (could have
16859 *          occurred while driver was down)
16860 *      e.  PF enable in the CFC (WEAK + STRONG)
16861 *      f.  Timers scan enable
16862 *  3.  PF driver unload flow:
16863 *      a.  Clear the Timers scan_en.
16864 *      b.  Polling for scan_on=0 for that PF.
16865 *      c.  Clear the PF enable bit in the PXP.
16866 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16867 *      e.  Write zero+valid to all ILT entries (The valid bit must
16868 *          stay set)
16869 *      f.  If this is VNIC 3 of a port then also init
16870 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16871 *          to the last entry in the ILT.
16872 *
16873 *      Notes:
16874 *      Currently the PF error in the PGLC is non-recoverable.
16875 *      In the future there will be a recovery routine for this error.
16876 *      Currently attention is masked.
16877 *      Having an MCP lock on the load/unload process does not guarantee that
16878 *      there is no Timer disable during Func6/7 enable. This is because the
16879 *      Timers scan is currently being cleared by the MCP on FLR.
16880 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16881 *      there is error before clearing it. But the flow above is simpler and
16882 *      more general.
16883 *      All ILT entries are written by zero+valid and not just PF6/7
16884 *      ILT entries since in the future the ILT entries allocation for
16885 *      PF-s might be dynamic.
16886 */
16887        struct ilt_client_info ilt_cli;
16888        struct ecore_ilt ilt;
16889
16890        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16891        memset(&ilt, 0, sizeof(struct ecore_ilt));
16892
16893        /* initialize dummy TM client */
16894        ilt_cli.start      = 0;
16895        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16896        ilt_cli.client_num = ILT_CLIENT_TM;
16897
16898        /*
16899         * Step 1: set zeroes to all ilt page entries with valid bit on
16900         * Step 2: set the timers first/last ilt entry to point
16901         * to the entire range to prevent ILT range error for 3rd/4th
16902         * vnic (this code assumes existence of the vnic)
16903         *
16904         * both steps performed by call to ecore_ilt_client_init_op()
16905         * with dummy TM client
16906         *
16907         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16908         * and its counterpart are split registers
16909         */
16910
16911        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16912        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16913        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16914
16915        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16916        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16917        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16918    }
16919
16920    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16921    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16922
16923    if (!CHIP_IS_E1x(sc)) {
16924        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16925                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16926
16927        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16928        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16929
16930        /* let the HW do its magic... */
16931        do {
16932            DELAY(200000);
16933            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16934        } while (factor-- && (val != 1));
16935
16936        if (val != 1) {
16937            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16938            return (-1);
16939        }
16940    }
16941
16942    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16943
16944    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16945
16946    bxe_iov_init_dmae(sc);
16947
16948    /* clean the DMAE memory */
16949    sc->dmae_ready = 1;
16950    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16951
16952    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16953
16954    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16955
16956    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16957
16958    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16959
16960    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16961    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16962    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16963    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16964
16965    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16966
16967    /* QM queues pointers table */
16968    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16969
16970    /* soft reset pulse */
16971    REG_WR(sc, QM_REG_SOFT_RESET, 1);
16972    REG_WR(sc, QM_REG_SOFT_RESET, 0);
16973
16974    if (CNIC_SUPPORT(sc))
16975        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
16976
16977    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
16978    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
16979    if (!CHIP_REV_IS_SLOW(sc)) {
16980        /* enable hw interrupt from doorbell Q */
16981        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16982    }
16983
16984    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16985
16986    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16987    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
16988
16989    if (!CHIP_IS_E1(sc)) {
16990        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
16991    }
16992
16993    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
16994        if (IS_MF_AFEX(sc)) {
16995            /*
16996             * configure that AFEX and VLAN headers must be
16997             * received in AFEX mode
16998             */
16999            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17000            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17001            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17002            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17003            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17004        } else {
17005            /*
17006             * Bit-map indicating which L2 hdrs may appear
17007             * after the basic Ethernet header
17008             */
17009            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17010                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17011        }
17012    }
17013
17014    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17015    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17016    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17017    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17018
17019    if (!CHIP_IS_E1x(sc)) {
17020        /* reset VFC memories */
17021        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17022               VFC_MEMORIES_RST_REG_CAM_RST |
17023               VFC_MEMORIES_RST_REG_RAM_RST);
17024        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17025               VFC_MEMORIES_RST_REG_CAM_RST |
17026               VFC_MEMORIES_RST_REG_RAM_RST);
17027
17028        DELAY(20000);
17029    }
17030
17031    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17032    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17033    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17034    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17035
17036    /* sync semi rtc */
17037    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17038           0x80000000);
17039    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17040           0x80000000);
17041
17042    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17043    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17044    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17045
17046    if (!CHIP_IS_E1x(sc)) {
17047        if (IS_MF_AFEX(sc)) {
17048            /*
17049             * configure that AFEX and VLAN headers must be
17050             * sent in AFEX mode
17051             */
17052            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17053            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17054            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17055            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17056            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17057        } else {
17058            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17059                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17060        }
17061    }
17062
17063    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17064
17065    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17066
17067    if (CNIC_SUPPORT(sc)) {
17068        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17069        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17070        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17071        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17072        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17073        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17074        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17075        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17076        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17077        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17078    }
17079    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17080
17081    if (sizeof(union cdu_context) != 1024) {
17082        /* we currently assume that a context is 1024 bytes */
17083        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17084              (long)sizeof(union cdu_context));
17085    }
17086
17087    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17088    val = (4 << 24) + (0 << 12) + 1024;
17089    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17090
17091    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17092
17093    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17094    /* enable context validation interrupt from CFC */
17095    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17096
17097    /* set the thresholds to prevent CFC/CDU race */
17098    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17099    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17100
17101    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17102        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17103    }
17104
17105    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17106    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17107
17108    /* Reset PCIE errors for debug */
17109    REG_WR(sc, 0x2814, 0xffffffff);
17110    REG_WR(sc, 0x3820, 0xffffffff);
17111
17112    if (!CHIP_IS_E1x(sc)) {
17113        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17114               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17115                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17116        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17117               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17118                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17119                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17120        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17121               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17122                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17123                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17124    }
17125
17126    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17127
17128    if (!CHIP_IS_E1(sc)) {
17129        /* in E3 this is done in the per-port section */
17130        if (!CHIP_IS_E3(sc))
17131            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17132    }
17133
17134    if (CHIP_IS_E1H(sc)) {
17135        /* not applicable for E2 (and above ...) */
17136        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17137    }
17138
17139    if (CHIP_REV_IS_SLOW(sc)) {
17140        DELAY(200000);
17141    }
17142
17143    /* finish CFC init */
17144    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17145    if (val != 1) {
17146        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17147        return (-1);
17148    }
17149    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17150    if (val != 1) {
17151        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17152        return (-1);
17153    }
17154    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17155    if (val != 1) {
17156        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17157        return (-1);
17158    }
17159    REG_WR(sc, CFC_REG_DEBUG0, 0);
17160
17161    if (CHIP_IS_E1(sc)) {
17162        /* read NIG statistic to see if this is our first up since powerup */
17163        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17164        val = *BXE_SP(sc, wb_data[0]);
17165
17166        /* do internal memory self test */
17167        if ((val == 0) && bxe_int_mem_test(sc)) {
17168            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17169            return (-1);
17170        }
17171    }
17172
17173    bxe_setup_fan_failure_detection(sc);
17174
17175    /* clear PXP2 attentions */
17176    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17177
17178    bxe_enable_blocks_attention(sc);
17179
17180    if (!CHIP_REV_IS_SLOW(sc)) {
17181        ecore_enable_blocks_parity(sc);
17182    }
17183
17184    if (!BXE_NOMCP(sc)) {
17185        if (CHIP_IS_E1x(sc)) {
17186            bxe_common_init_phy(sc);
17187        }
17188    }
17189
17190    return (0);
17191}
17192
17193/**
17194 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17195 *
17196 * @sc:     driver handle
17197 */
17198static int
17199bxe_init_hw_common_chip(struct bxe_softc *sc)
17200{
17201    int rc = bxe_init_hw_common(sc);
17202
17203    if (rc) {
17204        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17205        return (rc);
17206    }
17207
17208    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17209    if (!BXE_NOMCP(sc)) {
17210        bxe_common_init_phy(sc);
17211    }
17212
17213    return (0);
17214}
17215
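/**
 * bxe_init_hw_port - initialize the HW at the PORT phase.
 *
 * @sc:     driver handle
 */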
17216static int
17217bxe_init_hw_port(struct bxe_softc *sc)
17218{
17219    int port = SC_PORT(sc);
17220    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17221    uint32_t low, high;
17222    uint32_t val;
17223
17224    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17225
17226    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17227
17228    ecore_init_block(sc, BLOCK_MISC, init_phase);
17229    ecore_init_block(sc, BLOCK_PXP, init_phase);
17230    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17231
17232    /*
17233     * Timers bug workaround: the common phase disables the pf_master bit
17234     * in pglue, so we need to enable it here before any DMAE accesses are
17235     * attempted. Therefore we manually added the enable-master to the
17236     * port phase (it also happens in the function phase).
17237     */
17238    if (!CHIP_IS_E1x(sc)) {
17239        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17240    }
17241
17242    ecore_init_block(sc, BLOCK_ATC, init_phase);
17243    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17244    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17245    ecore_init_block(sc, BLOCK_QM, init_phase);
17246
17247    ecore_init_block(sc, BLOCK_TCM, init_phase);
17248    ecore_init_block(sc, BLOCK_UCM, init_phase);
17249    ecore_init_block(sc, BLOCK_CCM, init_phase);
17250    ecore_init_block(sc, BLOCK_XCM, init_phase);
17251
17252    /* QM cid (connection) count */
17253    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17254
17255    if (CNIC_SUPPORT(sc)) {
17256        ecore_init_block(sc, BLOCK_TM, init_phase);
17257        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17258        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17259    }
17260
17261    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17262
17263    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17264
17265    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17266        if (IS_MF(sc)) {
17267            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17268        } else if (sc->mtu > 4096) {
17269            if (BXE_ONE_PORT(sc)) {
17270                low = 160;
17271            } else {
17272                val = sc->mtu;
17273                /* (24*1024 + val*4)/256 */
17274                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
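                /* e.g. for mtu 9000: (24*1024 + 9000*4)/256 = 236.6,
                 * which the modulo term rounds up to 96 + 140 + 1 = 237
                 */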
17275            }
17276        } else {
17277            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17278        }
17279        high = (low + 56); /* 14*1024/256 */
17280        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17281        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17282    }
17283
17284    if (CHIP_IS_MODE_4_PORT(sc)) {
17285        REG_WR(sc, SC_PORT(sc) ?
17286               BRB1_REG_MAC_GUARANTIED_1 :
17287               BRB1_REG_MAC_GUARANTIED_0, 40);
17288    }
17289
17290    ecore_init_block(sc, BLOCK_PRS, init_phase);
17291    if (CHIP_IS_E3B0(sc)) {
17292        if (IS_MF_AFEX(sc)) {
17293            /* configure headers for AFEX mode */
17294            REG_WR(sc, SC_PORT(sc) ?
17295                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17296                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17297            REG_WR(sc, SC_PORT(sc) ?
17298                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17299                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17300            REG_WR(sc, SC_PORT(sc) ?
17301                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17302                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17303        } else {
17304            /* Ovlan headers exist only in multi-function +
17305             * switch-dependent mode; in switch-independent mode
17306             * there are no ovlan headers
17307             */
17308            REG_WR(sc, SC_PORT(sc) ?
17309                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17310                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17311                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17312        }
17313    }
17314
17315    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17316    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17317    ecore_init_block(sc, BLOCK_USDM, init_phase);
17318    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17319
17320    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17321    ecore_init_block(sc, BLOCK_USEM, init_phase);
17322    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17323    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17324
17325    ecore_init_block(sc, BLOCK_UPB, init_phase);
17326    ecore_init_block(sc, BLOCK_XPB, init_phase);
17327
17328    ecore_init_block(sc, BLOCK_PBF, init_phase);
17329
17330    if (CHIP_IS_E1x(sc)) {
17331        /* configure PBF to work without PAUSE mtu 9000 */
17332        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17333
17334        /* update threshold */
17335        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17336        /* update init credit */
17337        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17338
17339        /* probe changes */
17340        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17341        DELAY(50);
17342        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17343    }
17344
17345    if (CNIC_SUPPORT(sc)) {
17346        ecore_init_block(sc, BLOCK_SRC, init_phase);
17347    }
17348
17349    ecore_init_block(sc, BLOCK_CDU, init_phase);
17350    ecore_init_block(sc, BLOCK_CFC, init_phase);
17351
17352    if (CHIP_IS_E1(sc)) {
17353        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17354        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17355    }
17356    ecore_init_block(sc, BLOCK_HC, init_phase);
17357
17358    ecore_init_block(sc, BLOCK_IGU, init_phase);
17359
17360    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17361    /* init aeu_mask_attn_func_0/1:
17362     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17363     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17364     *             bits 4-7 are used for "per vn group attention" */
17365    val = IS_MF(sc) ? 0xF7 : 0x7;
17366    /* Enable DCBX attention for all but E1 */
17367    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17368    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17369
17370    ecore_init_block(sc, BLOCK_NIG, init_phase);
17371
17372    if (!CHIP_IS_E1x(sc)) {
17373        /* Bit-map indicating which L2 hdrs may appear after the
17374         * basic Ethernet header
17375         */
17376        if (IS_MF_AFEX(sc)) {
17377            REG_WR(sc, SC_PORT(sc) ?
17378                   NIG_REG_P1_HDRS_AFTER_BASIC :
17379                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17380        } else {
17381            REG_WR(sc, SC_PORT(sc) ?
17382                   NIG_REG_P1_HDRS_AFTER_BASIC :
17383                   NIG_REG_P0_HDRS_AFTER_BASIC,
17384                   IS_MF_SD(sc) ? 7 : 6);
17385        }
17386
17387        if (CHIP_IS_E3(sc)) {
17388            REG_WR(sc, SC_PORT(sc) ?
17389                   NIG_REG_LLH1_MF_MODE :
17390                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17391        }
17392    }
17393    if (!CHIP_IS_E3(sc)) {
17394        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17395    }
17396
17397    if (!CHIP_IS_E1(sc)) {
17398        /* 0x2 disable mf_ov, 0x1 enable */
17399        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17400               (IS_MF_SD(sc) ? 0x1 : 0x2));
17401
17402        if (!CHIP_IS_E1x(sc)) {
17403            val = 0;
17404            switch (sc->devinfo.mf_info.mf_mode) {
17405            case MULTI_FUNCTION_SD:
17406                val = 1;
17407                break;
17408            case MULTI_FUNCTION_SI:
17409            case MULTI_FUNCTION_AFEX:
17410                val = 2;
17411                break;
17412            }
17413
17414            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17415                        NIG_REG_LLH0_CLS_TYPE), val);
17416        }
17417        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17418        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17419        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17420    }
17421
17422    /* If SPIO5 is set to generate interrupts, enable it for this port */
17423    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17424    if (val & MISC_SPIO_SPIO5) {
17425        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17426                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17427        val = REG_RD(sc, reg_addr);
17428        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17429        REG_WR(sc, reg_addr, val);
17430    }
17431
17432    return (0);
17433}
17434
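/*
 * Poll 'reg' until it reads 'expected' or 'poll_count' attempts are
 * exhausted, waiting FLR_WAIT_INTERVAL usecs between reads. Returns the
 * last value read so the caller can tell whether the poll timed out.
 */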
17435static uint32_t
17436bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17437                       uint32_t         reg,
17438                       uint32_t         expected,
17439                       uint32_t         poll_count)
17440{
17441    uint32_t cur_cnt = poll_count;
17442    uint32_t val;
17443
17444    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17445        DELAY(FLR_WAIT_INTERVAL);
17446    }
17447
17448    return (val);
17449}
17450
17451static int
17452bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17453                              uint32_t         reg,
17454                              char             *msg,
17455                              uint32_t         poll_cnt)
17456{
17457    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17458
17459    if (val != 0) {
17460        BLOGE(sc, "%s usage count=%d\n", msg, val);
17461        return (1);
17462    }
17463
17464    return (0);
17465}
17466
17467/* Common routines with VF FLR cleanup */
17468static uint32_t
17469bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17470{
17471    /* adjust polling timeout */
17472    if (CHIP_REV_IS_EMUL(sc)) {
17473        return (FLR_POLL_CNT * 2000);
17474    }
17475
17476    if (CHIP_REV_IS_FPGA(sc)) {
17477        return (FLR_POLL_CNT * 120);
17478    }
17479
17480    return (FLR_POLL_CNT);
17481}
17482
17483static int
17484bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17485                           uint32_t         poll_cnt)
17486{
17487    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17488    if (bxe_flr_clnup_poll_hw_counter(sc,
17489                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17490                                      "CFC PF usage counter timed out",
17491                                      poll_cnt)) {
17492        return (1);
17493    }
17494
17495    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17496    if (bxe_flr_clnup_poll_hw_counter(sc,
17497                                      DORQ_REG_PF_USAGE_CNT,
17498                                      "DQ PF usage counter timed out",
17499                                      poll_cnt)) {
17500        return (1);
17501    }
17502
17503    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17504    if (bxe_flr_clnup_poll_hw_counter(sc,
17505                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17506                                      "QM PF usage counter timed out",
17507                                      poll_cnt)) {
17508        return (1);
17509    }
17510
17511    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17512    if (bxe_flr_clnup_poll_hw_counter(sc,
17513                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17514                                      "Timers VNIC usage counter timed out",
17515                                      poll_cnt)) {
17516        return (1);
17517    }
17518
17519    if (bxe_flr_clnup_poll_hw_counter(sc,
17520                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17521                                      "Timers NUM_SCANS usage counter timed out",
17522                                      poll_cnt)) {
17523        return (1);
17524    }
17525
17526    /* Wait DMAE PF usage counter to zero */
17527    if (bxe_flr_clnup_poll_hw_counter(sc,
17528                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17529                                      "DMAE command register timed out",
17530                                      poll_cnt)) {
17531        return (1);
17532    }
17533
17534    return (0);
17535}
17536
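/*
 * The OP_GEN_* macros each shift a field into place and mask it within the
 * command word written to XSDM_REG_OPERATION_GEN. bxe_send_final_clnup()
 * ORs them together (plus the aggregate-vector-valid bit) to build the FW
 * final cleanup command.
 */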
17537#define OP_GEN_PARAM(param)                                            \
17538    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17539#define OP_GEN_TYPE(type)                                           \
17540    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17541#define OP_GEN_AGG_VECT(index)                                             \
17542    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17543
17544static int
17545bxe_send_final_clnup(struct bxe_softc *sc,
17546                     uint8_t          clnup_func,
17547                     uint32_t         poll_cnt)
17548{
17549    uint32_t op_gen_command = 0;
17550    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17551                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17552    int ret = 0;
17553
17554    if (REG_RD(sc, comp_addr)) {
17555        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17556        return (1);
17557    }
17558
17559    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17560    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17561    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17562    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17563
17564    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17565    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17566
17567    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17568        BLOGE(sc, "FW final cleanup did not succeed\n");
17569        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17570              (REG_RD(sc, comp_addr)));
17571        bxe_panic(sc, ("FLR cleanup failed\n"));
17572        return (1);
17573    }
17574
17575    /* Zero the completion for the next FLR */
17576    REG_WR(sc, comp_addr, 0);
17577
17578    return (ret);
17579}
17580
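/*
 * Wait for the PBF transmission buffer of port/queue 'pN' to drain: poll the
 * credit and credit-freed counters until the credit count returns to its
 * initial value, enough credits have been freed to cover the outstanding
 * amount, or the poll count expires. Progress is logged at DBG_LOAD level.
 */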
17581static void
17582bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17583                       struct pbf_pN_buf_regs *regs,
17584                       uint32_t               poll_count)
17585{
17586    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17587    uint32_t cur_cnt = poll_count;
17588
17589    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17590    crd = crd_start = REG_RD(sc, regs->crd);
17591    init_crd = REG_RD(sc, regs->init_crd);
17592
17593    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17594    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17595    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17596
17597    while ((crd != init_crd) &&
17598           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17599            (init_crd - crd_start))) {
17600        if (cur_cnt--) {
17601            DELAY(FLR_WAIT_INTERVAL);
17602            crd = REG_RD(sc, regs->crd);
17603            crd_freed = REG_RD(sc, regs->crd_freed);
17604        } else {
17605            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17606            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17607            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17608            break;
17609        }
17610    }
17611
17612    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17613          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17614}
17615
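/*
 * Wait for the PBF command queue of port/queue 'pN' to drain: poll the
 * occupancy and lines-freed counters until the occupied lines have been
 * freed or the poll count expires.
 */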
17616static void
17617bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17618                       struct pbf_pN_cmd_regs *regs,
17619                       uint32_t               poll_count)
17620{
17621    uint32_t occup, to_free, freed, freed_start;
17622    uint32_t cur_cnt = poll_count;
17623
17624    occup = to_free = REG_RD(sc, regs->lines_occup);
17625    freed = freed_start = REG_RD(sc, regs->lines_freed);
17626
17627    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17628    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17629
17630    while (occup &&
17631           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17632        if (cur_cnt--) {
17633            DELAY(FLR_WAIT_INTERVAL);
17634            occup = REG_RD(sc, regs->lines_occup);
17635            freed = REG_RD(sc, regs->lines_freed);
17636        } else {
17637            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17638            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : c:%x\n", regs->pN, occup);
17639            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
17640            break;
17641        }
17642    }
17643
17644    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17645          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17646}
17647
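/*
 * Verify that the TX path of the PBF block is flushed for ports/queues
 * 0, 1 and 4 (LB). The register addresses differ between E3 B0 and older
 * chips, so each table entry selects the proper set via CHIP_IS_E3B0().
 */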
17648static void
17649bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17650{
17651    struct pbf_pN_cmd_regs cmd_regs[] = {
17652        {0, (CHIP_IS_E3B0(sc)) ?
17653            PBF_REG_TQ_OCCUPANCY_Q0 :
17654            PBF_REG_P0_TQ_OCCUPANCY,
17655            (CHIP_IS_E3B0(sc)) ?
17656            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17657            PBF_REG_P0_TQ_LINES_FREED_CNT},
17658        {1, (CHIP_IS_E3B0(sc)) ?
17659            PBF_REG_TQ_OCCUPANCY_Q1 :
17660            PBF_REG_P1_TQ_OCCUPANCY,
17661            (CHIP_IS_E3B0(sc)) ?
17662            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17663            PBF_REG_P1_TQ_LINES_FREED_CNT},
17664        {4, (CHIP_IS_E3B0(sc)) ?
17665            PBF_REG_TQ_OCCUPANCY_LB_Q :
17666            PBF_REG_P4_TQ_OCCUPANCY,
17667            (CHIP_IS_E3B0(sc)) ?
17668            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17669            PBF_REG_P4_TQ_LINES_FREED_CNT}
17670    };
17671
17672    struct pbf_pN_buf_regs buf_regs[] = {
17673        {0, (CHIP_IS_E3B0(sc)) ?
17674            PBF_REG_INIT_CRD_Q0 :
17675            PBF_REG_P0_INIT_CRD ,
17676            (CHIP_IS_E3B0(sc)) ?
17677            PBF_REG_CREDIT_Q0 :
17678            PBF_REG_P0_CREDIT,
17679            (CHIP_IS_E3B0(sc)) ?
17680            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17681            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17682        {1, (CHIP_IS_E3B0(sc)) ?
17683            PBF_REG_INIT_CRD_Q1 :
17684            PBF_REG_P1_INIT_CRD,
17685            (CHIP_IS_E3B0(sc)) ?
17686            PBF_REG_CREDIT_Q1 :
17687            PBF_REG_P1_CREDIT,
17688            (CHIP_IS_E3B0(sc)) ?
17689            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17690            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17691        {4, (CHIP_IS_E3B0(sc)) ?
17692            PBF_REG_INIT_CRD_LB_Q :
17693            PBF_REG_P4_INIT_CRD,
17694            (CHIP_IS_E3B0(sc)) ?
17695            PBF_REG_CREDIT_LB_Q :
17696            PBF_REG_P4_CREDIT,
17697            (CHIP_IS_E3B0(sc)) ?
17698            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17699            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17700    };
17701
17702    int i;
17703
17704    /* Verify the command queues are flushed P0, P1, P4 */
17705    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17706        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17707    }
17708
17709    /* Verify the transmission buffers are flushed P0, P1, P4 */
17710    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17711        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17712    }
17713}
17714
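/*
 * Debug helper: dump the PF enable/disable status registers (CFC, PBF,
 * IGU MSI/MSI-X, PGLUE_B) to the DBG_LOAD log so the state left behind by
 * the FLR cleanup can be inspected.
 */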
17715static void
17716bxe_hw_enable_status(struct bxe_softc *sc)
17717{
17718    uint32_t val;
17719
17720    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17721    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17722
17723    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17724    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17725
17726    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17727    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17728
17729    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17730    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17731
17732    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17733    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17734
17735    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17736    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17737
17738    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17739    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17740
17741    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17742    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17743}
17744
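/*
 * PF FLR cleanup, called from bxe_init_hw_func() on E2 and newer chips:
 * re-enable PF target reads, wait for the HW usage counters to drop to
 * zero, send the FW final cleanup command, verify the PBF TX path is
 * flushed and that no PCIe transactions are pending, then re-enable master
 * access.
 */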
17745static int
17746bxe_pf_flr_clnup(struct bxe_softc *sc)
17747{
17748    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17749
17750    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17751
17752    /* Re-enable PF target read access */
17753    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17754
17755    /* Poll HW usage counters */
17756    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17757    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17758        return (-1);
17759    }
17760
17761    /* Zero the igu 'trailing edge' and 'leading edge' */
17762
17763    /* Send the FW cleanup command */
17764    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17765        return (-1);
17766    }
17767
17768    /* ATC cleanup */
17769
17770    /* Verify TX hw is flushed */
17771    bxe_tx_hw_flushed(sc, poll_cnt);
17772
17773    /* Wait 100ms (not adjusted according to platform) */
17774    DELAY(100000);
17775
17776    /* Verify no pending pci transactions */
17777    if (bxe_is_pcie_pending(sc)) {
17778        BLOGE(sc, "PCIE Transactions still pending\n");
17779    }
17780
17781    /* Debug */
17782    bxe_hw_enable_status(sc);
17783
17784    /*
17785     * Master enable - Due to WB DMAE writes performed before this
17786     * register is re-initialized as part of the regular function init
17787     */
17788    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17789
17790    return (0);
17791}
17792
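/*
 * Per-function HW initialization (init phase PHASE_PF0 + func): FLR
 * cleanup, ILT setup for the L2 CDU contexts, per-block ecore init, IGU/HC
 * configuration including the producer memory, and finally the PHY probe.
 */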
17793static int
17794bxe_init_hw_func(struct bxe_softc *sc)
17795{
17796    int port = SC_PORT(sc);
17797    int func = SC_FUNC(sc);
17798    int init_phase = PHASE_PF0 + func;
17799    struct ecore_ilt *ilt = sc->ilt;
17800    uint16_t cdu_ilt_start;
17801    uint32_t addr, val;
17802    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17803    int i, main_mem_width, rc;
17804
17805    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17806
17807    /* FLR cleanup */
17808    if (!CHIP_IS_E1x(sc)) {
17809        rc = bxe_pf_flr_clnup(sc);
17810        if (rc) {
17811            BLOGE(sc, "FLR cleanup failed!\n");
17812            // XXX bxe_fw_dump(sc);
17813            // XXX bxe_idle_chk(sc);
17814            return (rc);
17815        }
17816    }
17817
17818    /* set MSI reconfigure capability */
17819    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17820        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17821        val = REG_RD(sc, addr);
17822        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17823        REG_WR(sc, addr, val);
17824    }
17825
17826    ecore_init_block(sc, BLOCK_PXP, init_phase);
17827    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17828
17829    ilt = sc->ilt;
17830    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17831
17832    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17833        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17834        ilt->lines[cdu_ilt_start + i].page_mapping =
17835            sc->context[i].vcxt_dma.paddr;
17836        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17837    }
17838    ecore_ilt_init_op(sc, INITOP_SET);
17839
17840    /* Set NIC mode */
17841    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17842    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17843
17844    if (!CHIP_IS_E1x(sc)) {
17845        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17846
17847        /* Turn on a single ISR mode in IGU if driver is going to use
17848         * INT#x or MSI
17849         */
17850        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17851            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17852        }
17853
17854        /*
17855         * Timers workaround bug: function init part.
17856         * We need to wait 20 msec after initializing the ILT to make
17857         * sure there are no requests in any of the PXP internal queues
17858         * with "old" ILT addresses.
17859         */
17860        DELAY(20000);
17861
17862        /*
17863         * Master enable - Due to WB DMAE writes performed before this
17864         * register is re-initialized as part of the regular function
17865         * init
17866         */
17867        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17868        /* Enable the function in IGU */
17869        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17870    }
17871
17872    sc->dmae_ready = 1;
17873
17874    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17875
17876    if (!CHIP_IS_E1x(sc))
17877        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17878
17879    ecore_init_block(sc, BLOCK_ATC, init_phase);
17880    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17881    ecore_init_block(sc, BLOCK_NIG, init_phase);
17882    ecore_init_block(sc, BLOCK_SRC, init_phase);
17883    ecore_init_block(sc, BLOCK_MISC, init_phase);
17884    ecore_init_block(sc, BLOCK_TCM, init_phase);
17885    ecore_init_block(sc, BLOCK_UCM, init_phase);
17886    ecore_init_block(sc, BLOCK_CCM, init_phase);
17887    ecore_init_block(sc, BLOCK_XCM, init_phase);
17888    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17889    ecore_init_block(sc, BLOCK_USEM, init_phase);
17890    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17891    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17892
17893    if (!CHIP_IS_E1x(sc))
17894        REG_WR(sc, QM_REG_PF_EN, 1);
17895
17896    if (!CHIP_IS_E1x(sc)) {
17897        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17898        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17899        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17900        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17901    }
17902    ecore_init_block(sc, BLOCK_QM, init_phase);
17903
17904    ecore_init_block(sc, BLOCK_TM, init_phase);
17905    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17906
17907    bxe_iov_init_dq(sc);
17908
17909    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17910    ecore_init_block(sc, BLOCK_PRS, init_phase);
17911    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17912    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17913    ecore_init_block(sc, BLOCK_USDM, init_phase);
17914    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17915    ecore_init_block(sc, BLOCK_UPB, init_phase);
17916    ecore_init_block(sc, BLOCK_XPB, init_phase);
17917    ecore_init_block(sc, BLOCK_PBF, init_phase);
17918    if (!CHIP_IS_E1x(sc))
17919        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17920
17921    ecore_init_block(sc, BLOCK_CDU, init_phase);
17922
17923    ecore_init_block(sc, BLOCK_CFC, init_phase);
17924
17925    if (!CHIP_IS_E1x(sc))
17926        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17927
17928    if (IS_MF(sc)) {
17929        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17930        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17931    }
17932
17933    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17934
17935    /* HC init per function */
17936    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17937        if (CHIP_IS_E1H(sc)) {
17938            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17939
17940            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17941            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17942        }
17943        ecore_init_block(sc, BLOCK_HC, init_phase);
17944
17945    } else {
17946        int num_segs, sb_idx, prod_offset;
17947
17948        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17949
17950        if (!CHIP_IS_E1x(sc)) {
17951            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17952            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17953        }
17954
17955        ecore_init_block(sc, BLOCK_IGU, init_phase);
17956
17957        if (!CHIP_IS_E1x(sc)) {
17958            int dsb_idx = 0;
17959            /**
17960             * Producer memory:
17961             * E2 mode: address 0-135 match to the mapping memory;
17962             * 136 - PF0 default prod; 137 - PF1 default prod;
17963             * 138 - PF2 default prod; 139 - PF3 default prod;
17964             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17965             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17966             * 144-147 reserved.
17967             *
17968             * E1.5 mode - In backward compatible mode:
17969             * for a non-default SB, each even line in the memory
17970             * holds the U producer and each odd line holds
17971             * the C producer. The first 128 producers are for
17972             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
17973             * producers are for the DSB for each PF.
17974             * Each PF has five segments: (the order inside each
17975             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
17976             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
17977             * 144-147 attn prods;
17978             */
17979            /* non-default-status-blocks */
17980            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17981                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
17982            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
17983                prod_offset = (sc->igu_base_sb + sb_idx) *
17984                    num_segs;
17985
17986                for (i = 0; i < num_segs; i++) {
17987                    addr = IGU_REG_PROD_CONS_MEMORY +
17988                            (prod_offset + i) * 4;
17989                    REG_WR(sc, addr, 0);
17990                }
17991                /* send consumer update with value 0 */
17992                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
17993                           USTORM_ID, 0, IGU_INT_NOP, 1);
17994                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
17995            }
17996
17997            /* default-status-blocks */
17998            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17999                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18000
18001            if (CHIP_IS_MODE_4_PORT(sc))
18002                dsb_idx = SC_FUNC(sc);
18003            else
18004                dsb_idx = SC_VN(sc);
18005
18006            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18007                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18008                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18009
18010            /*
18011             * igu prods come in chunks of E1HVN_MAX (4) -
18012             * it does not matter what the current chip mode is
18013             */
18014            for (i = 0; i < (num_segs * E1HVN_MAX);
18015                 i += E1HVN_MAX) {
18016                addr = IGU_REG_PROD_CONS_MEMORY +
18017                            (prod_offset + i)*4;
18018                REG_WR(sc, addr, 0);
18019            }
18020            /* send consumer update with 0 */
18021            if (CHIP_INT_MODE_IS_BC(sc)) {
18022                bxe_ack_sb(sc, sc->igu_dsb_id,
18023                           USTORM_ID, 0, IGU_INT_NOP, 1);
18024                bxe_ack_sb(sc, sc->igu_dsb_id,
18025                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18026                bxe_ack_sb(sc, sc->igu_dsb_id,
18027                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18028                bxe_ack_sb(sc, sc->igu_dsb_id,
18029                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18030                bxe_ack_sb(sc, sc->igu_dsb_id,
18031                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18032            } else {
18033                bxe_ack_sb(sc, sc->igu_dsb_id,
18034                           USTORM_ID, 0, IGU_INT_NOP, 1);
18035                bxe_ack_sb(sc, sc->igu_dsb_id,
18036                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18037            }
18038            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18039
18040            /* !!! these should become driver const once
18041               rf-tool supports split-68 const */
18042            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18043            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18044            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18045            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18046            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18047            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18048        }
18049    }
18050
18051    /* Reset PCIE errors for debug */
18052    REG_WR(sc, 0x2114, 0xffffffff);
18053    REG_WR(sc, 0x2120, 0xffffffff);
18054
18055    if (CHIP_IS_E1x(sc)) {
18056        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18057        main_mem_base = HC_REG_MAIN_MEMORY +
18058                SC_PORT(sc) * (main_mem_size * 4);
18059        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18060        main_mem_width = 8;
18061
18062        val = REG_RD(sc, main_mem_prty_clr);
18063        if (val) {
18064            BLOGD(sc, DBG_LOAD,
18065                  "Parity errors in HC block during function init (0x%x)!\n",
18066                  val);
18067        }
18068
18069        /* Clear "false" parity errors in MSI-X table */
18070        for (i = main_mem_base;
18071             i < main_mem_base + main_mem_size * 4;
18072             i += main_mem_width) {
18073            bxe_read_dmae(sc, i, main_mem_width / 4);
18074            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18075                           i, main_mem_width / 4);
18076        }
18077        /* Clear HC parity attention */
18078        REG_RD(sc, main_mem_prty_clr);
18079    }
18080
18081#if 1
18082    /* Enable STORMs SP logging */
18083    REG_WR8(sc, BAR_USTRORM_INTMEM +
18084           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18085    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18086           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18087    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18088           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18089    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18090           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18091#endif
18092
18093    elink_phy_probe(&sc->link_params);
18094
18095    return (0);
18096}
18097
18098static void
18099bxe_link_reset(struct bxe_softc *sc)
18100{
18101    if (!BXE_NOMCP(sc)) {
18102	bxe_acquire_phy_lock(sc);
18103        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18104	bxe_release_phy_lock(sc);
18105    } else {
18106        if (!CHIP_REV_IS_SLOW(sc)) {
18107            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18108        }
18109    }
18110}
18111
18112static void
18113bxe_reset_port(struct bxe_softc *sc)
18114{
18115    int port = SC_PORT(sc);
18116    uint32_t val;
18117
18118    /* reset physical Link */
18119    bxe_link_reset(sc);
18120
18121    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18122
18123    /* Do not rcv packets to BRB */
18124    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18125    /* Do not direct rcv packets that are not for MCP to the BRB */
18126    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18127               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18128
18129    /* Configure AEU */
18130    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18131
18132    DELAY(100000);
18133
18134    /* Check for BRB port occupancy */
18135    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18136    if (val) {
18137        BLOGD(sc, DBG_LOAD,
18138              "BRB1 is not empty, %d blocks are occupied\n", val);
18139    }
18140
18141    /* TODO: Close Doorbell port? */
18142}
18143
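/*
 * Write a single ILT (on-chip address translation) entry: the page address
 * is split into two 32-bit words (ONCHIP_ADDR1/ONCHIP_ADDR2) and written
 * via DMAE to the per-index PXP2 RQ_ONCHIP_AT slot (or its B0 variant).
 */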
18144static void
18145bxe_ilt_wr(struct bxe_softc *sc,
18146           uint32_t         index,
18147           bus_addr_t       addr)
18148{
18149    int reg;
18150    uint32_t wb_write[2];
18151
18152    if (CHIP_IS_E1(sc)) {
18153        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18154    } else {
18155        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18156    }
18157
18158    wb_write[0] = ONCHIP_ADDR1(addr);
18159    wb_write[1] = ONCHIP_ADDR2(addr);
18160    REG_WR_DMAE(sc, reg, wb_write, 2);
18161}
18162
18163static void
18164bxe_clear_func_ilt(struct bxe_softc *sc,
18165                   uint32_t         func)
18166{
18167    uint32_t i, base = FUNC_ILT_BASE(func);
18168    for (i = base; i < base + ILT_PER_FUNC; i++) {
18169        bxe_ilt_wr(sc, i, 0);
18170    }
18171}
18172
18173static void
18174bxe_reset_func(struct bxe_softc *sc)
18175{
18176    struct bxe_fastpath *fp;
18177    int port = SC_PORT(sc);
18178    int func = SC_FUNC(sc);
18179    int i;
18180
18181    /* Disable the function in the FW */
18182    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18183    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18184    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18185    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18186
18187    /* FP SBs */
18188    FOR_EACH_ETH_QUEUE(sc, i) {
18189        fp = &sc->fp[i];
18190        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18191                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18192                SB_DISABLED);
18193    }
18194
18195    /* SP SB */
18196    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18197            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18198            SB_DISABLED);
18199
18200    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18201        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18202    }
18203
18204    /* Configure IGU */
18205    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18206        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18207        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18208    } else {
18209        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18210        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18211    }
18212
18213    if (CNIC_LOADED(sc)) {
18214        /* Disable Timer scan */
18215        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18216        /*
18217         * Wait for at least 10ms and up to 2 seconds for the timers
18218         * scan to complete
18219         */
18220        for (i = 0; i < 200; i++) {
18221            DELAY(10000);
18222            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18223                break;
18224        }
18225    }
18226
18227    /* Clear ILT */
18228    bxe_clear_func_ilt(sc, func);
18229
18230    /*
18231     * Timers workaround bug for E2: if this is vnic-3,
18232     * we need to set the entire ILT range for these timers.
18233     */
18234    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18235        struct ilt_client_info ilt_cli;
18236        /* use dummy TM client */
18237        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18238        ilt_cli.start = 0;
18239        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18240        ilt_cli.client_num = ILT_CLIENT_TM;
18241
18242        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18243    }
18244
18245    /* this assumes that reset_port() called before reset_func()*/
18246    if (!CHIP_IS_E1x(sc)) {
18247        bxe_pf_disable(sc);
18248    }
18249
18250    sc->dmae_ready = 0;
18251}
18252
18253static int
18254bxe_gunzip_init(struct bxe_softc *sc)
18255{
18256    return (0);
18257}
18258
18259static void
18260bxe_gunzip_end(struct bxe_softc *sc)
18261{
18262    return;
18263}
18264
18265static int
18266bxe_init_firmware(struct bxe_softc *sc)
18267{
18268    if (CHIP_IS_E1(sc)) {
18269        ecore_init_e1_firmware(sc);
18270        sc->iro_array = e1_iro_arr;
18271    } else if (CHIP_IS_E1H(sc)) {
18272        ecore_init_e1h_firmware(sc);
18273        sc->iro_array = e1h_iro_arr;
18274    } else if (!CHIP_IS_E1x(sc)) {
18275        ecore_init_e2_firmware(sc);
18276        sc->iro_array = e2_iro_arr;
18277    } else {
18278        BLOGE(sc, "Unsupported chip revision\n");
18279        return (-1);
18280    }
18281
18282    return (0);
18283}
18284
18285static void
18286bxe_release_firmware(struct bxe_softc *sc)
18287{
18288    /* Do nothing */
18289    return;
18290}
18291
18292static int
18293ecore_gunzip(struct bxe_softc *sc,
18294             const uint8_t    *zbuf,
18295             int              len)
18296{
18297    /* XXX : Implement... */
18298    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18299    return (FALSE);
18300}
18301
18302static void
18303ecore_reg_wr_ind(struct bxe_softc *sc,
18304                 uint32_t         addr,
18305                 uint32_t         val)
18306{
18307    bxe_reg_wr_ind(sc, addr, val);
18308}
18309
18310static void
18311ecore_write_dmae_phys_len(struct bxe_softc *sc,
18312                          bus_addr_t       phys_addr,
18313                          uint32_t         addr,
18314                          uint32_t         len)
18315{
18316    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18317}
18318
18319void
18320ecore_storm_memset_struct(struct bxe_softc *sc,
18321                          uint32_t         addr,
18322                          size_t           size,
18323                          uint32_t         *data)
18324{
18325    uint8_t i;
18326    for (i = 0; i < size/4; i++) {
18327        REG_WR(sc, addr + (i * 4), data[i]);
18328    }
18329}
18330
18331
18332/*
18333 * character device - ioctl interface definitions
18334 */
18335
18336
18337#include "bxe_dump.h"
18338#include "bxe_ioctl.h"
18339#include <sys/conf.h>
18340
18341static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18342                struct thread *td);
18343
18344static struct cdevsw bxe_cdevsw = {
18345    .d_version = D_VERSION,
18346    .d_ioctl = bxe_eioctl,
18347    .d_name = "bxecnic",
18348};
18349
18350#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18351
18352
18353#define DUMP_ALL_PRESETS        0x1FFF
18354#define DUMP_MAX_PRESETS        13
18355#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18356#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18357#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18358#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18359#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18360
18361#define IS_REG_IN_PRESET(presets, idx)  \
18362                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18363
18364
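/*
 * Presets are numbered 1..DUMP_MAX_PRESETS; IS_REG_IN_PRESET() tests bit
 * (idx - 1) of a register's preset mask, and dump_num_registers[][] is
 * indexed by chip family and (preset - 1).
 */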
18365static int
18366bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18367{
18368    if (CHIP_IS_E1(sc))
18369        return dump_num_registers[0][preset-1];
18370    else if (CHIP_IS_E1H(sc))
18371        return dump_num_registers[1][preset-1];
18372    else if (CHIP_IS_E2(sc))
18373        return dump_num_registers[2][preset-1];
18374    else if (CHIP_IS_E3A0(sc))
18375        return dump_num_registers[3][preset-1];
18376    else if (CHIP_IS_E3B0(sc))
18377        return dump_num_registers[4][preset-1];
18378    else
18379        return 0;
18380}
18381
18382static int
18383bxe_get_total_regs_len32(struct bxe_softc *sc)
18384{
18385    uint32_t preset_idx;
18386    int regdump_len32 = 0;
18387
18388
18389    /* Calculate the total preset regs length */
18390    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18391        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18392    }
18393
18394    return regdump_len32;
18395}
18396
18397static const uint32_t *
18398__bxe_get_page_addr_ar(struct bxe_softc *sc)
18399{
18400    if (CHIP_IS_E2(sc))
18401        return page_vals_e2;
18402    else if (CHIP_IS_E3(sc))
18403        return page_vals_e3;
18404    else
18405        return NULL;
18406}
18407
18408static uint32_t
18409__bxe_get_page_reg_num(struct bxe_softc *sc)
18410{
18411    if (CHIP_IS_E2(sc))
18412        return PAGE_MODE_VALUES_E2;
18413    else if (CHIP_IS_E3(sc))
18414        return PAGE_MODE_VALUES_E3;
18415    else
18416        return 0;
18417}
18418
18419static const uint32_t *
18420__bxe_get_page_write_ar(struct bxe_softc *sc)
18421{
18422    if (CHIP_IS_E2(sc))
18423        return page_write_regs_e2;
18424    else if (CHIP_IS_E3(sc))
18425        return page_write_regs_e3;
18426    else
18427        return NULL;
18428}
18429
18430static uint32_t
18431__bxe_get_page_write_num(struct bxe_softc *sc)
18432{
18433    if (CHIP_IS_E2(sc))
18434        return PAGE_WRITE_REGS_E2;
18435    else if (CHIP_IS_E3(sc))
18436        return PAGE_WRITE_REGS_E3;
18437    else
18438        return 0;
18439}
18440
18441static const struct reg_addr *
18442__bxe_get_page_read_ar(struct bxe_softc *sc)
18443{
18444    if (CHIP_IS_E2(sc))
18445        return page_read_regs_e2;
18446    else if (CHIP_IS_E3(sc))
18447        return page_read_regs_e3;
18448    else
18449        return NULL;
18450}
18451
18452static uint32_t
18453__bxe_get_page_read_num(struct bxe_softc *sc)
18454{
18455    if (CHIP_IS_E2(sc))
18456        return PAGE_READ_REGS_E2;
18457    else if (CHIP_IS_E3(sc))
18458        return PAGE_READ_REGS_E3;
18459    else
18460        return 0;
18461}
18462
18463static bool
18464bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18465{
18466    if (CHIP_IS_E1(sc))
18467        return IS_E1_REG(reg_info->chips);
18468    else if (CHIP_IS_E1H(sc))
18469        return IS_E1H_REG(reg_info->chips);
18470    else if (CHIP_IS_E2(sc))
18471        return IS_E2_REG(reg_info->chips);
18472    else if (CHIP_IS_E3A0(sc))
18473        return IS_E3A0_REG(reg_info->chips);
18474    else if (CHIP_IS_E3B0(sc))
18475        return IS_E3B0_REG(reg_info->chips);
18476    else
18477        return 0;
18478}
18479
18480static bool
18481bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18482{
18483    if (CHIP_IS_E1(sc))
18484        return IS_E1_REG(wreg_info->chips);
18485    else if (CHIP_IS_E1H(sc))
18486        return IS_E1H_REG(wreg_info->chips);
18487    else if (CHIP_IS_E2(sc))
18488        return IS_E2_REG(wreg_info->chips);
18489    else if (CHIP_IS_E3A0(sc))
18490        return IS_E3A0_REG(wreg_info->chips);
18491    else if (CHIP_IS_E3B0(sc))
18492        return IS_E3B0_REG(wreg_info->chips);
18493    else
18494        return 0;
18495}
18496
18497/**
18498 * bxe_read_pages_regs - read "paged" registers
18499 *
18500 * @sc          device handle
18501 * @p           output buffer
18502 *
18503 * Reads "paged" memories: memories that may only be read by first writing to a
18504 * specific address ("write address") and then reading from a specific address
18505 * ("read address"). There may be more than one write address per "page" and
18506 * more than one read address per write address.
18507 */
18508static void
18509bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18510{
18511    uint32_t i, j, k, n;
18512
18513    /* addresses of the paged registers */
18514    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18515    /* number of paged registers */
18516    int num_pages = __bxe_get_page_reg_num(sc);
18517    /* write addresses */
18518    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18519    /* number of write addresses */
18520    int write_num = __bxe_get_page_write_num(sc);
18521    /* read addresses info */
18522    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18523    /* number of read addresses */
18524    int read_num = __bxe_get_page_read_num(sc);
18525    uint32_t addr, size;
18526
18527    for (i = 0; i < num_pages; i++) {
18528        for (j = 0; j < write_num; j++) {
18529            REG_WR(sc, write_addr[j], page_addr[i]);
18530
18531            for (k = 0; k < read_num; k++) {
18532                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18533                    size = read_addr[k].size;
18534                    for (n = 0; n < size; n++) {
18535                        addr = read_addr[k].addr + n*4;
18536                        *p++ = REG_RD(sc, addr);
18537                    }
18538                }
18539            }
18540        }
18541    }
18542    return;
18543}
18544
18545
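/*
 * Collect all registers belonging to 'preset' into the buffer at 'p': the
 * idle_chk registers, the regular registers, the CAM (wreg) registers with
 * their associated read_regs, and, on E2/E3, the paged registers.
 */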
18546static int
18547bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18548{
18549    uint32_t i, j, addr;
18550    const struct wreg_addr *wreg_addr_p = NULL;
18551
18552    if (CHIP_IS_E1(sc))
18553        wreg_addr_p = &wreg_addr_e1;
18554    else if (CHIP_IS_E1H(sc))
18555        wreg_addr_p = &wreg_addr_e1h;
18556    else if (CHIP_IS_E2(sc))
18557        wreg_addr_p = &wreg_addr_e2;
18558    else if (CHIP_IS_E3A0(sc))
18559        wreg_addr_p = &wreg_addr_e3;
18560    else if (CHIP_IS_E3B0(sc))
18561        wreg_addr_p = &wreg_addr_e3b0;
18562    else
18563        return (-1);
18564
18565    /* Read the idle_chk registers */
18566    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18567        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18568            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18569            for (j = 0; j < idle_reg_addrs[i].size; j++)
18570                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18571        }
18572    }
18573
18574    /* Read the regular registers */
18575    for (i = 0; i < REGS_COUNT; i++) {
18576        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18577            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18578            for (j = 0; j < reg_addrs[i].size; j++)
18579                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18580        }
18581    }
18582
18583    /* Read the CAM registers */
18584    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18585        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18586        for (i = 0; i < wreg_addr_p->size; i++) {
18587            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18588
18589            /* In case of wreg_addr register, read additional
18590               registers from read_regs array
18591             */
18592            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18593                addr = *(wreg_addr_p->read_regs);
18594                *p++ = REG_RD(sc, addr + j*4);
18595            }
18596        }
18597    }
18598
18599    /* Paged registers are supported in E2 & E3 only */
18600    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18601        /* Read "paged" registers */
18602        bxe_read_pages_regs(sc, p, preset);
18603    }
18604
18605    return 0;
18606}
18607
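/*
 * Build the GRC dump in sc->grc_dump: a dump_header followed by the
 * registers of each preset (presets containing IORs, i.e. 2, 5, 8 and 11,
 * are skipped). Block parity reporting is disabled while the registers are
 * read and re-enabled afterwards.
 */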
18608static int
18609bxe_grc_dump(struct bxe_softc *sc)
18610{
18611    int rval = 0;
18612    uint32_t preset_idx;
18613    uint8_t *buf;
18614    uint32_t size;
18615    struct  dump_header *d_hdr;
18616
18617    if (sc->grcdump_done)
18618	return (rval);
18619
18620    ecore_disable_blocks_parity(sc);
18621
18622    buf = sc->grc_dump;
18623    d_hdr = sc->grc_dump;
18624
18625    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18626    d_hdr->version = BNX2X_DUMP_VERSION;
18627    d_hdr->preset = DUMP_ALL_PRESETS;
18628
18629    if (CHIP_IS_E1(sc)) {
18630        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18631    } else if (CHIP_IS_E1H(sc)) {
18632        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18633    } else if (CHIP_IS_E2(sc)) {
18634        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18635                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18636    } else if (CHIP_IS_E3A0(sc)) {
18637        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18638                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18639    } else if (CHIP_IS_E3B0(sc)) {
18640        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18641                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18642    }
18643
18644    buf += sizeof(struct  dump_header);
18645
18646    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18647
18648        /* Skip presets with IOR */
18649        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18650            (preset_idx == 11))
18651            continue;
18652
18653        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18654
18655	if (rval)
18656            break;
18657
18658        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18659
18660        buf += size;
18661    }
18662
18663    ecore_clear_blocks_parity(sc);
18664    ecore_enable_blocks_parity(sc);
18665
18666    sc->grcdump_done = 1;
18667    return(rval);
18668}
18669
18670static int
18671bxe_add_cdev(struct bxe_softc *sc)
18672{
18673    int grc_dump_size;
18674
18675    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18676				sizeof(struct  dump_header);
18677
18678    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18679
18680    if (sc->grc_dump == NULL)
18681        return (-1);
18682
18683    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18684                            sc->ifp->if_dunit,
18685                            UID_ROOT,
18686                            GID_WHEEL,
18687                            0600,
18688                            "%s",
18689                            if_name(sc->ifp));
18690
18691    if (sc->ioctl_dev == NULL) {
18692
18693        free(sc->grc_dump, M_DEVBUF);
18694
18695        return (-1);
18696    }
18697
18698    sc->ioctl_dev->si_drv1 = sc;
18699
18700    return (0);
18701}
18702
18703static void
18704bxe_del_cdev(struct bxe_softc *sc)
18705{
18706    if (sc->ioctl_dev != NULL)
18707        destroy_dev(sc->ioctl_dev);
18708
18709    if (sc->grc_dump != NULL)
18710        free(sc->grc_dump, M_DEVBUF);
18711
18712    return;
18713}
18714
18715static int
18716bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18717        struct thread *td)
18718{
18719    struct bxe_softc    *sc;
18720    int                 rval = 0;
18721    device_t            pci_dev;
18722    bxe_grcdump_t       *dump = NULL;
18723    int grc_dump_size;
18724
18725    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
18726        return ENXIO;
18727
18728    pci_dev= sc->dev;
18729
18730    dump = (bxe_grcdump_t *)data;
18731
18732    switch(cmd) {
18733
18734        case BXE_GRC_DUMP_SIZE:
18735            dump->pci_func = sc->pcie_func;
18736            dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18737					sizeof(struct  dump_header);
18738            break;
18739
18740        case BXE_GRC_DUMP:
18741
18742            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18743				sizeof(struct  dump_header);
18744
18745            if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
18746                (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) {
18747                rval = EINVAL;
18748                break;
18749            }
18750	    dump->grcdump_dwords = grc_dump_size >> 2;
18751            rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
18752            sc->grcdump_done = 0;
18753
18754            break;
18755
18756        default:
18757            break;
18758    }
18759
18760    return (rval);
18761}
18762