bxe.c revision 295830
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/bxe/bxe.c 295830 2016-02-19 22:46:52Z davidcs $");
29
30#define BXE_DRIVER_VERSION "1.78.79"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero(0) for older kernels that don't include this changeset
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this at pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        BRCM_VENDORID,
171        CHIP_NUM_57840_MF,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 MF 10GbE"
174    },
175    {
176        0, 0, 0, 0, NULL
177    }
178};
179
180MALLOC_DECLARE(M_BXE_ILT);
181MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
182
183/*
184 * FreeBSD device entry points.
185 */
186static int bxe_probe(device_t);
187static int bxe_attach(device_t);
188static int bxe_detach(device_t);
189static int bxe_shutdown(device_t);
190
191/*
192 * FreeBSD KLD module/device interface event handler method.
193 */
194static device_method_t bxe_methods[] = {
195    /* Device interface (device_if.h) */
196    DEVMETHOD(device_probe,     bxe_probe),
197    DEVMETHOD(device_attach,    bxe_attach),
198    DEVMETHOD(device_detach,    bxe_detach),
199    DEVMETHOD(device_shutdown,  bxe_shutdown),
200    /* Bus interface (bus_if.h) */
201    DEVMETHOD(bus_print_child,  bus_generic_print_child),
202    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
203    KOBJMETHOD_END
204};
205
206/*
207 * FreeBSD KLD Module data declaration
208 */
209static driver_t bxe_driver = {
210    "bxe",                   /* module name */
211    bxe_methods,             /* event handler */
212    sizeof(struct bxe_softc) /* extra data */
213};
214
215/*
216 * FreeBSD dev class is needed to manage dev instances and
217 * to associate with a bus type
218 */
219static devclass_t bxe_devclass;
220
221MODULE_DEPEND(bxe, pci, 1, 1, 1);
222MODULE_DEPEND(bxe, ether, 1, 1, 1);
223DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
224
225/* resources needed for unloading a previously loaded device */
226
227#define BXE_PREV_WAIT_NEEDED 1
228struct mtx bxe_prev_mtx;
229MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
230struct bxe_prev_list_node {
231    LIST_ENTRY(bxe_prev_list_node) node;
232    uint8_t bus;
233    uint8_t slot;
234    uint8_t path;
235    uint8_t aer; /* XXX automatic error recovery */
236    uint8_t undi;
237};
238static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
239
240static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
241
242/* Tunable device values... */
243
244SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
245
246/* Debug */
247unsigned long bxe_debug = 0;
248SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
249             &bxe_debug, 0, "Debug logging mode");
250
251/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
252static int bxe_interrupt_mode = INTR_MODE_MSIX;
253SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
254           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
255
256/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
257static int bxe_queue_count = 4;
258SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
259           &bxe_queue_count, 0, "Multi-Queue queue count");
260
261/* max number of buffers per queue (default RX_BD_USABLE) */
262static int bxe_max_rx_bufs = 0;
263SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
264           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
265
266/* Host interrupt coalescing RX tick timer (usecs) */
267static int bxe_hc_rx_ticks = 25;
268SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
269           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
270
271/* Host interrupt coalescing TX tick timer (usecs) */
272static int bxe_hc_tx_ticks = 50;
273SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
274           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
275
276/* Maximum number of Rx packets to process at a time */
277static int bxe_rx_budget = 0xffffffff;
278SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
279           &bxe_rx_budget, 0, "Rx processing budget");
280
281/* Maximum LRO aggregation size */
282static int bxe_max_aggregation_size = 0;
283SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
284           &bxe_max_aggregation_size, 0, "max aggregation size");
285
286/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
287static int bxe_mrrs = -1;
288SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
289           &bxe_mrrs, 0, "PCIe maximum read request size");
290
291/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
292static int bxe_autogreeen = 0;
293SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
294           &bxe_autogreeen, 0, "AutoGrEEEn support");
295
296/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
297static int bxe_udp_rss = 0;
298SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
299           &bxe_udp_rss, 0, "UDP RSS support");
300
301
302#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
303
304#define STATS_OFFSET32(stat_name)                   \
305    (offsetof(struct bxe_eth_stats, stat_name) / 4)
306
307#define Q_STATS_OFFSET32(stat_name)                   \
308    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
309
310static const struct {
311    uint32_t offset;
312    uint32_t size;
313    uint32_t flags;
314#define STATS_FLAGS_PORT  1
315#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
316#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
317    char string[STAT_NAME_LEN];
318} bxe_eth_stats_arr[] = {
319    { STATS_OFFSET32(total_bytes_received_hi),
320                8, STATS_FLAGS_BOTH, "rx_bytes" },
321    { STATS_OFFSET32(error_bytes_received_hi),
322                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
323    { STATS_OFFSET32(total_unicast_packets_received_hi),
324                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
325    { STATS_OFFSET32(total_multicast_packets_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
327    { STATS_OFFSET32(total_broadcast_packets_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
329    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
330                8, STATS_FLAGS_PORT, "rx_crc_errors" },
331    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
332                8, STATS_FLAGS_PORT, "rx_align_errors" },
333    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
334                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
335    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
336                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
337    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
338                8, STATS_FLAGS_PORT, "rx_fragments" },
339    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
340                8, STATS_FLAGS_PORT, "rx_jabbers" },
341    { STATS_OFFSET32(no_buff_discard_hi),
342                8, STATS_FLAGS_BOTH, "rx_discards" },
343    { STATS_OFFSET32(mac_filter_discard),
344                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
345    { STATS_OFFSET32(mf_tag_discard),
346                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
347    { STATS_OFFSET32(pfc_frames_received_hi),
348                8, STATS_FLAGS_PORT, "pfc_frames_received" },
349    { STATS_OFFSET32(pfc_frames_sent_hi),
350                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
351    { STATS_OFFSET32(brb_drop_hi),
352                8, STATS_FLAGS_PORT, "rx_brb_discard" },
353    { STATS_OFFSET32(brb_truncate_hi),
354                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
355    { STATS_OFFSET32(pause_frames_received_hi),
356                8, STATS_FLAGS_PORT, "rx_pause_frames" },
357    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
358                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
359    { STATS_OFFSET32(nig_timer_max),
360                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
361    { STATS_OFFSET32(total_bytes_transmitted_hi),
362                8, STATS_FLAGS_BOTH, "tx_bytes" },
363    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
364                8, STATS_FLAGS_PORT, "tx_error_bytes" },
365    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
366                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
367    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
369    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
370                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
371    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
372                8, STATS_FLAGS_PORT, "tx_mac_errors" },
373    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
374                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
375    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
376                8, STATS_FLAGS_PORT, "tx_single_collisions" },
377    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
378                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
379    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
380                8, STATS_FLAGS_PORT, "tx_deferred" },
381    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
382                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
383    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
384                8, STATS_FLAGS_PORT, "tx_late_collisions" },
385    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
386                8, STATS_FLAGS_PORT, "tx_total_collisions" },
387    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
388                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
389    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
390                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
391    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
392                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
394                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
396                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
397    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
398                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
399    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
400                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
401    { STATS_OFFSET32(pause_frames_sent_hi),
402                8, STATS_FLAGS_PORT, "tx_pause_frames" },
403    { STATS_OFFSET32(total_tpa_aggregations_hi),
404                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
405    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
406                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
407    { STATS_OFFSET32(total_tpa_bytes_hi),
408                8, STATS_FLAGS_FUNC, "tpa_bytes"},
409    { STATS_OFFSET32(eee_tx_lpi),
410                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
411    { STATS_OFFSET32(rx_calls),
412                4, STATS_FLAGS_FUNC, "rx_calls"},
413    { STATS_OFFSET32(rx_pkts),
414                4, STATS_FLAGS_FUNC, "rx_pkts"},
415    { STATS_OFFSET32(rx_tpa_pkts),
416                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
417    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
418                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
419    { STATS_OFFSET32(rx_bxe_service_rxsgl),
420                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
421    { STATS_OFFSET32(rx_jumbo_sge_pkts),
422                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
423    { STATS_OFFSET32(rx_soft_errors),
424                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
425    { STATS_OFFSET32(rx_hw_csum_errors),
426                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
427    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
428                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
429    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
430                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
431    { STATS_OFFSET32(rx_budget_reached),
432                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
433    { STATS_OFFSET32(tx_pkts),
434                4, STATS_FLAGS_FUNC, "tx_pkts"},
435    { STATS_OFFSET32(tx_soft_errors),
436                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
437    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
438                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
439    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
440                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
441    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
442                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
443    { STATS_OFFSET32(tx_ofld_frames_lso),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
445    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
447    { STATS_OFFSET32(tx_encap_failures),
448                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
449    { STATS_OFFSET32(tx_hw_queue_full),
450                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
451    { STATS_OFFSET32(tx_hw_max_queue_depth),
452                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
453    { STATS_OFFSET32(tx_dma_mapping_failure),
454                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
455    { STATS_OFFSET32(tx_max_drbr_queue_depth),
456                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
457    { STATS_OFFSET32(tx_window_violation_std),
458                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
459    { STATS_OFFSET32(tx_window_violation_tso),
460                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
461    { STATS_OFFSET32(tx_chain_lost_mbuf),
462                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
463    { STATS_OFFSET32(tx_frames_deferred),
464                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
465    { STATS_OFFSET32(tx_queue_xoff),
466                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
467    { STATS_OFFSET32(mbuf_defrag_attempts),
468                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
469    { STATS_OFFSET32(mbuf_defrag_failures),
470                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
471    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
472                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
473    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
474                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
475    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
476                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
477    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
479    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
481    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
483    { STATS_OFFSET32(mbuf_alloc_tx),
484                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
485    { STATS_OFFSET32(mbuf_alloc_rx),
486                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
487    { STATS_OFFSET32(mbuf_alloc_sge),
488                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
489    { STATS_OFFSET32(mbuf_alloc_tpa),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
491};
492
493static const struct {
494    uint32_t offset;
495    uint32_t size;
496    char string[STAT_NAME_LEN];
497} bxe_eth_q_stats_arr[] = {
498    { Q_STATS_OFFSET32(total_bytes_received_hi),
499                8, "rx_bytes" },
500    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
501                8, "rx_ucast_packets" },
502    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
503                8, "rx_mcast_packets" },
504    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
505                8, "rx_bcast_packets" },
506    { Q_STATS_OFFSET32(no_buff_discard_hi),
507                8, "rx_discards" },
508    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
509                8, "tx_bytes" },
510    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
511                8, "tx_ucast_packets" },
512    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
513                8, "tx_mcast_packets" },
514    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
515                8, "tx_bcast_packets" },
516    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
517                8, "tpa_aggregations" },
518    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
519                8, "tpa_aggregated_frames"},
520    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
521                8, "tpa_bytes"},
522    { Q_STATS_OFFSET32(rx_calls),
523                4, "rx_calls"},
524    { Q_STATS_OFFSET32(rx_pkts),
525                4, "rx_pkts"},
526    { Q_STATS_OFFSET32(rx_tpa_pkts),
527                4, "rx_tpa_pkts"},
528    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
529                4, "rx_erroneous_jumbo_sge_pkts"},
530    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
531                4, "rx_bxe_service_rxsgl"},
532    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
533                4, "rx_jumbo_sge_pkts"},
534    { Q_STATS_OFFSET32(rx_soft_errors),
535                4, "rx_soft_errors"},
536    { Q_STATS_OFFSET32(rx_hw_csum_errors),
537                4, "rx_hw_csum_errors"},
538    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
539                4, "rx_ofld_frames_csum_ip"},
540    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
541                4, "rx_ofld_frames_csum_tcp_udp"},
542    { Q_STATS_OFFSET32(rx_budget_reached),
543                4, "rx_budget_reached"},
544    { Q_STATS_OFFSET32(tx_pkts),
545                4, "tx_pkts"},
546    { Q_STATS_OFFSET32(tx_soft_errors),
547                4, "tx_soft_errors"},
548    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
549                4, "tx_ofld_frames_csum_ip"},
550    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
551                4, "tx_ofld_frames_csum_tcp"},
552    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
553                4, "tx_ofld_frames_csum_udp"},
554    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
555                4, "tx_ofld_frames_lso"},
556    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
557                4, "tx_ofld_frames_lso_hdr_splits"},
558    { Q_STATS_OFFSET32(tx_encap_failures),
559                4, "tx_encap_failures"},
560    { Q_STATS_OFFSET32(tx_hw_queue_full),
561                4, "tx_hw_queue_full"},
562    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
563                4, "tx_hw_max_queue_depth"},
564    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
565                4, "tx_dma_mapping_failure"},
566    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
567                4, "tx_max_drbr_queue_depth"},
568    { Q_STATS_OFFSET32(tx_window_violation_std),
569                4, "tx_window_violation_std"},
570    { Q_STATS_OFFSET32(tx_window_violation_tso),
571                4, "tx_window_violation_tso"},
572    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
573                4, "tx_chain_lost_mbuf"},
574    { Q_STATS_OFFSET32(tx_frames_deferred),
575                4, "tx_frames_deferred"},
576    { Q_STATS_OFFSET32(tx_queue_xoff),
577                4, "tx_queue_xoff"},
578    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
579                4, "mbuf_defrag_attempts"},
580    { Q_STATS_OFFSET32(mbuf_defrag_failures),
581                4, "mbuf_defrag_failures"},
582    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
583                4, "mbuf_rx_bd_alloc_failed"},
584    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
585                4, "mbuf_rx_bd_mapping_failed"},
586    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
587                4, "mbuf_rx_tpa_alloc_failed"},
588    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
589                4, "mbuf_rx_tpa_mapping_failed"},
590    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
591                4, "mbuf_rx_sge_alloc_failed"},
592    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
593                4, "mbuf_rx_sge_mapping_failed"},
594    { Q_STATS_OFFSET32(mbuf_alloc_tx),
595                4, "mbuf_alloc_tx"},
596    { Q_STATS_OFFSET32(mbuf_alloc_rx),
597                4, "mbuf_alloc_rx"},
598    { Q_STATS_OFFSET32(mbuf_alloc_sge),
599                4, "mbuf_alloc_sge"},
600    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
601                4, "mbuf_alloc_tpa"}
602};
603
604#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
605#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
606
607
608static void    bxe_cmng_fns_init(struct bxe_softc *sc,
609                                 uint8_t          read_cfg,
610                                 uint8_t          cmng_type);
611static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
612static void    storm_memset_cmng(struct bxe_softc *sc,
613                                 struct cmng_init *cmng,
614                                 uint8_t          port);
615static void    bxe_set_reset_global(struct bxe_softc *sc);
616static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
617static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
618                                 int              engine);
619static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
620static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
621                                   uint8_t          *global,
622                                   uint8_t          print);
623static void    bxe_int_disable(struct bxe_softc *sc);
624static int     bxe_release_leader_lock(struct bxe_softc *sc);
625static void    bxe_pf_disable(struct bxe_softc *sc);
626static void    bxe_free_fp_buffers(struct bxe_softc *sc);
627static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
628                                      struct bxe_fastpath *fp,
629                                      uint16_t            rx_bd_prod,
630                                      uint16_t            rx_cq_prod,
631                                      uint16_t            rx_sge_prod);
632static void    bxe_link_report_locked(struct bxe_softc *sc);
633static void    bxe_link_report(struct bxe_softc *sc);
634static void    bxe_link_status_update(struct bxe_softc *sc);
635static void    bxe_periodic_callout_func(void *xsc);
636static void    bxe_periodic_start(struct bxe_softc *sc);
637static void    bxe_periodic_stop(struct bxe_softc *sc);
638static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
639                                    uint16_t prev_index,
640                                    uint16_t index);
641static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
642                                     int                 queue);
643static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
644                                     uint16_t            index);
645static uint8_t bxe_txeof(struct bxe_softc *sc,
646                         struct bxe_fastpath *fp);
647static void    bxe_task_fp(struct bxe_fastpath *fp);
648static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
649                                     struct mbuf      *m,
650                                     uint8_t          contents);
651static int     bxe_alloc_mem(struct bxe_softc *sc);
652static void    bxe_free_mem(struct bxe_softc *sc);
653static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
654static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
655static int     bxe_interrupt_attach(struct bxe_softc *sc);
656static void    bxe_interrupt_detach(struct bxe_softc *sc);
657static void    bxe_set_rx_mode(struct bxe_softc *sc);
658static int     bxe_init_locked(struct bxe_softc *sc);
659static int     bxe_stop_locked(struct bxe_softc *sc);
660static __noinline int bxe_nic_load(struct bxe_softc *sc,
661                                   int              load_mode);
662static __noinline int bxe_nic_unload(struct bxe_softc *sc,
663                                     uint32_t         unload_mode,
664                                     uint8_t          keep_link);
665
666static void bxe_handle_sp_tq(void *context, int pending);
667static void bxe_handle_fp_tq(void *context, int pending);
668
669static int bxe_add_cdev(struct bxe_softc *sc);
670static void bxe_del_cdev(struct bxe_softc *sc);
671static int bxe_grc_dump(struct bxe_softc *sc);
672
673/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
674uint32_t
675calc_crc32(uint8_t  *crc32_packet,
676           uint32_t crc32_length,
677           uint32_t crc32_seed,
678           uint8_t  complement)
679{
680   uint32_t byte         = 0;
681   uint32_t bit          = 0;
682   uint8_t  msb          = 0;
683   uint32_t temp         = 0;
684   uint32_t shft         = 0;
685   uint8_t  current_byte = 0;
686   uint32_t crc32_result = crc32_seed;
687   const uint32_t CRC32_POLY = 0x1edc6f41;
688
689   if ((crc32_packet == NULL) ||
690       (crc32_length == 0) ||
691       ((crc32_length % 8) != 0))
692    {
693        return (crc32_result);
694    }
695
696    for (byte = 0; byte < crc32_length; byte = byte + 1)
697    {
698        current_byte = crc32_packet[byte];
699        for (bit = 0; bit < 8; bit = bit + 1)
700        {
701            /* msb = crc32_result[31]; */
702            msb = (uint8_t)(crc32_result >> 31);
703
704            crc32_result = crc32_result << 1;
705
706            /* it (msb != current_byte[bit]) */
707            if (msb != (0x1 & (current_byte >> bit)))
708            {
709                crc32_result = crc32_result ^ CRC32_POLY;
710                /* crc32_result[0] = 1 */
711                crc32_result |= 1;
712            }
713        }
714    }
715
716    /* Last step is to:
717     * 1. "mirror" every bit
718     * 2. swap the 4 bytes
719     * 3. complement each bit
720     */
721
722    /* Mirror */
723    temp = crc32_result;
724    shft = sizeof(crc32_result) * 8 - 1;
725
726    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
727    {
728        temp <<= 1;
729        temp |= crc32_result & 1;
730        shft-- ;
731    }
732
733    /* temp[31-bit] = crc32_result[bit] */
734    temp <<= shft;
735
736    /* Swap */
737    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
738    {
739        uint32_t t0, t1, t2, t3;
740        t0 = (0x000000ff & (temp >> 24));
741        t1 = (0x0000ff00 & (temp >> 8));
742        t2 = (0x00ff0000 & (temp << 8));
743        t3 = (0xff000000 & (temp << 24));
744        crc32_result = t0 | t1 | t2 | t3;
745    }
746
747    /* Complement */
748    if (complement)
749    {
750        crc32_result = ~crc32_result;
751    }
752
753    return (crc32_result);
754}
755
756int
757bxe_test_bit(int                    nr,
758             volatile unsigned long *addr)
759{
760    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
761}
762
763void
764bxe_set_bit(unsigned int           nr,
765            volatile unsigned long *addr)
766{
767    atomic_set_acq_long(addr, (1 << nr));
768}
769
770void
771bxe_clear_bit(int                    nr,
772              volatile unsigned long *addr)
773{
774    atomic_clear_acq_long(addr, (1 << nr));
775}
776
777int
778bxe_test_and_set_bit(int                    nr,
779                       volatile unsigned long *addr)
780{
781    unsigned long x;
782    nr = (1 << nr);
783    do {
784        x = *addr;
785    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
786    // if (x & nr) bit_was_set; else bit_was_not_set;
787    return (x & nr);
788}
789
790int
791bxe_test_and_clear_bit(int                    nr,
792                       volatile unsigned long *addr)
793{
794    unsigned long x;
795    nr = (1 << nr);
796    do {
797        x = *addr;
798    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
799    // if (x & nr) bit_was_set; else bit_was_not_set;
800    return (x & nr);
801}
802
803int
804bxe_cmpxchg(volatile int *addr,
805            int          old,
806            int          new)
807{
808    int x;
809    do {
810        x = *addr;
811    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
812    return (x);
813}
814
815/*
816 * Get DMA memory from the OS.
817 *
818 * Validates that the OS has provided DMA buffers in response to a
819 * bus_dmamap_load call and saves the physical address of those buffers.
820 * When the callback is used the OS will return 0 for the mapping function
821 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
822 * failures back to the caller.
823 *
824 * Returns:
825 *   Nothing.
826 */
827static void
828bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
829{
830    struct bxe_dma *dma = arg;
831
832    if (error) {
833        dma->paddr = 0;
834        dma->nseg  = 0;
835        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
836    } else {
837        dma->paddr = segs->ds_addr;
838        dma->nseg  = nseg;
839    }
840}
841
842/*
843 * Allocate a block of memory and map it for DMA. No partial completions
844 * allowed and release any resources acquired if we can't acquire all
845 * resources.
846 *
847 * Returns:
848 *   0 = Success, !0 = Failure
849 */
850int
851bxe_dma_alloc(struct bxe_softc *sc,
852              bus_size_t       size,
853              struct bxe_dma   *dma,
854              const char       *msg)
855{
856    int rc;
857
858    if (dma->size > 0) {
859        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
860              (unsigned long)dma->size);
861        return (1);
862    }
863
864    memset(dma, 0, sizeof(*dma)); /* sanity */
865    dma->sc   = sc;
866    dma->size = size;
867    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
868
869    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
870                            BCM_PAGE_SIZE,      /* alignment */
871                            0,                  /* boundary limit */
872                            BUS_SPACE_MAXADDR,  /* restricted low */
873                            BUS_SPACE_MAXADDR,  /* restricted hi */
874                            NULL,               /* addr filter() */
875                            NULL,               /* addr filter() arg */
876                            size,               /* max map size */
877                            1,                  /* num discontinuous */
878                            size,               /* max seg size */
879                            BUS_DMA_ALLOCNOW,   /* flags */
880                            NULL,               /* lock() */
881                            NULL,               /* lock() arg */
882                            &dma->tag);         /* returned dma tag */
883    if (rc != 0) {
884        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
885        memset(dma, 0, sizeof(*dma));
886        return (1);
887    }
888
889    rc = bus_dmamem_alloc(dma->tag,
890                          (void **)&dma->vaddr,
891                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
892                          &dma->map);
893    if (rc != 0) {
894        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
895        bus_dma_tag_destroy(dma->tag);
896        memset(dma, 0, sizeof(*dma));
897        return (1);
898    }
899
900    rc = bus_dmamap_load(dma->tag,
901                         dma->map,
902                         dma->vaddr,
903                         size,
904                         bxe_dma_map_addr, /* BLOGD in here */
905                         dma,
906                         BUS_DMA_NOWAIT);
907    if (rc != 0) {
908        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
909        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
910        bus_dma_tag_destroy(dma->tag);
911        memset(dma, 0, sizeof(*dma));
912        return (1);
913    }
914
915    return (0);
916}
917
918void
919bxe_dma_free(struct bxe_softc *sc,
920             struct bxe_dma   *dma)
921{
922    if (dma->size > 0) {
923        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
924
925        bus_dmamap_sync(dma->tag, dma->map,
926                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
927        bus_dmamap_unload(dma->tag, dma->map);
928        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
929        bus_dma_tag_destroy(dma->tag);
930    }
931
932    memset(dma, 0, sizeof(*dma));
933}
934
935/*
936 * These indirect read and write routines are only during init.
937 * The locking is handled by the MCP.
938 */
939
940void
941bxe_reg_wr_ind(struct bxe_softc *sc,
942               uint32_t         addr,
943               uint32_t         val)
944{
945    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
946    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
947    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
948}
949
950uint32_t
951bxe_reg_rd_ind(struct bxe_softc *sc,
952               uint32_t         addr)
953{
954    uint32_t val;
955
956    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
957    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
958    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
959
960    return (val);
961}
962
963static int
964bxe_acquire_hw_lock(struct bxe_softc *sc,
965                    uint32_t         resource)
966{
967    uint32_t lock_status;
968    uint32_t resource_bit = (1 << resource);
969    int func = SC_FUNC(sc);
970    uint32_t hw_lock_control_reg;
971    int cnt;
972
973    /* validate the resource is within range */
974    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
975        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
976            " resource_bit 0x%x\n", resource, resource_bit);
977        return (-1);
978    }
979
980    if (func <= 5) {
981        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
982    } else {
983        hw_lock_control_reg =
984                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
985    }
986
987    /* validate the resource is not already taken */
988    lock_status = REG_RD(sc, hw_lock_control_reg);
989    if (lock_status & resource_bit) {
990        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
991              resource, lock_status, resource_bit);
992        return (-1);
993    }
994
995    /* try every 5ms for 5 seconds */
996    for (cnt = 0; cnt < 1000; cnt++) {
997        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
998        lock_status = REG_RD(sc, hw_lock_control_reg);
999        if (lock_status & resource_bit) {
1000            return (0);
1001        }
1002        DELAY(5000);
1003    }
1004
1005    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1006        resource, resource_bit);
1007    return (-1);
1008}
1009
1010static int
1011bxe_release_hw_lock(struct bxe_softc *sc,
1012                    uint32_t         resource)
1013{
1014    uint32_t lock_status;
1015    uint32_t resource_bit = (1 << resource);
1016    int func = SC_FUNC(sc);
1017    uint32_t hw_lock_control_reg;
1018
1019    /* validate the resource is within range */
1020    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1021        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1022            " resource_bit 0x%x\n", resource, resource_bit);
1023        return (-1);
1024    }
1025
1026    if (func <= 5) {
1027        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1028    } else {
1029        hw_lock_control_reg =
1030                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1031    }
1032
1033    /* validate the resource is currently taken */
1034    lock_status = REG_RD(sc, hw_lock_control_reg);
1035    if (!(lock_status & resource_bit)) {
1036        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1037              resource, lock_status, resource_bit);
1038        return (-1);
1039    }
1040
1041    REG_WR(sc, hw_lock_control_reg, resource_bit);
1042    return (0);
1043}
1044static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1045{
1046	BXE_PHY_LOCK(sc);
1047	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1048}
1049
1050static void bxe_release_phy_lock(struct bxe_softc *sc)
1051{
1052	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1053	BXE_PHY_UNLOCK(sc);
1054}
1055/*
1056 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1057 * had we done things the other way around, if two pfs from the same port
1058 * would attempt to access nvram at the same time, we could run into a
1059 * scenario such as:
1060 * pf A takes the port lock.
1061 * pf B succeeds in taking the same lock since they are from the same port.
1062 * pf A takes the per pf misc lock. Performs eeprom access.
1063 * pf A finishes. Unlocks the per pf misc lock.
1064 * Pf B takes the lock and proceeds to perform it's own access.
1065 * pf A unlocks the per port lock, while pf B is still working (!).
1066 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1067 * access corrupted by pf B).*
1068 */
1069static int
1070bxe_acquire_nvram_lock(struct bxe_softc *sc)
1071{
1072    int port = SC_PORT(sc);
1073    int count, i;
1074    uint32_t val = 0;
1075
1076    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1077    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1078
1079    /* adjust timeout for emulation/FPGA */
1080    count = NVRAM_TIMEOUT_COUNT;
1081    if (CHIP_REV_IS_SLOW(sc)) {
1082        count *= 100;
1083    }
1084
1085    /* request access to nvram interface */
1086    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1087           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1088
1089    for (i = 0; i < count*10; i++) {
1090        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1091        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1092            break;
1093        }
1094
1095        DELAY(5);
1096    }
1097
1098    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1099        BLOGE(sc, "Cannot get access to nvram interface "
1100            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1101            port, val);
1102        return (-1);
1103    }
1104
1105    return (0);
1106}
1107
1108static int
1109bxe_release_nvram_lock(struct bxe_softc *sc)
1110{
1111    int port = SC_PORT(sc);
1112    int count, i;
1113    uint32_t val = 0;
1114
1115    /* adjust timeout for emulation/FPGA */
1116    count = NVRAM_TIMEOUT_COUNT;
1117    if (CHIP_REV_IS_SLOW(sc)) {
1118        count *= 100;
1119    }
1120
1121    /* relinquish nvram interface */
1122    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1123           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1124
1125    for (i = 0; i < count*10; i++) {
1126        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1127        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1128            break;
1129        }
1130
1131        DELAY(5);
1132    }
1133
1134    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1135        BLOGE(sc, "Cannot free access to nvram interface "
1136            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1137            port, val);
1138        return (-1);
1139    }
1140
1141    /* release HW lock: protect against other PFs in PF Direct Assignment */
1142    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1143
1144    return (0);
1145}
1146
1147static void
1148bxe_enable_nvram_access(struct bxe_softc *sc)
1149{
1150    uint32_t val;
1151
1152    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1153
1154    /* enable both bits, even on read */
1155    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1156           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1157}
1158
1159static void
1160bxe_disable_nvram_access(struct bxe_softc *sc)
1161{
1162    uint32_t val;
1163
1164    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1165
1166    /* disable both bits, even after read */
1167    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1168           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1169                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1170}
1171
1172static int
1173bxe_nvram_read_dword(struct bxe_softc *sc,
1174                     uint32_t         offset,
1175                     uint32_t         *ret_val,
1176                     uint32_t         cmd_flags)
1177{
1178    int count, i, rc;
1179    uint32_t val;
1180
1181    /* build the command word */
1182    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1183
1184    /* need to clear DONE bit separately */
1185    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1186
1187    /* address of the NVRAM to read from */
1188    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1189           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1190
1191    /* issue a read command */
1192    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1193
1194    /* adjust timeout for emulation/FPGA */
1195    count = NVRAM_TIMEOUT_COUNT;
1196    if (CHIP_REV_IS_SLOW(sc)) {
1197        count *= 100;
1198    }
1199
1200    /* wait for completion */
1201    *ret_val = 0;
1202    rc = -1;
1203    for (i = 0; i < count; i++) {
1204        DELAY(5);
1205        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1206
1207        if (val & MCPR_NVM_COMMAND_DONE) {
1208            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1209            /* we read nvram data in cpu order
1210             * but ethtool sees it as an array of bytes
1211             * converting to big-endian will do the work
1212             */
1213            *ret_val = htobe32(val);
1214            rc = 0;
1215            break;
1216        }
1217    }
1218
1219    if (rc == -1) {
1220        BLOGE(sc, "nvram read timeout expired "
1221            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1222            offset, cmd_flags, val);
1223    }
1224
1225    return (rc);
1226}
1227
1228static int
1229bxe_nvram_read(struct bxe_softc *sc,
1230               uint32_t         offset,
1231               uint8_t          *ret_buf,
1232               int              buf_size)
1233{
1234    uint32_t cmd_flags;
1235    uint32_t val;
1236    int rc;
1237
1238    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1239        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1240              offset, buf_size);
1241        return (-1);
1242    }
1243
1244    if ((offset + buf_size) > sc->devinfo.flash_size) {
1245        BLOGE(sc, "Invalid parameter, "
1246                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1247              offset, buf_size, sc->devinfo.flash_size);
1248        return (-1);
1249    }
1250
1251    /* request access to nvram interface */
1252    rc = bxe_acquire_nvram_lock(sc);
1253    if (rc) {
1254        return (rc);
1255    }
1256
1257    /* enable access to nvram interface */
1258    bxe_enable_nvram_access(sc);
1259
1260    /* read the first word(s) */
1261    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1262    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1263        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1264        memcpy(ret_buf, &val, 4);
1265
1266        /* advance to the next dword */
1267        offset += sizeof(uint32_t);
1268        ret_buf += sizeof(uint32_t);
1269        buf_size -= sizeof(uint32_t);
1270        cmd_flags = 0;
1271    }
1272
1273    if (rc == 0) {
1274        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1275        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1276        memcpy(ret_buf, &val, 4);
1277    }
1278
1279    /* disable access to nvram interface */
1280    bxe_disable_nvram_access(sc);
1281    bxe_release_nvram_lock(sc);
1282
1283    return (rc);
1284}
1285
1286static int
1287bxe_nvram_write_dword(struct bxe_softc *sc,
1288                      uint32_t         offset,
1289                      uint32_t         val,
1290                      uint32_t         cmd_flags)
1291{
1292    int count, i, rc;
1293
1294    /* build the command word */
1295    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1296
1297    /* need to clear DONE bit separately */
1298    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1299
1300    /* write the data */
1301    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1302
1303    /* address of the NVRAM to write to */
1304    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1305           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1306
1307    /* issue the write command */
1308    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1309
1310    /* adjust timeout for emulation/FPGA */
1311    count = NVRAM_TIMEOUT_COUNT;
1312    if (CHIP_REV_IS_SLOW(sc)) {
1313        count *= 100;
1314    }
1315
1316    /* wait for completion */
1317    rc = -1;
1318    for (i = 0; i < count; i++) {
1319        DELAY(5);
1320        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1321        if (val & MCPR_NVM_COMMAND_DONE) {
1322            rc = 0;
1323            break;
1324        }
1325    }
1326
1327    if (rc == -1) {
1328        BLOGE(sc, "nvram write timeout expired "
1329            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1330            offset, cmd_flags, val);
1331    }
1332
1333    return (rc);
1334}
1335
1336#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1337
1338static int
1339bxe_nvram_write1(struct bxe_softc *sc,
1340                 uint32_t         offset,
1341                 uint8_t          *data_buf,
1342                 int              buf_size)
1343{
1344    uint32_t cmd_flags;
1345    uint32_t align_offset;
1346    uint32_t val;
1347    int rc;
1348
1349    if ((offset + buf_size) > sc->devinfo.flash_size) {
1350        BLOGE(sc, "Invalid parameter, "
1351                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1352              offset, buf_size, sc->devinfo.flash_size);
1353        return (-1);
1354    }
1355
1356    /* request access to nvram interface */
1357    rc = bxe_acquire_nvram_lock(sc);
1358    if (rc) {
1359        return (rc);
1360    }
1361
1362    /* enable access to nvram interface */
1363    bxe_enable_nvram_access(sc);
1364
1365    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1366    align_offset = (offset & ~0x03);
1367    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1368
1369    if (rc == 0) {
1370        val &= ~(0xff << BYTE_OFFSET(offset));
1371        val |= (*data_buf << BYTE_OFFSET(offset));
1372
1373        /* nvram data is returned as an array of bytes
1374         * convert it back to cpu order
1375         */
1376        val = be32toh(val);
1377
1378        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1379    }
1380
1381    /* disable access to nvram interface */
1382    bxe_disable_nvram_access(sc);
1383    bxe_release_nvram_lock(sc);
1384
1385    return (rc);
1386}
1387
1388static int
1389bxe_nvram_write(struct bxe_softc *sc,
1390                uint32_t         offset,
1391                uint8_t          *data_buf,
1392                int              buf_size)
1393{
1394    uint32_t cmd_flags;
1395    uint32_t val;
1396    uint32_t written_so_far;
1397    int rc;
1398
1399    if (buf_size == 1) {
1400        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1401    }
1402
1403    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1404        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1405              offset, buf_size);
1406        return (-1);
1407    }
1408
1409    if (buf_size == 0) {
1410        return (0); /* nothing to do */
1411    }
1412
1413    if ((offset + buf_size) > sc->devinfo.flash_size) {
1414        BLOGE(sc, "Invalid parameter, "
1415                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1416              offset, buf_size, sc->devinfo.flash_size);
1417        return (-1);
1418    }
1419
1420    /* request access to nvram interface */
1421    rc = bxe_acquire_nvram_lock(sc);
1422    if (rc) {
1423        return (rc);
1424    }
1425
1426    /* enable access to nvram interface */
1427    bxe_enable_nvram_access(sc);
1428
1429    written_so_far = 0;
1430    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1431    while ((written_so_far < buf_size) && (rc == 0)) {
1432        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1433            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1434        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1435            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1436        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1437            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1438        }
1439
1440        memcpy(&val, data_buf, 4);
1441
1442        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1443
1444        /* advance to the next dword */
1445        offset += sizeof(uint32_t);
1446        data_buf += sizeof(uint32_t);
1447        written_so_far += sizeof(uint32_t);
1448        cmd_flags = 0;
1449    }
1450
1451    /* disable access to nvram interface */
1452    bxe_disable_nvram_access(sc);
1453    bxe_release_nvram_lock(sc);
1454
1455    return (rc);
1456}
1457
1458/* copy command into DMAE command memory and set DMAE command Go */
1459void
1460bxe_post_dmae(struct bxe_softc    *sc,
1461              struct dmae_command *dmae,
1462              int                 idx)
1463{
1464    uint32_t cmd_offset;
1465    int i;
1466
1467    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
1468    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
1469        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1470    }
1471
1472    REG_WR(sc, dmae_reg_go_c[idx], 1);
1473}
1474
1475uint32_t
1476bxe_dmae_opcode_add_comp(uint32_t opcode,
1477                         uint8_t  comp_type)
1478{
1479    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
1480                      DMAE_COMMAND_C_TYPE_ENABLE));
1481}
1482
1483uint32_t
1484bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1485{
1486    return (opcode & ~DMAE_COMMAND_SRC_RESET);
1487}
1488
1489uint32_t
1490bxe_dmae_opcode(struct bxe_softc *sc,
1491                uint8_t          src_type,
1492                uint8_t          dst_type,
1493                uint8_t          with_comp,
1494                uint8_t          comp_type)
1495{
1496    uint32_t opcode = 0;
1497
1498    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
1499               (dst_type << DMAE_COMMAND_DST_SHIFT));
1500
1501    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);
1502
1503    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1504
1505    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
1506               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));
1507
1508    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
1509
1510#ifdef __BIG_ENDIAN
1511    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1512#else
1513    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1514#endif
1515
1516    if (with_comp) {
1517        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1518    }
1519
1520    return (opcode);
1521}
1522
1523static void
1524bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1525                        struct dmae_command *dmae,
1526                        uint8_t             src_type,
1527                        uint8_t             dst_type)
1528{
1529    memset(dmae, 0, sizeof(struct dmae_command));
1530
1531    /* set the opcode */
1532    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1533                                   TRUE, DMAE_COMP_PCI);
1534
1535    /* fill in the completion parameters */
1536    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1537    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1538    dmae->comp_val     = DMAE_COMP_VAL;
1539}
1540
1541/* issue a DMAE command over the init channel and wait for completion */
1542static int
1543bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1544                         struct dmae_command *dmae)
1545{
1546    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1547    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1548
1549    BXE_DMAE_LOCK(sc);
1550
1551    /* reset completion */
1552    *wb_comp = 0;
1553
1554    /* post the command on the channel used for initializations */
1555    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1556
1557    /* wait for completion */
1558    DELAY(5);
1559
1560    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1561        if (!timeout ||
1562            (sc->recovery_state != BXE_RECOVERY_DONE &&
1563             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1564            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1565                *wb_comp, sc->recovery_state);
1566            BXE_DMAE_UNLOCK(sc);
1567            return (DMAE_TIMEOUT);
1568        }
1569
1570        timeout--;
1571        DELAY(50);
1572    }
1573
1574    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1575        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1576                *wb_comp, sc->recovery_state);
1577        BXE_DMAE_UNLOCK(sc);
1578        return (DMAE_PCI_ERROR);
1579    }
1580
1581    BXE_DMAE_UNLOCK(sc);
1582    return (0);
1583}
1584
1585void
1586bxe_read_dmae(struct bxe_softc *sc,
1587              uint32_t         src_addr,
1588              uint32_t         len32)
1589{
1590    struct dmae_command dmae;
1591    uint32_t *data;
1592    int i, rc;
1593
1594    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1595
1596    if (!sc->dmae_ready) {
1597        data = BXE_SP(sc, wb_data[0]);
1598
1599        for (i = 0; i < len32; i++) {
1600            data[i] = (CHIP_IS_E1(sc)) ?
1601                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1602                          REG_RD(sc, (src_addr + (i * 4)));
1603        }
1604
1605        return;
1606    }
1607
1608    /* set opcode and fixed command fields */
1609    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1610
1611    /* fill in addresses and len */
1612    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1613    dmae.src_addr_hi = 0;
1614    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1615    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1616    dmae.len         = len32;
1617
1618    /* issue the command and wait for completion */
1619    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1620        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1621    };
1622}
1623
1624void
1625bxe_write_dmae(struct bxe_softc *sc,
1626               bus_addr_t       dma_addr,
1627               uint32_t         dst_addr,
1628               uint32_t         len32)
1629{
1630    struct dmae_command dmae;
1631    int rc;
1632
1633    if (!sc->dmae_ready) {
1634        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1635
1636        if (CHIP_IS_E1(sc)) {
1637            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1638        } else {
1639            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1640        }
1641
1642        return;
1643    }
1644
1645    /* set opcode and fixed command fields */
1646    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1647
1648    /* fill in addresses and len */
1649    dmae.src_addr_lo = U64_LO(dma_addr);
1650    dmae.src_addr_hi = U64_HI(dma_addr);
1651    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1652    dmae.dst_addr_hi = 0;
1653    dmae.len         = len32;
1654
1655    /* issue the command and wait for completion */
1656    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1657        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1658    }
1659}
1660
1661void
1662bxe_write_dmae_phys_len(struct bxe_softc *sc,
1663                        bus_addr_t       phys_addr,
1664                        uint32_t         addr,
1665                        uint32_t         len)
1666{
1667    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1668    int offset = 0;
1669
1670    while (len > dmae_wr_max) {
1671        bxe_write_dmae(sc,
1672                       (phys_addr + offset), /* src DMA address */
1673                       (addr + offset),      /* dst GRC address */
1674                       dmae_wr_max);
1675        offset += (dmae_wr_max * 4);
1676        len -= dmae_wr_max;
1677    }
1678
1679    bxe_write_dmae(sc,
1680                   (phys_addr + offset), /* src DMA address */
1681                   (addr + offset),      /* dst GRC address */
1682                   len);
1683}
1684
1685void
1686bxe_set_ctx_validation(struct bxe_softc   *sc,
1687                       struct eth_context *cxt,
1688                       uint32_t           cid)
1689{
1690    /* ustorm cxt validation */
1691    cxt->ustorm_ag_context.cdu_usage =
1692        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1693            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1694    /* xcontext validation */
1695    cxt->xstorm_ag_context.cdu_reserved =
1696        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1697            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1698}
1699
1700static void
1701bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1702                            uint8_t          port,
1703                            uint8_t          fw_sb_id,
1704                            uint8_t          sb_index,
1705                            uint8_t          ticks)
1706{
1707    uint32_t addr =
1708        (BAR_CSTRORM_INTMEM +
1709         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1710
1711    REG_WR8(sc, addr, ticks);
1712
1713    BLOGD(sc, DBG_LOAD,
1714          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1715          port, fw_sb_id, sb_index, ticks);
1716}
1717
1718static void
1719bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1720                            uint8_t          port,
1721                            uint16_t         fw_sb_id,
1722                            uint8_t          sb_index,
1723                            uint8_t          disable)
1724{
1725    uint32_t enable_flag =
1726        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1727    uint32_t addr =
1728        (BAR_CSTRORM_INTMEM +
1729         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1730    uint8_t flags;
1731
1732    /* clear and set */
1733    flags = REG_RD8(sc, addr);
1734    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1735    flags |= enable_flag;
1736    REG_WR8(sc, addr, flags);
1737
1738    BLOGD(sc, DBG_LOAD,
1739          "port %d fw_sb_id %d sb_index %d disable %d\n",
1740          port, fw_sb_id, sb_index, disable);
1741}
1742
1743void
1744bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1745                             uint8_t          fw_sb_id,
1746                             uint8_t          sb_index,
1747                             uint8_t          disable,
1748                             uint16_t         usec)
1749{
1750    int port = SC_PORT(sc);
1751    uint8_t ticks = (usec / 4); /* XXX ??? */
1752
1753    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1754
1755    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1756    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1757}
1758
1759void
1760elink_cb_udelay(struct bxe_softc *sc,
1761                uint32_t         usecs)
1762{
1763    DELAY(usecs);
1764}
1765
1766uint32_t
1767elink_cb_reg_read(struct bxe_softc *sc,
1768                  uint32_t         reg_addr)
1769{
1770    return (REG_RD(sc, reg_addr));
1771}
1772
1773void
1774elink_cb_reg_write(struct bxe_softc *sc,
1775                   uint32_t         reg_addr,
1776                   uint32_t         val)
1777{
1778    REG_WR(sc, reg_addr, val);
1779}
1780
1781void
1782elink_cb_reg_wb_write(struct bxe_softc *sc,
1783                      uint32_t         offset,
1784                      uint32_t         *wb_write,
1785                      uint16_t         len)
1786{
1787    REG_WR_DMAE(sc, offset, wb_write, len);
1788}
1789
1790void
1791elink_cb_reg_wb_read(struct bxe_softc *sc,
1792                     uint32_t         offset,
1793                     uint32_t         *wb_write,
1794                     uint16_t         len)
1795{
1796    REG_RD_DMAE(sc, offset, wb_write, len);
1797}
1798
1799uint8_t
1800elink_cb_path_id(struct bxe_softc *sc)
1801{
1802    return (SC_PATH(sc));
1803}
1804
1805void
1806elink_cb_event_log(struct bxe_softc     *sc,
1807                   const elink_log_id_t elink_log_id,
1808                   ...)
1809{
1810    /* XXX */
1811    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1812}
1813
1814static int
1815bxe_set_spio(struct bxe_softc *sc,
1816             int              spio,
1817             uint32_t         mode)
1818{
1819    uint32_t spio_reg;
1820
1821    /* Only 2 SPIOs are configurable */
1822    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1823        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1824        return (-1);
1825    }
1826
1827    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1828
1829    /* read SPIO and mask except the float bits */
1830    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1831
1832    switch (mode) {
1833    case MISC_SPIO_OUTPUT_LOW:
1834        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1835        /* clear FLOAT and set CLR */
1836        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1837        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1838        break;
1839
1840    case MISC_SPIO_OUTPUT_HIGH:
1841        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1842        /* clear FLOAT and set SET */
1843        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1844        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1845        break;
1846
1847    case MISC_SPIO_INPUT_HI_Z:
1848        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1849        /* set FLOAT */
1850        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1851        break;
1852
1853    default:
1854        break;
1855    }
1856
1857    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1858    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1859
1860    return (0);
1861}
1862
1863static int
1864bxe_gpio_read(struct bxe_softc *sc,
1865              int              gpio_num,
1866              uint8_t          port)
1867{
1868    /* The GPIO should be swapped if swap register is set and active */
1869    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1870                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1871    int gpio_shift = (gpio_num +
1872                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1873    uint32_t gpio_mask = (1 << gpio_shift);
1874    uint32_t gpio_reg;
1875
1876    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1877        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1878            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1879            gpio_mask);
1880        return (-1);
1881    }
1882
1883    /* read GPIO value */
1884    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1885
1886    /* get the requested pin value */
1887    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1888}
1889
1890static int
1891bxe_gpio_write(struct bxe_softc *sc,
1892               int              gpio_num,
1893               uint32_t         mode,
1894               uint8_t          port)
1895{
1896    /* The GPIO should be swapped if swap register is set and active */
1897    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1898                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1899    int gpio_shift = (gpio_num +
1900                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1901    uint32_t gpio_mask = (1 << gpio_shift);
1902    uint32_t gpio_reg;
1903
1904    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1905        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1906            " gpio_shift %d gpio_mask 0x%x\n",
1907            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1908        return (-1);
1909    }
1910
1911    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1912
1913    /* read GPIO and mask except the float bits */
1914    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1915
1916    switch (mode) {
1917    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1918        BLOGD(sc, DBG_PHY,
1919              "Set GPIO %d (shift %d) -> output low\n",
1920              gpio_num, gpio_shift);
1921        /* clear FLOAT and set CLR */
1922        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1923        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1924        break;
1925
1926    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1927        BLOGD(sc, DBG_PHY,
1928              "Set GPIO %d (shift %d) -> output high\n",
1929              gpio_num, gpio_shift);
1930        /* clear FLOAT and set SET */
1931        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1933        break;
1934
1935    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1936        BLOGD(sc, DBG_PHY,
1937              "Set GPIO %d (shift %d) -> input\n",
1938              gpio_num, gpio_shift);
1939        /* set FLOAT */
1940        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1941        break;
1942
1943    default:
1944        break;
1945    }
1946
1947    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1948    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1949
1950    return (0);
1951}
1952
1953static int
1954bxe_gpio_mult_write(struct bxe_softc *sc,
1955                    uint8_t          pins,
1956                    uint32_t         mode)
1957{
1958    uint32_t gpio_reg;
1959
1960    /* any port swapping should be handled by caller */
1961
1962    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1963
1964    /* read GPIO and mask except the float bits */
1965    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1966    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1967    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1968    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1969
1970    switch (mode) {
1971    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1972        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
1973        /* set CLR */
1974        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
1975        break;
1976
1977    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1978        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
1979        /* set SET */
1980        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
1981        break;
1982
1983    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1984        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
1985        /* set FLOAT */
1986        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1987        break;
1988
1989    default:
1990        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
1991            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
1992        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1993        return (-1);
1994    }
1995
1996    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1997    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1998
1999    return (0);
2000}
2001
2002static int
2003bxe_gpio_int_write(struct bxe_softc *sc,
2004                   int              gpio_num,
2005                   uint32_t         mode,
2006                   uint8_t          port)
2007{
2008    /* The GPIO should be swapped if swap register is set and active */
2009    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2010                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2011    int gpio_shift = (gpio_num +
2012                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2013    uint32_t gpio_mask = (1 << gpio_shift);
2014    uint32_t gpio_reg;
2015
2016    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2017        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2018            " gpio_shift %d gpio_mask 0x%x\n",
2019            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2020        return (-1);
2021    }
2022
2023    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2024
2025    /* read GPIO int */
2026    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2027
2028    switch (mode) {
2029    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2030        BLOGD(sc, DBG_PHY,
2031              "Clear GPIO INT %d (shift %d) -> output low\n",
2032              gpio_num, gpio_shift);
2033        /* clear SET and set CLR */
2034        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2035        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2036        break;
2037
2038    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2039        BLOGD(sc, DBG_PHY,
2040              "Set GPIO INT %d (shift %d) -> output high\n",
2041              gpio_num, gpio_shift);
2042        /* clear CLR and set SET */
2043        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2045        break;
2046
2047    default:
2048        break;
2049    }
2050
2051    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2052    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2053
2054    return (0);
2055}
2056
2057uint32_t
2058elink_cb_gpio_read(struct bxe_softc *sc,
2059                   uint16_t         gpio_num,
2060                   uint8_t          port)
2061{
2062    return (bxe_gpio_read(sc, gpio_num, port));
2063}
2064
2065uint8_t
2066elink_cb_gpio_write(struct bxe_softc *sc,
2067                    uint16_t         gpio_num,
2068                    uint8_t          mode, /* 0=low 1=high */
2069                    uint8_t          port)
2070{
2071    return (bxe_gpio_write(sc, gpio_num, mode, port));
2072}
2073
2074uint8_t
2075elink_cb_gpio_mult_write(struct bxe_softc *sc,
2076                         uint8_t          pins,
2077                         uint8_t          mode) /* 0=low 1=high */
2078{
2079    return (bxe_gpio_mult_write(sc, pins, mode));
2080}
2081
2082uint8_t
2083elink_cb_gpio_int_write(struct bxe_softc *sc,
2084                        uint16_t         gpio_num,
2085                        uint8_t          mode, /* 0=low 1=high */
2086                        uint8_t          port)
2087{
2088    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2089}
2090
2091void
2092elink_cb_notify_link_changed(struct bxe_softc *sc)
2093{
2094    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2095                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2096}
2097
2098/* send the MCP a request, block until there is a reply */
2099uint32_t
2100elink_cb_fw_command(struct bxe_softc *sc,
2101                    uint32_t         command,
2102                    uint32_t         param)
2103{
2104    int mb_idx = SC_FW_MB_IDX(sc);
2105    uint32_t seq;
2106    uint32_t rc = 0;
2107    uint32_t cnt = 1;
2108    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2109
2110    BXE_FWMB_LOCK(sc);
2111
2112    seq = ++sc->fw_seq;
2113    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2114    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2115
2116    BLOGD(sc, DBG_PHY,
2117          "wrote command 0x%08x to FW MB param 0x%08x\n",
2118          (command | seq), param);
2119
2120    /* Let the FW do it's magic. GIve it up to 5 seconds... */
2121    do {
2122        DELAY(delay * 1000);
2123        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2124    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2125
2126    BLOGD(sc, DBG_PHY,
2127          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2128          cnt*delay, rc, seq);
2129
2130    /* is this a reply to our command? */
2131    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2132        rc &= FW_MSG_CODE_MASK;
2133    } else {
2134        /* Ruh-roh! */
2135        BLOGE(sc, "FW failed to respond!\n");
2136        // XXX bxe_fw_dump(sc);
2137        rc = 0;
2138    }
2139
2140    BXE_FWMB_UNLOCK(sc);
2141    return (rc);
2142}
2143
2144static uint32_t
2145bxe_fw_command(struct bxe_softc *sc,
2146               uint32_t         command,
2147               uint32_t         param)
2148{
2149    return (elink_cb_fw_command(sc, command, param));
2150}
2151
2152static void
2153__storm_memset_dma_mapping(struct bxe_softc *sc,
2154                           uint32_t         addr,
2155                           bus_addr_t       mapping)
2156{
2157    REG_WR(sc, addr, U64_LO(mapping));
2158    REG_WR(sc, (addr + 4), U64_HI(mapping));
2159}
2160
2161static void
2162storm_memset_spq_addr(struct bxe_softc *sc,
2163                      bus_addr_t       mapping,
2164                      uint16_t         abs_fid)
2165{
2166    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2167                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2168    __storm_memset_dma_mapping(sc, addr, mapping);
2169}
2170
2171static void
2172storm_memset_vf_to_pf(struct bxe_softc *sc,
2173                      uint16_t         abs_fid,
2174                      uint16_t         pf_id)
2175{
2176    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2177    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2178    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2179    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2180}
2181
2182static void
2183storm_memset_func_en(struct bxe_softc *sc,
2184                     uint16_t         abs_fid,
2185                     uint8_t          enable)
2186{
2187    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2188    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2189    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2190    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2191}
2192
2193static void
2194storm_memset_eq_data(struct bxe_softc       *sc,
2195                     struct event_ring_data *eq_data,
2196                     uint16_t               pfid)
2197{
2198    uint32_t addr;
2199    size_t size;
2200
2201    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2202    size = sizeof(struct event_ring_data);
2203    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2204}
2205
2206static void
2207storm_memset_eq_prod(struct bxe_softc *sc,
2208                     uint16_t         eq_prod,
2209                     uint16_t         pfid)
2210{
2211    uint32_t addr = (BAR_CSTRORM_INTMEM +
2212                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2213    REG_WR16(sc, addr, eq_prod);
2214}
2215
2216/*
2217 * Post a slowpath command.
2218 *
2219 * A slowpath command is used to propogate a configuration change through
2220 * the controller in a controlled manner, allowing each STORM processor and
2221 * other H/W blocks to phase in the change.  The commands sent on the
2222 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2223 * completion of the ramrod will occur in different ways.  Here's a
2224 * breakdown of ramrods and how they complete:
2225 *
2226 * RAMROD_CMD_ID_ETH_PORT_SETUP
2227 *   Used to setup the leading connection on a port.  Completes on the
2228 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2229 *
2230 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2231 *   Used to setup an additional connection on a port.  Completes on the
2232 *   RCQ of the multi-queue/RSS connection being initialized.
2233 *
2234 * RAMROD_CMD_ID_ETH_STAT_QUERY
2235 *   Used to force the storm processors to update the statistics database
2236 *   in host memory.  This ramrod is send on the leading connection CID and
2237 *   completes as an index increment of the CSTORM on the default status
2238 *   block.
2239 *
2240 * RAMROD_CMD_ID_ETH_UPDATE
2241 *   Used to update the state of the leading connection, usually to udpate
2242 *   the RSS indirection table.  Completes on the RCQ of the leading
2243 *   connection. (Not currently used under FreeBSD until OS support becomes
2244 *   available.)
2245 *
2246 * RAMROD_CMD_ID_ETH_HALT
2247 *   Used when tearing down a connection prior to driver unload.  Completes
2248 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2249 *   use this on the leading connection.
2250 *
2251 * RAMROD_CMD_ID_ETH_SET_MAC
2252 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2253 *   the RCQ of the leading connection.
2254 *
2255 * RAMROD_CMD_ID_ETH_CFC_DEL
2256 *   Used when tearing down a conneciton prior to driver unload.  Completes
2257 *   on the RCQ of the leading connection (since the current connection
2258 *   has been completely removed from controller memory).
2259 *
2260 * RAMROD_CMD_ID_ETH_PORT_DEL
2261 *   Used to tear down the leading connection prior to driver unload,
2262 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2263 *   default status block.
2264 *
2265 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2266 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2267 *   RSS connection that is being offloaded.  (Not currently used under
2268 *   FreeBSD.)
2269 *
2270 * There can only be one command pending per function.
2271 *
2272 * Returns:
2273 *   0 = Success, !0 = Failure.
2274 */
2275
2276/* must be called under the spq lock */
2277static inline
2278struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2279{
2280    struct eth_spe *next_spe = sc->spq_prod_bd;
2281
2282    if (sc->spq_prod_bd == sc->spq_last_bd) {
2283        /* wrap back to the first eth_spq */
2284        sc->spq_prod_bd = sc->spq;
2285        sc->spq_prod_idx = 0;
2286    } else {
2287        sc->spq_prod_bd++;
2288        sc->spq_prod_idx++;
2289    }
2290
2291    return (next_spe);
2292}
2293
2294/* must be called under the spq lock */
2295static inline
2296void bxe_sp_prod_update(struct bxe_softc *sc)
2297{
2298    int func = SC_FUNC(sc);
2299
2300    /*
2301     * Make sure that BD data is updated before writing the producer.
2302     * BD data is written to the memory, the producer is read from the
2303     * memory, thus we need a full memory barrier to ensure the ordering.
2304     */
2305    mb();
2306
2307    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2308             sc->spq_prod_idx);
2309
2310    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2311                      BUS_SPACE_BARRIER_WRITE);
2312}
2313
2314/**
2315 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2316 *
2317 * @cmd:      command to check
2318 * @cmd_type: command type
2319 */
2320static inline
2321int bxe_is_contextless_ramrod(int cmd,
2322                              int cmd_type)
2323{
2324    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2325        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2326        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2327        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2328        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2329        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2330        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2331        return (TRUE);
2332    } else {
2333        return (FALSE);
2334    }
2335}
2336
2337/**
2338 * bxe_sp_post - place a single command on an SP ring
2339 *
2340 * @sc:         driver handle
2341 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2342 * @cid:        SW CID the command is related to
2343 * @data_hi:    command private data address (high 32 bits)
2344 * @data_lo:    command private data address (low 32 bits)
2345 * @cmd_type:   command type (e.g. NONE, ETH)
2346 *
2347 * SP data is handled as if it's always an address pair, thus data fields are
2348 * not swapped to little endian in upper functions. Instead this function swaps
2349 * data as if it's two uint32 fields.
2350 */
2351int
2352bxe_sp_post(struct bxe_softc *sc,
2353            int              command,
2354            int              cid,
2355            uint32_t         data_hi,
2356            uint32_t         data_lo,
2357            int              cmd_type)
2358{
2359    struct eth_spe *spe;
2360    uint16_t type;
2361    int common;
2362
2363    common = bxe_is_contextless_ramrod(command, cmd_type);
2364
2365    BXE_SP_LOCK(sc);
2366
2367    if (common) {
2368        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2369            BLOGE(sc, "EQ ring is full!\n");
2370            BXE_SP_UNLOCK(sc);
2371            return (-1);
2372        }
2373    } else {
2374        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2375            BLOGE(sc, "SPQ ring is full!\n");
2376            BXE_SP_UNLOCK(sc);
2377            return (-1);
2378        }
2379    }
2380
2381    spe = bxe_sp_get_next(sc);
2382
2383    /* CID needs port number to be encoded int it */
2384    spe->hdr.conn_and_cmd_data =
2385        htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
2386
2387    type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
2388
2389    /* TBD: Check if it works for VFs */
2390    type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
2391             SPE_HDR_FUNCTION_ID);
2392
2393    spe->hdr.type = htole16(type);
2394
2395    spe->data.update_data_addr.hi = htole32(data_hi);
2396    spe->data.update_data_addr.lo = htole32(data_lo);
2397
2398    /*
2399     * It's ok if the actual decrement is issued towards the memory
2400     * somewhere between the lock and unlock. Thus no more explict
2401     * memory barrier is needed.
2402     */
2403    if (common) {
2404        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2405    } else {
2406        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2407    }
2408
2409    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2410    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2411          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2412    BLOGD(sc, DBG_SP,
2413          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2414          sc->spq_prod_idx,
2415          (uint32_t)U64_HI(sc->spq_dma.paddr),
2416          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2417          command,
2418          common,
2419          HW_CID(sc, cid),
2420          data_hi,
2421          data_lo,
2422          type,
2423          atomic_load_acq_long(&sc->cq_spq_left),
2424          atomic_load_acq_long(&sc->eq_spq_left));
2425
2426    bxe_sp_prod_update(sc);
2427
2428    BXE_SP_UNLOCK(sc);
2429    return (0);
2430}
2431
2432/**
2433 * bxe_debug_print_ind_table - prints the indirection table configuration.
2434 *
2435 * @sc: driver hanlde
2436 * @p:  pointer to rss configuration
2437 */
2438
2439/*
2440 * FreeBSD Device probe function.
2441 *
2442 * Compares the device found to the driver's list of supported devices and
2443 * reports back to the bsd loader whether this is the right driver for the device.
2444 * This is the driver entry function called from the "kldload" command.
2445 *
2446 * Returns:
2447 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2448 */
2449static int
2450bxe_probe(device_t dev)
2451{
2452    struct bxe_softc *sc;
2453    struct bxe_device_type *t;
2454    char *descbuf;
2455    uint16_t did, sdid, svid, vid;
2456
2457    /* Find our device structure */
2458    sc = device_get_softc(dev);
2459    sc->dev = dev;
2460    t = bxe_devs;
2461
2462    /* Get the data for the device to be probed. */
2463    vid  = pci_get_vendor(dev);
2464    did  = pci_get_device(dev);
2465    svid = pci_get_subvendor(dev);
2466    sdid = pci_get_subdevice(dev);
2467
2468    BLOGD(sc, DBG_LOAD,
2469          "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2470          "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2471
2472    /* Look through the list of known devices for a match. */
2473    while (t->bxe_name != NULL) {
2474        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2475            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2476            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2477            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2478            if (descbuf == NULL)
2479                return (ENOMEM);
2480
2481            /* Print out the device identity. */
2482            snprintf(descbuf, BXE_DEVDESC_MAX,
2483                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2484                     (((pci_read_config(dev, PCIR_REVID, 4) &
2485                        0xf0) >> 4) + 'A'),
2486                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2487                     BXE_DRIVER_VERSION);
2488
2489            device_set_desc_copy(dev, descbuf);
2490            free(descbuf, M_TEMP);
2491            return (BUS_PROBE_DEFAULT);
2492        }
2493        t++;
2494    }
2495
2496    return (ENXIO);
2497}
2498
2499static void
2500bxe_init_mutexes(struct bxe_softc *sc)
2501{
2502#ifdef BXE_CORE_LOCK_SX
2503    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2504             "bxe%d_core_lock", sc->unit);
2505    sx_init(&sc->core_sx, sc->core_sx_name);
2506#else
2507    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2508             "bxe%d_core_lock", sc->unit);
2509    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2510#endif
2511
2512    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2513             "bxe%d_sp_lock", sc->unit);
2514    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2515
2516    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2517             "bxe%d_dmae_lock", sc->unit);
2518    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2519
2520    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2521             "bxe%d_phy_lock", sc->unit);
2522    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2523
2524    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2525             "bxe%d_fwmb_lock", sc->unit);
2526    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2527
2528    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2529             "bxe%d_print_lock", sc->unit);
2530    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2531
2532    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2533             "bxe%d_stats_lock", sc->unit);
2534    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2535
2536    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2537             "bxe%d_mcast_lock", sc->unit);
2538    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2539}
2540
2541static void
2542bxe_release_mutexes(struct bxe_softc *sc)
2543{
2544#ifdef BXE_CORE_LOCK_SX
2545    sx_destroy(&sc->core_sx);
2546#else
2547    if (mtx_initialized(&sc->core_mtx)) {
2548        mtx_destroy(&sc->core_mtx);
2549    }
2550#endif
2551
2552    if (mtx_initialized(&sc->sp_mtx)) {
2553        mtx_destroy(&sc->sp_mtx);
2554    }
2555
2556    if (mtx_initialized(&sc->dmae_mtx)) {
2557        mtx_destroy(&sc->dmae_mtx);
2558    }
2559
2560    if (mtx_initialized(&sc->port.phy_mtx)) {
2561        mtx_destroy(&sc->port.phy_mtx);
2562    }
2563
2564    if (mtx_initialized(&sc->fwmb_mtx)) {
2565        mtx_destroy(&sc->fwmb_mtx);
2566    }
2567
2568    if (mtx_initialized(&sc->print_mtx)) {
2569        mtx_destroy(&sc->print_mtx);
2570    }
2571
2572    if (mtx_initialized(&sc->stats_mtx)) {
2573        mtx_destroy(&sc->stats_mtx);
2574    }
2575
2576    if (mtx_initialized(&sc->mcast_mtx)) {
2577        mtx_destroy(&sc->mcast_mtx);
2578    }
2579}
2580
2581static void
2582bxe_tx_disable(struct bxe_softc* sc)
2583{
2584    if_t ifp = sc->ifp;
2585
2586    /* tell the stack the driver is stopped and TX queue is full */
2587    if (ifp !=  NULL) {
2588        if_setdrvflags(ifp, 0);
2589    }
2590}
2591
2592static void
2593bxe_drv_pulse(struct bxe_softc *sc)
2594{
2595    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2596             sc->fw_drv_pulse_wr_seq);
2597}
2598
2599static inline uint16_t
2600bxe_tx_avail(struct bxe_softc *sc,
2601             struct bxe_fastpath *fp)
2602{
2603    int16_t  used;
2604    uint16_t prod;
2605    uint16_t cons;
2606
2607    prod = fp->tx_bd_prod;
2608    cons = fp->tx_bd_cons;
2609
2610    used = SUB_S16(prod, cons);
2611
2612    return (int16_t)(sc->tx_ring_size) - used;
2613}
2614
2615static inline int
2616bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2617{
2618    uint16_t hw_cons;
2619
2620    mb(); /* status block fields can change */
2621    hw_cons = le16toh(*fp->tx_cons_sb);
2622    return (hw_cons != fp->tx_pkt_cons);
2623}
2624
2625static inline uint8_t
2626bxe_has_tx_work(struct bxe_fastpath *fp)
2627{
2628    /* expand this for multi-cos if ever supported */
2629    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2630}
2631
2632static inline int
2633bxe_has_rx_work(struct bxe_fastpath *fp)
2634{
2635    uint16_t rx_cq_cons_sb;
2636
2637    mb(); /* status block fields can change */
2638    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2639    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2640        rx_cq_cons_sb++;
2641    return (fp->rx_cq_cons != rx_cq_cons_sb);
2642}
2643
2644static void
2645bxe_sp_event(struct bxe_softc    *sc,
2646             struct bxe_fastpath *fp,
2647             union eth_rx_cqe    *rr_cqe)
2648{
2649    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2650    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2651    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2652    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2653
2654    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2655          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2656
2657    switch (command) {
2658    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2659        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2660        drv_cmd = ECORE_Q_CMD_UPDATE;
2661        break;
2662
2663    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2664        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2665        drv_cmd = ECORE_Q_CMD_SETUP;
2666        break;
2667
2668    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2669        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2670        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2671        break;
2672
2673    case (RAMROD_CMD_ID_ETH_HALT):
2674        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2675        drv_cmd = ECORE_Q_CMD_HALT;
2676        break;
2677
2678    case (RAMROD_CMD_ID_ETH_TERMINATE):
2679        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2680        drv_cmd = ECORE_Q_CMD_TERMINATE;
2681        break;
2682
2683    case (RAMROD_CMD_ID_ETH_EMPTY):
2684        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2685        drv_cmd = ECORE_Q_CMD_EMPTY;
2686        break;
2687
2688    default:
2689        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2690              command, fp->index);
2691        return;
2692    }
2693
2694    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2695        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2696        /*
2697         * q_obj->complete_cmd() failure means that this was
2698         * an unexpected completion.
2699         *
2700         * In this case we don't want to increase the sc->spq_left
2701         * because apparently we haven't sent this command the first
2702         * place.
2703         */
2704        // bxe_panic(sc, ("Unexpected SP completion\n"));
2705        return;
2706    }
2707
2708    atomic_add_acq_long(&sc->cq_spq_left, 1);
2709
2710    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2711          atomic_load_acq_long(&sc->cq_spq_left));
2712}
2713
2714/*
2715 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2716 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2717 * the current aggregation queue as in-progress.
2718 */
2719static void
2720bxe_tpa_start(struct bxe_softc            *sc,
2721              struct bxe_fastpath         *fp,
2722              uint16_t                    queue,
2723              uint16_t                    cons,
2724              uint16_t                    prod,
2725              struct eth_fast_path_rx_cqe *cqe)
2726{
2727    struct bxe_sw_rx_bd tmp_bd;
2728    struct bxe_sw_rx_bd *rx_buf;
2729    struct eth_rx_bd *rx_bd;
2730    int max_agg_queues;
2731    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2732    uint16_t index;
2733
2734    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2735                       "cons=%d prod=%d\n",
2736          fp->index, queue, cons, prod);
2737
2738    max_agg_queues = MAX_AGG_QS(sc);
2739
2740    KASSERT((queue < max_agg_queues),
2741            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2742             fp->index, queue, max_agg_queues));
2743
2744    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2745            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2746             fp->index, queue));
2747
2748    /* copy the existing mbuf and mapping from the TPA pool */
2749    tmp_bd = tpa_info->bd;
2750
2751    if (tmp_bd.m == NULL) {
2752        uint32_t *tmp;
2753
2754        tmp = (uint32_t *)cqe;
2755
2756        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2757              fp->index, queue, cons, prod);
2758        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2759            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2760
2761        /* XXX Error handling? */
2762        return;
2763    }
2764
2765    /* change the TPA queue to the start state */
2766    tpa_info->state            = BXE_TPA_STATE_START;
2767    tpa_info->placement_offset = cqe->placement_offset;
2768    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2769    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2770    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2771
2772    fp->rx_tpa_queue_used |= (1 << queue);
2773
2774    /*
2775     * If all the buffer descriptors are filled with mbufs then fill in
2776     * the current consumer index with a new BD. Else if a maximum Rx
2777     * buffer limit is imposed then fill in the next producer index.
2778     */
2779    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2780                prod : cons;
2781
2782    /* move the received mbuf and mapping to TPA pool */
2783    tpa_info->bd = fp->rx_mbuf_chain[cons];
2784
2785    /* release any existing RX BD mbuf mappings */
2786    if (cons != index) {
2787        rx_buf = &fp->rx_mbuf_chain[cons];
2788
2789        if (rx_buf->m_map != NULL) {
2790            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2791                            BUS_DMASYNC_POSTREAD);
2792            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2793        }
2794
2795        /*
2796         * We get here when the maximum number of rx buffers is less than
2797         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2798         * it out here without concern of a memory leak.
2799         */
2800        fp->rx_mbuf_chain[cons].m = NULL;
2801    }
2802
2803    /* update the Rx SW BD with the mbuf info from the TPA pool */
2804    fp->rx_mbuf_chain[index] = tmp_bd;
2805
2806    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2807    rx_bd = &fp->rx_chain[index];
2808    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2809    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2810}
2811
2812/*
2813 * When a TPA aggregation is completed, loop through the individual mbufs
2814 * of the aggregation, combining them into a single mbuf which will be sent
2815 * up the stack. Refill all freed SGEs with mbufs as we go along.
2816 */
2817static int
2818bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2819                   struct bxe_fastpath       *fp,
2820                   struct bxe_sw_tpa_info    *tpa_info,
2821                   uint16_t                  queue,
2822                   uint16_t                  pages,
2823                   struct mbuf               *m,
2824			       struct eth_end_agg_rx_cqe *cqe,
2825                   uint16_t                  cqe_idx)
2826{
2827    struct mbuf *m_frag;
2828    uint32_t frag_len, frag_size, i;
2829    uint16_t sge_idx;
2830    int rc = 0;
2831    int j;
2832
2833    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2834
2835    BLOGD(sc, DBG_LRO,
2836          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2837          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2838
2839    /* make sure the aggregated frame is not too big to handle */
2840    if (pages > 8 * PAGES_PER_SGE) {
2841
2842        uint32_t *tmp = (uint32_t *)cqe;
2843
2844        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2845                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2846              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2847              tpa_info->len_on_bd, frag_size);
2848
2849        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2850            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2851
2852        bxe_panic(sc, ("sge page count error\n"));
2853        return (EINVAL);
2854    }
2855
2856    /*
2857     * Scan through the scatter gather list pulling individual mbufs into a
2858     * single mbuf for the host stack.
2859     */
2860    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2861        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2862
2863        /*
2864         * Firmware gives the indices of the SGE as if the ring is an array
2865         * (meaning that the "next" element will consume 2 indices).
2866         */
2867        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2868
2869        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2870                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2871              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2872
2873        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2874
2875        /* allocate a new mbuf for the SGE */
2876        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2877        if (rc) {
2878            /* Leave all remaining SGEs in the ring! */
2879            return (rc);
2880        }
2881
2882        /* update the fragment length */
2883        m_frag->m_len = frag_len;
2884
2885        /* concatenate the fragment to the head mbuf */
2886        m_cat(m, m_frag);
2887        fp->eth_q_stats.mbuf_alloc_sge--;
2888
2889        /* update the TPA mbuf size and remaining fragment size */
2890        m->m_pkthdr.len += frag_len;
2891        frag_size -= frag_len;
2892    }
2893
2894    BLOGD(sc, DBG_LRO,
2895          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2896          fp->index, queue, frag_size);
2897
2898    return (rc);
2899}
2900
2901static inline void
2902bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2903{
2904    int i, j;
2905
2906    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2907        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2908
2909        for (j = 0; j < 2; j++) {
2910            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2911            idx--;
2912        }
2913    }
2914}
2915
2916static inline void
2917bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2918{
2919    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2920    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2921
2922    /*
2923     * Clear the two last indices in the page to 1. These are the indices that
2924     * correspond to the "next" element, hence will never be indicated and
2925     * should be removed from the calculations.
2926     */
2927    bxe_clear_sge_mask_next_elems(fp);
2928}
2929
2930static inline void
2931bxe_update_last_max_sge(struct bxe_fastpath *fp,
2932                        uint16_t            idx)
2933{
2934    uint16_t last_max = fp->last_max_sge;
2935
2936    if (SUB_S16(idx, last_max) > 0) {
2937        fp->last_max_sge = idx;
2938    }
2939}
2940
2941static inline void
2942bxe_update_sge_prod(struct bxe_softc          *sc,
2943                    struct bxe_fastpath       *fp,
2944                    uint16_t                  sge_len,
2945                    union eth_sgl_or_raw_data *cqe)
2946{
2947    uint16_t last_max, last_elem, first_elem;
2948    uint16_t delta = 0;
2949    uint16_t i;
2950
2951    if (!sge_len) {
2952        return;
2953    }
2954
2955    /* first mark all used pages */
2956    for (i = 0; i < sge_len; i++) {
2957        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2958                            RX_SGE(le16toh(cqe->sgl[i])));
2959    }
2960
2961    BLOGD(sc, DBG_LRO,
2962          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2963          fp->index, sge_len - 1,
2964          le16toh(cqe->sgl[sge_len - 1]));
2965
2966    /* assume that the last SGE index is the biggest */
2967    bxe_update_last_max_sge(fp,
2968                            le16toh(cqe->sgl[sge_len - 1]));
2969
2970    last_max = RX_SGE(fp->last_max_sge);
2971    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2972    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2973
2974    /* if ring is not full */
2975    if (last_elem + 1 != first_elem) {
2976        last_elem++;
2977    }
2978
2979    /* now update the prod */
2980    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2981        if (__predict_true(fp->sge_mask[i])) {
2982            break;
2983        }
2984
2985        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
2986        delta += BIT_VEC64_ELEM_SZ;
2987    }
2988
2989    if (delta > 0) {
2990        fp->rx_sge_prod += delta;
2991        /* clear page-end entries */
2992        bxe_clear_sge_mask_next_elems(fp);
2993    }
2994
2995    BLOGD(sc, DBG_LRO,
2996          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
2997          fp->index, fp->last_max_sge, fp->rx_sge_prod);
2998}
2999
3000/*
3001 * The aggregation on the current TPA queue has completed. Pull the individual
3002 * mbuf fragments together into a single mbuf, perform all necessary checksum
3003 * calculations, and send the resuting mbuf to the stack.
3004 */
3005static void
3006bxe_tpa_stop(struct bxe_softc          *sc,
3007             struct bxe_fastpath       *fp,
3008             struct bxe_sw_tpa_info    *tpa_info,
3009             uint16_t                  queue,
3010             uint16_t                  pages,
3011			 struct eth_end_agg_rx_cqe *cqe,
3012             uint16_t                  cqe_idx)
3013{
3014    if_t ifp = sc->ifp;
3015    struct mbuf *m;
3016    int rc = 0;
3017
3018    BLOGD(sc, DBG_LRO,
3019          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3020          fp->index, queue, tpa_info->placement_offset,
3021          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3022
3023    m = tpa_info->bd.m;
3024
3025    /* allocate a replacement before modifying existing mbuf */
3026    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3027    if (rc) {
3028        /* drop the frame and log an error */
3029        fp->eth_q_stats.rx_soft_errors++;
3030        goto bxe_tpa_stop_exit;
3031    }
3032
3033    /* we have a replacement, fixup the current mbuf */
3034    m_adj(m, tpa_info->placement_offset);
3035    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3036
3037    /* mark the checksums valid (taken care of by the firmware) */
3038    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3039    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3040    m->m_pkthdr.csum_data = 0xffff;
3041    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3042                               CSUM_IP_VALID   |
3043                               CSUM_DATA_VALID |
3044                               CSUM_PSEUDO_HDR);
3045
3046    /* aggregate all of the SGEs into a single mbuf */
3047    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3048    if (rc) {
3049        /* drop the packet and log an error */
3050        fp->eth_q_stats.rx_soft_errors++;
3051        m_freem(m);
3052    } else {
3053        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
3054            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3055            m->m_flags |= M_VLANTAG;
3056        }
3057
3058        /* assign packet to this interface interface */
3059        if_setrcvif(m, ifp);
3060
3061#if __FreeBSD_version >= 800000
3062        /* specify what RSS queue was used for this flow */
3063        m->m_pkthdr.flowid = fp->index;
3064        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3065#endif
3066
3067        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3068        fp->eth_q_stats.rx_tpa_pkts++;
3069
3070        /* pass the frame to the stack */
3071        if_input(ifp, m);
3072    }
3073
3074    /* we passed an mbuf up the stack or dropped the frame */
3075    fp->eth_q_stats.mbuf_alloc_tpa--;
3076
3077bxe_tpa_stop_exit:
3078
3079    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3080    fp->rx_tpa_queue_used &= ~(1 << queue);
3081}
3082
3083static uint8_t
3084bxe_service_rxsgl(
3085                 struct bxe_fastpath *fp,
3086                 uint16_t len,
3087                 uint16_t lenonbd,
3088                 struct mbuf *m,
3089                 struct eth_fast_path_rx_cqe *cqe_fp)
3090{
3091    struct mbuf *m_frag;
3092    uint16_t frags, frag_len;
3093    uint16_t sge_idx = 0;
3094    uint16_t j;
3095    uint8_t i, rc = 0;
3096    uint32_t frag_size;
3097
3098    /* adjust the mbuf */
3099    m->m_len = lenonbd;
3100
3101    frag_size =  len - lenonbd;
3102    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3103
3104    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3105        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3106
3107        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3108        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3109        m_frag->m_len = frag_len;
3110
3111       /* allocate a new mbuf for the SGE */
3112        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3113        if (rc) {
3114            /* Leave all remaining SGEs in the ring! */
3115            return (rc);
3116        }
3117        fp->eth_q_stats.mbuf_alloc_sge--;
3118
3119        /* concatenate the fragment to the head mbuf */
3120        m_cat(m, m_frag);
3121
3122        frag_size -= frag_len;
3123    }
3124
3125    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3126
3127    return rc;
3128}
3129
3130static uint8_t
3131bxe_rxeof(struct bxe_softc    *sc,
3132          struct bxe_fastpath *fp)
3133{
3134    if_t ifp = sc->ifp;
3135    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3136    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3137    int rx_pkts = 0;
3138    int rc = 0;
3139
3140    BXE_FP_RX_LOCK(fp);
3141
3142    /* CQ "next element" is of the size of the regular element */
3143    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3144    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3145        hw_cq_cons++;
3146    }
3147
3148    bd_cons = fp->rx_bd_cons;
3149    bd_prod = fp->rx_bd_prod;
3150    bd_prod_fw = bd_prod;
3151    sw_cq_cons = fp->rx_cq_cons;
3152    sw_cq_prod = fp->rx_cq_prod;
3153
3154    /*
3155     * Memory barrier necessary as speculative reads of the rx
3156     * buffer can be ahead of the index in the status block
3157     */
3158    rmb();
3159
3160    BLOGD(sc, DBG_RX,
3161          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3162          fp->index, hw_cq_cons, sw_cq_cons);
3163
3164    while (sw_cq_cons != hw_cq_cons) {
3165        struct bxe_sw_rx_bd *rx_buf = NULL;
3166        union eth_rx_cqe *cqe;
3167        struct eth_fast_path_rx_cqe *cqe_fp;
3168        uint8_t cqe_fp_flags;
3169        enum eth_rx_cqe_type cqe_fp_type;
3170        uint16_t len, lenonbd,  pad;
3171        struct mbuf *m = NULL;
3172
3173        comp_ring_cons = RCQ(sw_cq_cons);
3174        bd_prod = RX_BD(bd_prod);
3175        bd_cons = RX_BD(bd_cons);
3176
3177        cqe          = &fp->rcq_chain[comp_ring_cons];
3178        cqe_fp       = &cqe->fast_path_cqe;
3179        cqe_fp_flags = cqe_fp->type_error_flags;
3180        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3181
3182        BLOGD(sc, DBG_RX,
3183              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3184              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3185              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3186              fp->index,
3187              hw_cq_cons,
3188              sw_cq_cons,
3189              bd_prod,
3190              bd_cons,
3191              CQE_TYPE(cqe_fp_flags),
3192              cqe_fp_flags,
3193              cqe_fp->status_flags,
3194              le32toh(cqe_fp->rss_hash_result),
3195              le16toh(cqe_fp->vlan_tag),
3196              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3197              le16toh(cqe_fp->len_on_bd));
3198
3199        /* is this a slowpath msg? */
3200        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3201            bxe_sp_event(sc, fp, cqe);
3202            goto next_cqe;
3203        }
3204
3205        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3206
3207        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3208            struct bxe_sw_tpa_info *tpa_info;
3209            uint16_t frag_size, pages;
3210            uint8_t queue;
3211
3212            if (CQE_TYPE_START(cqe_fp_type)) {
3213                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3214                              bd_cons, bd_prod, cqe_fp);
3215                m = NULL; /* packet not ready yet */
3216                goto next_rx;
3217            }
3218
3219            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3220                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3221
3222            queue = cqe->end_agg_cqe.queue_index;
3223            tpa_info = &fp->rx_tpa_info[queue];
3224
3225            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3226                  fp->index, queue);
3227
3228            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3229                         tpa_info->len_on_bd);
3230            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3231
3232            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3233                         &cqe->end_agg_cqe, comp_ring_cons);
3234
3235            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3236
3237            goto next_cqe;
3238        }
3239
3240        /* non TPA */
3241
3242        /* is this an error packet? */
3243        if (__predict_false(cqe_fp_flags &
3244                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3245            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3246            fp->eth_q_stats.rx_soft_errors++;
3247            goto next_rx;
3248        }
3249
3250        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3251        lenonbd = le16toh(cqe_fp->len_on_bd);
3252        pad = cqe_fp->placement_offset;
3253
3254        m = rx_buf->m;
3255
3256        if (__predict_false(m == NULL)) {
3257            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3258                  bd_cons, fp->index);
3259            goto next_rx;
3260        }
3261
3262        /* XXX double copy if packet length under a threshold */
3263
3264        /*
3265         * If all the buffer descriptors are filled with mbufs then fill in
3266         * the current consumer index with a new BD. Else if a maximum Rx
3267         * buffer limit is imposed then fill in the next producer index.
3268         */
3269        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3270                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3271                                      bd_prod : bd_cons);
3272        if (rc != 0) {
3273
3274            /* we simply reuse the received mbuf and don't post it to the stack */
3275            m = NULL;
3276
3277            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3278                  fp->index, rc);
3279            fp->eth_q_stats.rx_soft_errors++;
3280
3281            if (sc->max_rx_bufs != RX_BD_USABLE) {
3282                /* copy this consumer index to the producer index */
3283                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3284                       sizeof(struct bxe_sw_rx_bd));
3285                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3286            }
3287
3288            goto next_rx;
3289        }
3290
3291        /* current mbuf was detached from the bd */
3292        fp->eth_q_stats.mbuf_alloc_rx--;
3293
3294        /* we allocated a replacement mbuf, fixup the current one */
3295        m_adj(m, pad);
3296        m->m_pkthdr.len = m->m_len = len;
3297
3298        if ((len > 60) && (len > lenonbd)) {
3299            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3300            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3301            if (rc)
3302                break;
3303            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3304        } else if (lenonbd < len) {
3305            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3306        }
3307
3308        /* assign packet to this interface interface */
3309	if_setrcvif(m, ifp);
3310
3311        /* assume no hardware checksum has complated */
3312        m->m_pkthdr.csum_flags = 0;
3313
3314        /* validate checksum if offload enabled */
3315        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3316            /* check for a valid IP frame */
3317            if (!(cqe->fast_path_cqe.status_flags &
3318                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3319                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3320                if (__predict_false(cqe_fp_flags &
3321                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3322                    fp->eth_q_stats.rx_hw_csum_errors++;
3323                } else {
3324                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3325                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3326                }
3327            }
3328
3329            /* check for a valid TCP/UDP frame */
3330            if (!(cqe->fast_path_cqe.status_flags &
3331                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3332                if (__predict_false(cqe_fp_flags &
3333                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3334                    fp->eth_q_stats.rx_hw_csum_errors++;
3335                } else {
3336                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3337                    m->m_pkthdr.csum_data = 0xFFFF;
3338                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3339                                               CSUM_PSEUDO_HDR);
3340                }
3341            }
3342        }
3343
3344        /* if there is a VLAN tag then flag that info */
3345        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
3346            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3347            m->m_flags |= M_VLANTAG;
3348        }
3349
3350#if __FreeBSD_version >= 800000
3351        /* specify what RSS queue was used for this flow */
3352        m->m_pkthdr.flowid = fp->index;
3353        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3354#endif
3355
3356next_rx:
3357
3358        bd_cons    = RX_BD_NEXT(bd_cons);
3359        bd_prod    = RX_BD_NEXT(bd_prod);
3360        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3361
3362        /* pass the frame to the stack */
3363        if (__predict_true(m != NULL)) {
3364            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3365            rx_pkts++;
3366            if_input(ifp, m);
3367        }
3368
3369next_cqe:
3370
3371        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3372        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3373
3374        /* limit spinning on the queue */
3375        if (rc != 0)
3376            break;
3377
3378        if (rx_pkts == sc->rx_budget) {
3379            fp->eth_q_stats.rx_budget_reached++;
3380            break;
3381        }
3382    } /* while work to do */
3383
3384    fp->rx_bd_cons = bd_cons;
3385    fp->rx_bd_prod = bd_prod_fw;
3386    fp->rx_cq_cons = sw_cq_cons;
3387    fp->rx_cq_prod = sw_cq_prod;
3388
3389    /* Update producers */
3390    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3391
3392    fp->eth_q_stats.rx_pkts += rx_pkts;
3393    fp->eth_q_stats.rx_calls++;
3394
3395    BXE_FP_RX_UNLOCK(fp);
3396
3397    return (sw_cq_cons != hw_cq_cons);
3398}
3399
3400static uint16_t
3401bxe_free_tx_pkt(struct bxe_softc    *sc,
3402                struct bxe_fastpath *fp,
3403                uint16_t            idx)
3404{
3405    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3406    struct eth_tx_start_bd *tx_start_bd;
3407    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3408    uint16_t new_cons;
3409    int nbd;
3410
3411    /* unmap the mbuf from non-paged memory */
3412    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3413
3414    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3415    nbd = le16toh(tx_start_bd->nbd) - 1;
3416
3417    new_cons = (tx_buf->first_bd + nbd);
3418
3419    /* free the mbuf */
3420    if (__predict_true(tx_buf->m != NULL)) {
3421        m_freem(tx_buf->m);
3422        fp->eth_q_stats.mbuf_alloc_tx--;
3423    } else {
3424        fp->eth_q_stats.tx_chain_lost_mbuf++;
3425    }
3426
3427    tx_buf->m = NULL;
3428    tx_buf->first_bd = 0;
3429
3430    return (new_cons);
3431}
3432
3433/* transmit timeout watchdog */
3434static int
3435bxe_watchdog(struct bxe_softc    *sc,
3436             struct bxe_fastpath *fp)
3437{
3438    BXE_FP_TX_LOCK(fp);
3439
3440    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3441        BXE_FP_TX_UNLOCK(fp);
3442        return (0);
3443    }
3444
3445    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3446
3447    BXE_FP_TX_UNLOCK(fp);
3448
3449    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3450    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3451
3452    return (-1);
3453}
3454
3455/* processes transmit completions */
3456static uint8_t
3457bxe_txeof(struct bxe_softc    *sc,
3458          struct bxe_fastpath *fp)
3459{
3460    if_t ifp = sc->ifp;
3461    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3462    uint16_t tx_bd_avail;
3463
3464    BXE_FP_TX_LOCK_ASSERT(fp);
3465
3466    bd_cons = fp->tx_bd_cons;
3467    hw_cons = le16toh(*fp->tx_cons_sb);
3468    sw_cons = fp->tx_pkt_cons;
3469
3470    while (sw_cons != hw_cons) {
3471        pkt_cons = TX_BD(sw_cons);
3472
3473        BLOGD(sc, DBG_TX,
3474              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3475              fp->index, hw_cons, sw_cons, pkt_cons);
3476
3477        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3478
3479        sw_cons++;
3480    }
3481
3482    fp->tx_pkt_cons = sw_cons;
3483    fp->tx_bd_cons  = bd_cons;
3484
3485    BLOGD(sc, DBG_TX,
3486          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3487          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3488
3489    mb();
3490
3491    tx_bd_avail = bxe_tx_avail(sc, fp);
3492
3493    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3494        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3495    } else {
3496        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3497    }
3498
3499    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3500        /* reset the watchdog timer if there are pending transmits */
3501        fp->watchdog_timer = BXE_TX_TIMEOUT;
3502        return (TRUE);
3503    } else {
3504        /* clear watchdog when there are no pending transmits */
3505        fp->watchdog_timer = 0;
3506        return (FALSE);
3507    }
3508}
3509
3510static void
3511bxe_drain_tx_queues(struct bxe_softc *sc)
3512{
3513    struct bxe_fastpath *fp;
3514    int i, count;
3515
3516    /* wait until all TX fastpath tasks have completed */
3517    for (i = 0; i < sc->num_queues; i++) {
3518        fp = &sc->fp[i];
3519
3520        count = 1000;
3521
3522        while (bxe_has_tx_work(fp)) {
3523
3524            BXE_FP_TX_LOCK(fp);
3525            bxe_txeof(sc, fp);
3526            BXE_FP_TX_UNLOCK(fp);
3527
3528            if (count == 0) {
3529                BLOGE(sc, "Timeout waiting for fp[%d] "
3530                          "transmits to complete!\n", i);
3531                bxe_panic(sc, ("tx drain failure\n"));
3532                return;
3533            }
3534
3535            count--;
3536            DELAY(1000);
3537            rmb();
3538        }
3539    }
3540
3541    return;
3542}
3543
3544static int
3545bxe_del_all_macs(struct bxe_softc          *sc,
3546                 struct ecore_vlan_mac_obj *mac_obj,
3547                 int                       mac_type,
3548                 uint8_t                   wait_for_comp)
3549{
3550    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3551    int rc;
3552
3553    /* wait for completion of requested */
3554    if (wait_for_comp) {
3555        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3556    }
3557
3558    /* Set the mac type of addresses we want to clear */
3559    bxe_set_bit(mac_type, &vlan_mac_flags);
3560
3561    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3562    if (rc < 0) {
3563        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3564            rc, mac_type, wait_for_comp);
3565    }
3566
3567    return (rc);
3568}
3569
3570static int
3571bxe_fill_accept_flags(struct bxe_softc *sc,
3572                      uint32_t         rx_mode,
3573                      unsigned long    *rx_accept_flags,
3574                      unsigned long    *tx_accept_flags)
3575{
3576    /* Clear the flags first */
3577    *rx_accept_flags = 0;
3578    *tx_accept_flags = 0;
3579
3580    switch (rx_mode) {
3581    case BXE_RX_MODE_NONE:
3582        /*
3583         * 'drop all' supersedes any accept flags that may have been
3584         * passed to the function.
3585         */
3586        break;
3587
3588    case BXE_RX_MODE_NORMAL:
3589        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3590        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3591        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3592
3593        /* internal switching mode */
3594        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3595        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3596        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3597
3598        break;
3599
3600    case BXE_RX_MODE_ALLMULTI:
3601        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3602        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3603        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3604
3605        /* internal switching mode */
3606        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3607        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3608        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3609
3610        break;
3611
3612    case BXE_RX_MODE_PROMISC:
3613        /*
3614         * According to deffinition of SI mode, iface in promisc mode
3615         * should receive matched and unmatched (in resolution of port)
3616         * unicast packets.
3617         */
3618        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3619        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3620        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3621        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3622
3623        /* internal switching mode */
3624        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3625        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3626
3627        if (IS_MF_SI(sc)) {
3628            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3629        } else {
3630            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3631        }
3632
3633        break;
3634
3635    default:
3636        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3637        return (-1);
3638    }
3639
3640    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3641    if (rx_mode != BXE_RX_MODE_NONE) {
3642        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3643        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3644    }
3645
3646    return (0);
3647}
3648
3649static int
3650bxe_set_q_rx_mode(struct bxe_softc *sc,
3651                  uint8_t          cl_id,
3652                  unsigned long    rx_mode_flags,
3653                  unsigned long    rx_accept_flags,
3654                  unsigned long    tx_accept_flags,
3655                  unsigned long    ramrod_flags)
3656{
3657    struct ecore_rx_mode_ramrod_params ramrod_param;
3658    int rc;
3659
3660    memset(&ramrod_param, 0, sizeof(ramrod_param));
3661
3662    /* Prepare ramrod parameters */
3663    ramrod_param.cid = 0;
3664    ramrod_param.cl_id = cl_id;
3665    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3666    ramrod_param.func_id = SC_FUNC(sc);
3667
3668    ramrod_param.pstate = &sc->sp_state;
3669    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3670
3671    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3672    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3673
3674    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3675
3676    ramrod_param.ramrod_flags = ramrod_flags;
3677    ramrod_param.rx_mode_flags = rx_mode_flags;
3678
3679    ramrod_param.rx_accept_flags = rx_accept_flags;
3680    ramrod_param.tx_accept_flags = tx_accept_flags;
3681
3682    rc = ecore_config_rx_mode(sc, &ramrod_param);
3683    if (rc < 0) {
3684        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3685            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3686            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3687            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3688            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3689        return (rc);
3690    }
3691
3692    return (0);
3693}
3694
3695static int
3696bxe_set_storm_rx_mode(struct bxe_softc *sc)
3697{
3698    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3699    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3700    int rc;
3701
3702    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3703                               &tx_accept_flags);
3704    if (rc) {
3705        return (rc);
3706    }
3707
3708    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3709    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3710
3711    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3712    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3713                              rx_accept_flags, tx_accept_flags,
3714                              ramrod_flags));
3715}
3716
3717/* returns the "mcp load_code" according to global load_count array */
3718static int
3719bxe_nic_load_no_mcp(struct bxe_softc *sc)
3720{
3721    int path = SC_PATH(sc);
3722    int port = SC_PORT(sc);
3723
3724    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3725          path, load_count[path][0], load_count[path][1],
3726          load_count[path][2]);
3727    load_count[path][0]++;
3728    load_count[path][1 + port]++;
3729    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3730          path, load_count[path][0], load_count[path][1],
3731          load_count[path][2]);
3732    if (load_count[path][0] == 1) {
3733        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3734    } else if (load_count[path][1 + port] == 1) {
3735        return (FW_MSG_CODE_DRV_LOAD_PORT);
3736    } else {
3737        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3738    }
3739}
3740
3741/* returns the "mcp load_code" according to global load_count array */
3742static int
3743bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3744{
3745    int port = SC_PORT(sc);
3746    int path = SC_PATH(sc);
3747
3748    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3749          path, load_count[path][0], load_count[path][1],
3750          load_count[path][2]);
3751    load_count[path][0]--;
3752    load_count[path][1 + port]--;
3753    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3754          path, load_count[path][0], load_count[path][1],
3755          load_count[path][2]);
3756    if (load_count[path][0] == 0) {
3757        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3758    } else if (load_count[path][1 + port] == 0) {
3759        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3760    } else {
3761        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3762    }
3763}
3764
3765/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3766static uint32_t
3767bxe_send_unload_req(struct bxe_softc *sc,
3768                    int              unload_mode)
3769{
3770    uint32_t reset_code = 0;
3771
3772    /* Select the UNLOAD request mode */
3773    if (unload_mode == UNLOAD_NORMAL) {
3774        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3775    } else {
3776        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3777    }
3778
3779    /* Send the request to the MCP */
3780    if (!BXE_NOMCP(sc)) {
3781        reset_code = bxe_fw_command(sc, reset_code, 0);
3782    } else {
3783        reset_code = bxe_nic_unload_no_mcp(sc);
3784    }
3785
3786    return (reset_code);
3787}
3788
3789/* send UNLOAD_DONE command to the MCP */
3790static void
3791bxe_send_unload_done(struct bxe_softc *sc,
3792                     uint8_t          keep_link)
3793{
3794    uint32_t reset_param =
3795        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3796
3797    /* Report UNLOAD_DONE to MCP */
3798    if (!BXE_NOMCP(sc)) {
3799        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3800    }
3801}
3802
3803static int
3804bxe_func_wait_started(struct bxe_softc *sc)
3805{
3806    int tout = 50;
3807
3808    if (!sc->port.pmf) {
3809        return (0);
3810    }
3811
3812    /*
3813     * (assumption: No Attention from MCP at this stage)
3814     * PMF probably in the middle of TX disable/enable transaction
3815     * 1. Sync IRS for default SB
3816     * 2. Sync SP queue - this guarantees us that attention handling started
3817     * 3. Wait, that TX disable/enable transaction completes
3818     *
3819     * 1+2 guarantee that if DCBX attention was scheduled it already changed
3820     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3821     * received completion for the transaction the state is TX_STOPPED.
3822     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3823     * transaction.
3824     */
3825
3826    /* XXX make sure default SB ISR is done */
3827    /* need a way to synchronize an irq (intr_mtx?) */
3828
3829    /* XXX flush any work queues */
3830
3831    while (ecore_func_get_state(sc, &sc->func_obj) !=
3832           ECORE_F_STATE_STARTED && tout--) {
3833        DELAY(20000);
3834    }
3835
3836    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3837        /*
3838         * Failed to complete the transaction in a "good way"
3839         * Force both transactions with CLR bit.
3840         */
3841        struct ecore_func_state_params func_params = { NULL };
3842
3843        BLOGE(sc, "Unexpected function state! "
3844                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3845
3846        func_params.f_obj = &sc->func_obj;
3847        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3848
3849        /* STARTED-->TX_STOPPED */
3850        func_params.cmd = ECORE_F_CMD_TX_STOP;
3851        ecore_func_state_change(sc, &func_params);
3852
3853        /* TX_STOPPED-->STARTED */
3854        func_params.cmd = ECORE_F_CMD_TX_START;
3855        return (ecore_func_state_change(sc, &func_params));
3856    }
3857
3858    return (0);
3859}
3860
3861static int
3862bxe_stop_queue(struct bxe_softc *sc,
3863               int              index)
3864{
3865    struct bxe_fastpath *fp = &sc->fp[index];
3866    struct ecore_queue_state_params q_params = { NULL };
3867    int rc;
3868
3869    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3870
3871    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3872    /* We want to wait for completion in this context */
3873    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3874
3875    /* Stop the primary connection: */
3876
3877    /* ...halt the connection */
3878    q_params.cmd = ECORE_Q_CMD_HALT;
3879    rc = ecore_queue_state_change(sc, &q_params);
3880    if (rc) {
3881        return (rc);
3882    }
3883
3884    /* ...terminate the connection */
3885    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3886    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3887    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3888    rc = ecore_queue_state_change(sc, &q_params);
3889    if (rc) {
3890        return (rc);
3891    }
3892
3893    /* ...delete cfc entry */
3894    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3895    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3896    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3897    return (ecore_queue_state_change(sc, &q_params));
3898}
3899
3900/* wait for the outstanding SP commands */
3901static inline uint8_t
3902bxe_wait_sp_comp(struct bxe_softc *sc,
3903                 unsigned long    mask)
3904{
3905    unsigned long tmp;
3906    int tout = 5000; /* wait for 5 secs tops */
3907
3908    while (tout--) {
3909        mb();
3910        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3911            return (TRUE);
3912        }
3913
3914        DELAY(1000);
3915    }
3916
3917    mb();
3918
3919    tmp = atomic_load_acq_long(&sc->sp_state);
3920    if (tmp & mask) {
3921        BLOGE(sc, "Filtering completion timed out: "
3922                  "sp_state 0x%lx, mask 0x%lx\n",
3923              tmp, mask);
3924        return (FALSE);
3925    }
3926
3927    return (FALSE);
3928}
3929
3930static int
3931bxe_func_stop(struct bxe_softc *sc)
3932{
3933    struct ecore_func_state_params func_params = { NULL };
3934    int rc;
3935
3936    /* prepare parameters for function state transitions */
3937    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3938    func_params.f_obj = &sc->func_obj;
3939    func_params.cmd = ECORE_F_CMD_STOP;
3940
3941    /*
3942     * Try to stop the function the 'good way'. If it fails (in case
3943     * of a parity error during bxe_chip_cleanup()) and we are
3944     * not in a debug mode, perform a state transaction in order to
3945     * enable further HW_RESET transaction.
3946     */
3947    rc = ecore_func_state_change(sc, &func_params);
3948    if (rc) {
3949        BLOGE(sc, "FUNC_STOP ramrod failed. "
3950                  "Running a dry transaction (%d)\n", rc);
3951        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3952        return (ecore_func_state_change(sc, &func_params));
3953    }
3954
3955    return (0);
3956}
3957
3958static int
3959bxe_reset_hw(struct bxe_softc *sc,
3960             uint32_t         load_code)
3961{
3962    struct ecore_func_state_params func_params = { NULL };
3963
3964    /* Prepare parameters for function state transitions */
3965    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3966
3967    func_params.f_obj = &sc->func_obj;
3968    func_params.cmd = ECORE_F_CMD_HW_RESET;
3969
3970    func_params.params.hw_init.load_phase = load_code;
3971
3972    return (ecore_func_state_change(sc, &func_params));
3973}
3974
3975static void
3976bxe_int_disable_sync(struct bxe_softc *sc,
3977                     int              disable_hw)
3978{
3979    if (disable_hw) {
3980        /* prevent the HW from sending interrupts */
3981        bxe_int_disable(sc);
3982    }
3983
3984    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
3985    /* make sure all ISRs are done */
3986
3987    /* XXX make sure sp_task is not running */
3988    /* cancel and flush work queues */
3989}
3990
3991static void
3992bxe_chip_cleanup(struct bxe_softc *sc,
3993                 uint32_t         unload_mode,
3994                 uint8_t          keep_link)
3995{
3996    int port = SC_PORT(sc);
3997    struct ecore_mcast_ramrod_params rparam = { NULL };
3998    uint32_t reset_code;
3999    int i, rc = 0;
4000
4001    bxe_drain_tx_queues(sc);
4002
4003    /* give HW time to discard old tx messages */
4004    DELAY(1000);
4005
4006    /* Clean all ETH MACs */
4007    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4008    if (rc < 0) {
4009        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4010    }
4011
4012    /* Clean up UC list  */
4013    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4014    if (rc < 0) {
4015        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4016    }
4017
4018    /* Disable LLH */
4019    if (!CHIP_IS_E1(sc)) {
4020        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4021    }
4022
4023    /* Set "drop all" to stop Rx */
4024
4025    /*
4026     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4027     * a race between the completion code and this code.
4028     */
4029    BXE_MCAST_LOCK(sc);
4030
4031    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4032        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4033    } else {
4034        bxe_set_storm_rx_mode(sc);
4035    }
4036
4037    /* Clean up multicast configuration */
4038    rparam.mcast_obj = &sc->mcast_obj;
4039    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4040    if (rc < 0) {
4041        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4042    }
4043
4044    BXE_MCAST_UNLOCK(sc);
4045
4046    // XXX bxe_iov_chip_cleanup(sc);
4047
4048    /*
4049     * Send the UNLOAD_REQUEST to the MCP. This will return if
4050     * this function should perform FUNCTION, PORT, or COMMON HW
4051     * reset.
4052     */
4053    reset_code = bxe_send_unload_req(sc, unload_mode);
4054
4055    /*
4056     * (assumption: No Attention from MCP at this stage)
4057     * PMF probably in the middle of TX disable/enable transaction
4058     */
4059    rc = bxe_func_wait_started(sc);
4060    if (rc) {
4061        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4062    }
4063
4064    /*
4065     * Close multi and leading connections
4066     * Completions for ramrods are collected in a synchronous way
4067     */
4068    for (i = 0; i < sc->num_queues; i++) {
4069        if (bxe_stop_queue(sc, i)) {
4070            goto unload_error;
4071        }
4072    }
4073
4074    /*
4075     * If SP settings didn't get completed so far - something
4076     * very wrong has happen.
4077     */
4078    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4079        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4080    }
4081
4082unload_error:
4083
4084    rc = bxe_func_stop(sc);
4085    if (rc) {
4086        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4087    }
4088
4089    /* disable HW interrupts */
4090    bxe_int_disable_sync(sc, TRUE);
4091
4092    /* detach interrupts */
4093    bxe_interrupt_detach(sc);
4094
4095    /* Reset the chip */
4096    rc = bxe_reset_hw(sc, reset_code);
4097    if (rc) {
4098        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4099    }
4100
4101    /* Report UNLOAD_DONE to MCP */
4102    bxe_send_unload_done(sc, keep_link);
4103}
4104
4105static void
4106bxe_disable_close_the_gate(struct bxe_softc *sc)
4107{
4108    uint32_t val;
4109    int port = SC_PORT(sc);
4110
4111    BLOGD(sc, DBG_LOAD,
4112          "Disabling 'close the gates'\n");
4113
4114    if (CHIP_IS_E1(sc)) {
4115        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4116                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4117        val = REG_RD(sc, addr);
4118        val &= ~(0x300);
4119        REG_WR(sc, addr, val);
4120    } else {
4121        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4122        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4123                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4124        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4125    }
4126}
4127
4128/*
4129 * Cleans the object that have internal lists without sending
4130 * ramrods. Should be run when interrutps are disabled.
4131 */
4132static void
4133bxe_squeeze_objects(struct bxe_softc *sc)
4134{
4135    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4136    struct ecore_mcast_ramrod_params rparam = { NULL };
4137    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4138    int rc;
4139
4140    /* Cleanup MACs' object first... */
4141
4142    /* Wait for completion of requested */
4143    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4144    /* Perform a dry cleanup */
4145    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4146
4147    /* Clean ETH primary MAC */
4148    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4149    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4150                             &ramrod_flags);
4151    if (rc != 0) {
4152        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4153    }
4154
4155    /* Cleanup UC list */
4156    vlan_mac_flags = 0;
4157    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4158    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4159                             &ramrod_flags);
4160    if (rc != 0) {
4161        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4162    }
4163
4164    /* Now clean mcast object... */
4165
4166    rparam.mcast_obj = &sc->mcast_obj;
4167    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4168
4169    /* Add a DEL command... */
4170    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4171    if (rc < 0) {
4172        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4173    }
4174
4175    /* now wait until all pending commands are cleared */
4176
4177    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4178    while (rc != 0) {
4179        if (rc < 0) {
4180            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4181            return;
4182        }
4183
4184        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4185    }
4186}
4187
4188/* stop the controller */
4189static __noinline int
4190bxe_nic_unload(struct bxe_softc *sc,
4191               uint32_t         unload_mode,
4192               uint8_t          keep_link)
4193{
4194    uint8_t global = FALSE;
4195    uint32_t val;
4196
4197    BXE_CORE_LOCK_ASSERT(sc);
4198
4199    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4200
4201    /* mark driver as unloaded in shmem2 */
4202    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4203        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4204        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4205                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4206    }
4207
4208    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4209        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4210        /*
4211         * We can get here if the driver has been unloaded
4212         * during parity error recovery and is either waiting for a
4213         * leader to complete or for other functions to unload and
4214         * then ifconfig down has been issued. In this case we want to
4215         * unload and let other functions to complete a recovery
4216         * process.
4217         */
4218        sc->recovery_state = BXE_RECOVERY_DONE;
4219        sc->is_leader = 0;
4220        bxe_release_leader_lock(sc);
4221        mb();
4222
4223        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4224        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4225            " state = 0x%x\n", sc->recovery_state, sc->state);
4226        return (-1);
4227    }
4228
4229    /*
4230     * Nothing to do during unload if previous bxe_nic_load()
4231     * did not completed succesfully - all resourses are released.
4232     */
4233    if ((sc->state == BXE_STATE_CLOSED) ||
4234        (sc->state == BXE_STATE_ERROR)) {
4235        return (0);
4236    }
4237
4238    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4239    mb();
4240
4241    /* stop tx */
4242    bxe_tx_disable(sc);
4243
4244    sc->rx_mode = BXE_RX_MODE_NONE;
4245    /* XXX set rx mode ??? */
4246
4247    if (IS_PF(sc) && !sc->grcdump_done) {
4248        /* set ALWAYS_ALIVE bit in shmem */
4249        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4250
4251        bxe_drv_pulse(sc);
4252
4253        bxe_stats_handle(sc, STATS_EVENT_STOP);
4254        bxe_save_statistics(sc);
4255    }
4256
4257    /* wait till consumers catch up with producers in all queues */
4258    bxe_drain_tx_queues(sc);
4259
4260    /* if VF indicate to PF this function is going down (PF will delete sp
4261     * elements and clear initializations
4262     */
4263    if (IS_VF(sc)) {
4264        ; /* bxe_vfpf_close_vf(sc); */
4265    } else if (unload_mode != UNLOAD_RECOVERY) {
4266        /* if this is a normal/close unload need to clean up chip */
4267        if (!sc->grcdump_done)
4268            bxe_chip_cleanup(sc, unload_mode, keep_link);
4269    } else {
4270        /* Send the UNLOAD_REQUEST to the MCP */
4271        bxe_send_unload_req(sc, unload_mode);
4272
4273        /*
4274         * Prevent transactions to host from the functions on the
4275         * engine that doesn't reset global blocks in case of global
4276         * attention once gloabl blocks are reset and gates are opened
4277         * (the engine which leader will perform the recovery
4278         * last).
4279         */
4280        if (!CHIP_IS_E1x(sc)) {
4281            bxe_pf_disable(sc);
4282        }
4283
4284        /* disable HW interrupts */
4285        bxe_int_disable_sync(sc, TRUE);
4286
4287        /* detach interrupts */
4288        bxe_interrupt_detach(sc);
4289
4290        /* Report UNLOAD_DONE to MCP */
4291        bxe_send_unload_done(sc, FALSE);
4292    }
4293
4294    /*
4295     * At this stage no more interrupts will arrive so we may safely clean
4296     * the queue'able objects here in case they failed to get cleaned so far.
4297     */
4298    if (IS_PF(sc)) {
4299        bxe_squeeze_objects(sc);
4300    }
4301
4302    /* There should be no more pending SP commands at this stage */
4303    sc->sp_state = 0;
4304
4305    sc->port.pmf = 0;
4306
4307    bxe_free_fp_buffers(sc);
4308
4309    if (IS_PF(sc)) {
4310        bxe_free_mem(sc);
4311    }
4312
4313    bxe_free_fw_stats_mem(sc);
4314
4315    sc->state = BXE_STATE_CLOSED;
4316
4317    /*
4318     * Check if there are pending parity attentions. If there are - set
4319     * RECOVERY_IN_PROGRESS.
4320     */
4321    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4322        bxe_set_reset_in_progress(sc);
4323
4324        /* Set RESET_IS_GLOBAL if needed */
4325        if (global) {
4326            bxe_set_reset_global(sc);
4327        }
4328    }
4329
4330    /*
4331     * The last driver must disable a "close the gate" if there is no
4332     * parity attention or "process kill" pending.
4333     */
4334    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4335        bxe_reset_is_done(sc, SC_PATH(sc))) {
4336        bxe_disable_close_the_gate(sc);
4337    }
4338
4339    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4340
4341    return (0);
4342}
4343
4344/*
4345 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4346 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4347 */
4348static int
4349bxe_ifmedia_update(struct ifnet  *ifp)
4350{
4351    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4352    struct ifmedia *ifm;
4353
4354    ifm = &sc->ifmedia;
4355
4356    /* We only support Ethernet media type. */
4357    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4358        return (EINVAL);
4359    }
4360
4361    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4362    case IFM_AUTO:
4363         break;
4364    case IFM_10G_CX4:
4365    case IFM_10G_SR:
4366    case IFM_10G_T:
4367    case IFM_10G_TWINAX:
4368    default:
4369        /* We don't support changing the media type. */
4370        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4371              IFM_SUBTYPE(ifm->ifm_media));
4372        return (EINVAL);
4373    }
4374
4375    return (0);
4376}
4377
4378/*
4379 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4380 */
4381static void
4382bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4383{
4384    struct bxe_softc *sc = if_getsoftc(ifp);
4385
4386    /* Report link down if the driver isn't running. */
4387    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4388        ifmr->ifm_active |= IFM_NONE;
4389        return;
4390    }
4391
4392    /* Setup the default interface info. */
4393    ifmr->ifm_status = IFM_AVALID;
4394    ifmr->ifm_active = IFM_ETHER;
4395
4396    if (sc->link_vars.link_up) {
4397        ifmr->ifm_status |= IFM_ACTIVE;
4398    } else {
4399        ifmr->ifm_active |= IFM_NONE;
4400        return;
4401    }
4402
4403    ifmr->ifm_active |= sc->media;
4404
4405    if (sc->link_vars.duplex == DUPLEX_FULL) {
4406        ifmr->ifm_active |= IFM_FDX;
4407    } else {
4408        ifmr->ifm_active |= IFM_HDX;
4409    }
4410}
4411
4412static int
4413bxe_ioctl_nvram(struct bxe_softc *sc,
4414                uint32_t         priv_op,
4415                struct ifreq     *ifr)
4416{
4417    struct bxe_nvram_data nvdata_base;
4418    struct bxe_nvram_data *nvdata;
4419    int len;
4420    int error = 0;
4421
4422    copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4423
4424    len = (sizeof(struct bxe_nvram_data) +
4425           nvdata_base.len -
4426           sizeof(uint32_t));
4427
4428    if (len > sizeof(struct bxe_nvram_data)) {
4429        if ((nvdata = (struct bxe_nvram_data *)
4430                 malloc(len, M_DEVBUF,
4431                        (M_NOWAIT | M_ZERO))) == NULL) {
4432            BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x "
4433                " len = 0x%x\n", priv_op, len);
4434            return (1);
4435        }
4436        memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4437    } else {
4438        nvdata = &nvdata_base;
4439    }
4440
4441    if (priv_op == BXE_IOC_RD_NVRAM) {
4442        BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4443              nvdata->offset, nvdata->len);
4444        error = bxe_nvram_read(sc,
4445                               nvdata->offset,
4446                               (uint8_t *)nvdata->value,
4447                               nvdata->len);
4448        copyout(nvdata, ifr->ifr_data, len);
4449    } else { /* BXE_IOC_WR_NVRAM */
4450        BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4451              nvdata->offset, nvdata->len);
4452        copyin(ifr->ifr_data, nvdata, len);
4453        error = bxe_nvram_write(sc,
4454                                nvdata->offset,
4455                                (uint8_t *)nvdata->value,
4456                                nvdata->len);
4457    }
4458
4459    if (len > sizeof(struct bxe_nvram_data)) {
4460        free(nvdata, M_DEVBUF);
4461    }
4462
4463    return (error);
4464}
4465
4466static int
4467bxe_ioctl_stats_show(struct bxe_softc *sc,
4468                     uint32_t         priv_op,
4469                     struct ifreq     *ifr)
4470{
4471    const size_t str_size   = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4472    const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4473    caddr_t p_tmp;
4474    uint32_t *offset;
4475    int i;
4476
4477    switch (priv_op)
4478    {
4479    case BXE_IOC_STATS_SHOW_NUM:
4480        memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4481        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4482            BXE_NUM_ETH_STATS;
4483        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4484            STAT_NAME_LEN;
4485        return (0);
4486
4487    case BXE_IOC_STATS_SHOW_STR:
4488        memset(ifr->ifr_data, 0, str_size);
4489        p_tmp = ifr->ifr_data;
4490        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4491            strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4492            p_tmp += STAT_NAME_LEN;
4493        }
4494        return (0);
4495
4496    case BXE_IOC_STATS_SHOW_CNT:
4497        memset(ifr->ifr_data, 0, stats_size);
4498        p_tmp = ifr->ifr_data;
4499        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4500            offset = ((uint32_t *)&sc->eth_stats +
4501                      bxe_eth_stats_arr[i].offset);
4502            switch (bxe_eth_stats_arr[i].size) {
4503            case 4:
4504                *((uint64_t *)p_tmp) = (uint64_t)*offset;
4505                break;
4506            case 8:
4507                *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4508                break;
4509            default:
4510                *((uint64_t *)p_tmp) = 0;
4511            }
4512            p_tmp += sizeof(uint64_t);
4513        }
4514        return (0);
4515
4516    default:
4517        return (-1);
4518    }
4519}
4520
4521static void
4522bxe_handle_chip_tq(void *context,
4523                   int  pending)
4524{
4525    struct bxe_softc *sc = (struct bxe_softc *)context;
4526    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4527
4528    switch (work)
4529    {
4530
4531    case CHIP_TQ_REINIT:
4532        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4533            /* restart the interface */
4534            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4535            bxe_periodic_stop(sc);
4536            BXE_CORE_LOCK(sc);
4537            bxe_stop_locked(sc);
4538            bxe_init_locked(sc);
4539            BXE_CORE_UNLOCK(sc);
4540        }
4541        break;
4542
4543    default:
4544        break;
4545    }
4546}
4547
4548/*
4549 * Handles any IOCTL calls from the operating system.
4550 *
4551 * Returns:
4552 *   0 = Success, >0 Failure
4553 */
4554static int
4555bxe_ioctl(if_t ifp,
4556          u_long       command,
4557          caddr_t      data)
4558{
4559    struct bxe_softc *sc = if_getsoftc(ifp);
4560    struct ifreq *ifr = (struct ifreq *)data;
4561    struct bxe_nvram_data *nvdata;
4562    uint32_t priv_op;
4563    int mask = 0;
4564    int reinit = 0;
4565    int error = 0;
4566
4567    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4568    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4569
4570    switch (command)
4571    {
4572    case SIOCSIFMTU:
4573        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4574              ifr->ifr_mtu);
4575
4576        if (sc->mtu == ifr->ifr_mtu) {
4577            /* nothing to change */
4578            break;
4579        }
4580
4581        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4582            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4583                  ifr->ifr_mtu, mtu_min, mtu_max);
4584            error = EINVAL;
4585            break;
4586        }
4587
4588        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4589                             (unsigned long)ifr->ifr_mtu);
4590	/*
4591        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4592                              (unsigned long)ifr->ifr_mtu);
4593	XXX - Not sure why it needs to be atomic
4594	*/
4595	if_setmtu(ifp, ifr->ifr_mtu);
4596        reinit = 1;
4597        break;
4598
4599    case SIOCSIFFLAGS:
4600        /* toggle the interface state up or down */
4601        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4602
4603	BXE_CORE_LOCK(sc);
4604        /* check if the interface is up */
4605        if (if_getflags(ifp) & IFF_UP) {
4606            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4607                /* set the receive mode flags */
4608                bxe_set_rx_mode(sc);
4609            } else {
4610		bxe_init_locked(sc);
4611            }
4612        } else {
4613            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4614		bxe_periodic_stop(sc);
4615		bxe_stop_locked(sc);
4616            }
4617        }
4618	BXE_CORE_UNLOCK(sc);
4619
4620        break;
4621
4622    case SIOCADDMULTI:
4623    case SIOCDELMULTI:
4624        /* add/delete multicast addresses */
4625        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4626
4627        /* check if the interface is up */
4628        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4629            /* set the receive mode flags */
4630	    BXE_CORE_LOCK(sc);
4631            bxe_set_rx_mode(sc);
4632	    BXE_CORE_UNLOCK(sc);
4633        }
4634
4635        break;
4636
4637    case SIOCSIFCAP:
4638        /* find out which capabilities have changed */
4639        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4640
4641        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4642              mask);
4643
4644        /* toggle the LRO capabilites enable flag */
4645        if (mask & IFCAP_LRO) {
4646	    if_togglecapenable(ifp, IFCAP_LRO);
4647            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4648                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4649            reinit = 1;
4650        }
4651
4652        /* toggle the TXCSUM checksum capabilites enable flag */
4653        if (mask & IFCAP_TXCSUM) {
4654	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4655            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4656                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4657            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4658                if_sethwassistbits(ifp, (CSUM_IP      |
4659                                    CSUM_TCP      |
4660                                    CSUM_UDP      |
4661                                    CSUM_TSO      |
4662                                    CSUM_TCP_IPV6 |
4663                                    CSUM_UDP_IPV6), 0);
4664            } else {
4665		if_clearhwassist(ifp); /* XXX */
4666            }
4667        }
4668
4669        /* toggle the RXCSUM checksum capabilities enable flag */
4670        if (mask & IFCAP_RXCSUM) {
4671	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4672            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4673                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4674            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4675                if_sethwassistbits(ifp, (CSUM_IP      |
4676                                    CSUM_TCP      |
4677                                    CSUM_UDP      |
4678                                    CSUM_TSO      |
4679                                    CSUM_TCP_IPV6 |
4680                                    CSUM_UDP_IPV6), 0);
4681            } else {
4682		if_clearhwassist(ifp); /* XXX */
4683            }
4684        }
4685
4686        /* toggle TSO4 capabilities enabled flag */
4687        if (mask & IFCAP_TSO4) {
4688            if_togglecapenable(ifp, IFCAP_TSO4);
4689            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4690                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4691        }
4692
4693        /* toggle TSO6 capabilities enabled flag */
4694        if (mask & IFCAP_TSO6) {
4695	    if_togglecapenable(ifp, IFCAP_TSO6);
4696            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4697                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4698        }
4699
4700        /* toggle VLAN_HWTSO capabilities enabled flag */
4701        if (mask & IFCAP_VLAN_HWTSO) {
4702
4703	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4704            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4705                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4706        }
4707
4708        /* toggle VLAN_HWCSUM capabilities enabled flag */
4709        if (mask & IFCAP_VLAN_HWCSUM) {
4710            /* XXX investigate this... */
4711            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4712            error = EINVAL;
4713        }
4714
4715        /* toggle VLAN_MTU capabilities enable flag */
4716        if (mask & IFCAP_VLAN_MTU) {
4717            /* XXX investigate this... */
4718            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4719            error = EINVAL;
4720        }
4721
4722        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4723        if (mask & IFCAP_VLAN_HWTAGGING) {
4724            /* XXX investigate this... */
4725            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4726            error = EINVAL;
4727        }
4728
4729        /* toggle VLAN_HWFILTER capabilities enabled flag */
4730        if (mask & IFCAP_VLAN_HWFILTER) {
4731            /* XXX investigate this... */
4732            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4733            error = EINVAL;
4734        }
4735
4736        /* XXX not yet...
4737         * IFCAP_WOL_MAGIC
4738         */
4739
4740        break;
4741
4742    case SIOCSIFMEDIA:
4743    case SIOCGIFMEDIA:
4744        /* set/get interface media */
4745        BLOGD(sc, DBG_IOCTL,
4746              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4747              (command & 0xff));
4748        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4749        break;
4750
4751    case SIOCGPRIVATE_0:
4752        copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
4753
4754        switch (priv_op)
4755        {
4756        case BXE_IOC_RD_NVRAM:
4757        case BXE_IOC_WR_NVRAM:
4758            nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
4759            BLOGD(sc, DBG_IOCTL,
4760                  "Received Private NVRAM ioctl addr=0x%x size=%u\n",
4761                  nvdata->offset, nvdata->len);
4762            error = bxe_ioctl_nvram(sc, priv_op, ifr);
4763            break;
4764
4765        case BXE_IOC_STATS_SHOW_NUM:
4766        case BXE_IOC_STATS_SHOW_STR:
4767        case BXE_IOC_STATS_SHOW_CNT:
4768            BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
4769                  priv_op);
4770            error = bxe_ioctl_stats_show(sc, priv_op, ifr);
4771            break;
4772
4773        default:
4774            BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
4775            error = EINVAL;
4776            break;
4777        }
4778
4779        break;
4780
4781    default:
4782        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4783              (command & 0xff));
4784        error = ether_ioctl(ifp, command, data);
4785        break;
4786    }
4787
4788    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4789        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4790              "Re-initializing hardware from IOCTL change\n");
4791	bxe_periodic_stop(sc);
4792	BXE_CORE_LOCK(sc);
4793	bxe_stop_locked(sc);
4794	bxe_init_locked(sc);
4795	BXE_CORE_UNLOCK(sc);
4796    }
4797
4798    return (error);
4799}
4800
4801static __noinline void
4802bxe_dump_mbuf(struct bxe_softc *sc,
4803              struct mbuf      *m,
4804              uint8_t          contents)
4805{
4806    char * type;
4807    int i = 0;
4808
4809    if (!(sc->debug & DBG_MBUF)) {
4810        return;
4811    }
4812
4813    if (m == NULL) {
4814        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4815        return;
4816    }
4817
4818    while (m) {
4819        BLOGD(sc, DBG_MBUF,
4820              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4821              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4822
4823        if (m->m_flags & M_PKTHDR) {
4824             BLOGD(sc, DBG_MBUF,
4825                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4826                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4827                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4828        }
4829
4830        if (m->m_flags & M_EXT) {
4831            switch (m->m_ext.ext_type) {
4832            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4833            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4834            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4835            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4836            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4837            case EXT_PACKET:     type = "EXT_PACKET";     break;
4838            case EXT_MBUF:       type = "EXT_MBUF";       break;
4839            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4840            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4841            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4842            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4843            default:             type = "UNKNOWN";        break;
4844            }
4845
4846            BLOGD(sc, DBG_MBUF,
4847                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4848                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4849        }
4850
4851        if (contents) {
4852            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4853        }
4854
4855        m = m->m_next;
4856        i++;
4857    }
4858}
4859
4860/*
4861 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4862 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4863 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4864 * The headers comes in a seperate bd in FreeBSD so 13-3=10.
4865 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4866 */
4867static int
4868bxe_chktso_window(struct bxe_softc  *sc,
4869                  int               nsegs,
4870                  bus_dma_segment_t *segs,
4871                  struct mbuf       *m)
4872{
4873    uint32_t num_wnds, wnd_size, wnd_sum;
4874    int32_t frag_idx, wnd_idx;
4875    unsigned short lso_mss;
4876    int defrag;
4877
4878    defrag = 0;
4879    wnd_sum = 0;
4880    wnd_size = 10;
4881    num_wnds = nsegs - wnd_size;
4882    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4883
4884    /*
4885     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4886     * first window sum of data while skipping the first assuming it is the
4887     * header in FreeBSD.
4888     */
4889    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4890        wnd_sum += htole16(segs[frag_idx].ds_len);
4891    }
4892
4893    /* check the first 10 bd window size */
4894    if (wnd_sum < lso_mss) {
4895        return (1);
4896    }
4897
4898    /* run through the windows */
4899    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4900        /* subtract the first mbuf->m_len of the last wndw(-header) */
4901        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4902        /* add the next mbuf len to the len of our new window */
4903        wnd_sum += htole16(segs[frag_idx].ds_len);
4904        if (wnd_sum < lso_mss) {
4905            return (1);
4906        }
4907    }
4908
4909    return (0);
4910}
4911
4912static uint8_t
4913bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4914                    struct mbuf         *m,
4915                    uint32_t            *parsing_data)
4916{
4917    struct ether_vlan_header *eh = NULL;
4918    struct ip *ip4 = NULL;
4919    struct ip6_hdr *ip6 = NULL;
4920    caddr_t ip = NULL;
4921    struct tcphdr *th = NULL;
4922    int e_hlen, ip_hlen, l4_off;
4923    uint16_t proto;
4924
4925    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4926        /* no L4 checksum offload needed */
4927        return (0);
4928    }
4929
4930    /* get the Ethernet header */
4931    eh = mtod(m, struct ether_vlan_header *);
4932
4933    /* handle VLAN encapsulation if present */
4934    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4935        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4936        proto  = ntohs(eh->evl_proto);
4937    } else {
4938        e_hlen = ETHER_HDR_LEN;
4939        proto  = ntohs(eh->evl_encap_proto);
4940    }
4941
4942    switch (proto) {
4943    case ETHERTYPE_IP:
4944        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4945        ip4 = (m->m_len < sizeof(struct ip)) ?
4946                  (struct ip *)m->m_next->m_data :
4947                  (struct ip *)(m->m_data + e_hlen);
4948        /* ip_hl is number of 32-bit words */
4949        ip_hlen = (ip4->ip_hl << 2);
4950        ip = (caddr_t)ip4;
4951        break;
4952    case ETHERTYPE_IPV6:
4953        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4954        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4955                  (struct ip6_hdr *)m->m_next->m_data :
4956                  (struct ip6_hdr *)(m->m_data + e_hlen);
4957        /* XXX cannot support offload with IPv6 extensions */
4958        ip_hlen = sizeof(struct ip6_hdr);
4959        ip = (caddr_t)ip6;
4960        break;
4961    default:
4962        /* We can't offload in this case... */
4963        /* XXX error stat ??? */
4964        return (0);
4965    }
4966
4967    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4968    l4_off = (e_hlen + ip_hlen);
4969
4970    *parsing_data |=
4971        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4972         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4973
4974    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4975                                  CSUM_TSO |
4976                                  CSUM_TCP_IPV6)) {
4977        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4978        th = (struct tcphdr *)(ip + ip_hlen);
4979        /* th_off is number of 32-bit words */
4980        *parsing_data |= ((th->th_off <<
4981                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4982                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4983        return (l4_off + (th->th_off << 2)); /* entire header length */
4984    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4985                                         CSUM_UDP_IPV6)) {
4986        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4987        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4988    } else {
4989        /* XXX error stat ??? */
4990        return (0);
4991    }
4992}
4993
4994static uint8_t
4995bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4996                 struct mbuf                *m,
4997                 struct eth_tx_parse_bd_e1x *pbd)
4998{
4999    struct ether_vlan_header *eh = NULL;
5000    struct ip *ip4 = NULL;
5001    struct ip6_hdr *ip6 = NULL;
5002    caddr_t ip = NULL;
5003    struct tcphdr *th = NULL;
5004    struct udphdr *uh = NULL;
5005    int e_hlen, ip_hlen;
5006    uint16_t proto;
5007    uint8_t hlen;
5008    uint16_t tmp_csum;
5009    uint32_t *tmp_uh;
5010
5011    /* get the Ethernet header */
5012    eh = mtod(m, struct ether_vlan_header *);
5013
5014    /* handle VLAN encapsulation if present */
5015    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5016        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5017        proto  = ntohs(eh->evl_proto);
5018    } else {
5019        e_hlen = ETHER_HDR_LEN;
5020        proto  = ntohs(eh->evl_encap_proto);
5021    }
5022
5023    switch (proto) {
5024    case ETHERTYPE_IP:
5025        /* get the IP header, if mbuf len < 20 then header in next mbuf */
5026        ip4 = (m->m_len < sizeof(struct ip)) ?
5027                  (struct ip *)m->m_next->m_data :
5028                  (struct ip *)(m->m_data + e_hlen);
5029        /* ip_hl is number of 32-bit words */
5030        ip_hlen = (ip4->ip_hl << 1);
5031        ip = (caddr_t)ip4;
5032        break;
5033    case ETHERTYPE_IPV6:
5034        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5035        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5036                  (struct ip6_hdr *)m->m_next->m_data :
5037                  (struct ip6_hdr *)(m->m_data + e_hlen);
5038        /* XXX cannot support offload with IPv6 extensions */
5039        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5040        ip = (caddr_t)ip6;
5041        break;
5042    default:
5043        /* We can't offload in this case... */
5044        /* XXX error stat ??? */
5045        return (0);
5046    }
5047
5048    hlen = (e_hlen >> 1);
5049
5050    /* note that rest of global_data is indirectly zeroed here */
5051    if (m->m_flags & M_VLANTAG) {
5052        pbd->global_data =
5053            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5054    } else {
5055        pbd->global_data = htole16(hlen);
5056    }
5057
5058    pbd->ip_hlen_w = ip_hlen;
5059
5060    hlen += pbd->ip_hlen_w;
5061
5062    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5063
5064    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5065                                  CSUM_TSO |
5066                                  CSUM_TCP_IPV6)) {
5067        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5068        /* th_off is number of 32-bit words */
5069        hlen += (uint16_t)(th->th_off << 1);
5070    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5071                                         CSUM_UDP_IPV6)) {
5072        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5073        hlen += (sizeof(struct udphdr) / 2);
5074    } else {
5075        /* valid case as only CSUM_IP was set */
5076        return (0);
5077    }
5078
5079    pbd->total_hlen_w = htole16(hlen);
5080
5081    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5082                                  CSUM_TSO |
5083                                  CSUM_TCP_IPV6)) {
5084        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5085        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5086    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5087                                         CSUM_UDP_IPV6)) {
5088        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5089
5090        /*
5091         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5092         * checksums and does not know anything about the UDP header and where
5093         * the checksum field is located. It only knows about TCP. Therefore
5094         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5095         * offload. Since the checksum field offset for TCP is 16 bytes and
5096         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5097         * bytes less than the start of the UDP header. This allows the
5098         * hardware to write the checksum in the correct spot. But the
5099         * hardware will compute a checksum which includes the last 10 bytes
5100         * of the IP header. To correct this we tweak the stack computed
5101         * pseudo checksum by folding in the calculation of the inverse
5102         * checksum for those final 10 bytes of the IP header. This allows
5103         * the correct checksum to be computed by the hardware.
5104         */
5105
5106        /* set pointer 10 bytes before UDP header */
5107        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5108
5109        /* calculate a pseudo header checksum over the first 10 bytes */
5110        tmp_csum = in_pseudo(*tmp_uh,
5111                             *(tmp_uh + 1),
5112                             *(uint16_t *)(tmp_uh + 2));
5113
5114        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5115    }
5116
5117    return (hlen * 2); /* entire header length, number of bytes */
5118}
5119
5120static void
5121bxe_set_pbd_lso_e2(struct mbuf *m,
5122                   uint32_t    *parsing_data)
5123{
5124    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5125                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5126                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5127
5128    /* XXX test for IPv6 with extension header... */
5129}
5130
5131static void
5132bxe_set_pbd_lso(struct mbuf                *m,
5133                struct eth_tx_parse_bd_e1x *pbd)
5134{
5135    struct ether_vlan_header *eh = NULL;
5136    struct ip *ip = NULL;
5137    struct tcphdr *th = NULL;
5138    int e_hlen;
5139
5140    /* get the Ethernet header */
5141    eh = mtod(m, struct ether_vlan_header *);
5142
5143    /* handle VLAN encapsulation if present */
5144    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5145                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5146
5147    /* get the IP and TCP header, with LSO entire header in first mbuf */
5148    /* XXX assuming IPv4 */
5149    ip = (struct ip *)(m->m_data + e_hlen);
5150    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5151
5152    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5153    pbd->tcp_send_seq = ntohl(th->th_seq);
5154    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5155
5156#if 1
5157        /* XXX IPv4 */
5158        pbd->ip_id = ntohs(ip->ip_id);
5159        pbd->tcp_pseudo_csum =
5160            ntohs(in_pseudo(ip->ip_src.s_addr,
5161                            ip->ip_dst.s_addr,
5162                            htons(IPPROTO_TCP)));
5163#else
5164        /* XXX IPv6 */
5165        pbd->tcp_pseudo_csum =
5166            ntohs(in_pseudo(&ip6->ip6_src,
5167                            &ip6->ip6_dst,
5168                            htons(IPPROTO_TCP)));
5169#endif
5170
5171    pbd->global_data |=
5172        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5173}
5174
5175/*
5176 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5177 * visible to the controller.
5178 *
5179 * If an mbuf is submitted to this routine and cannot be given to the
5180 * controller (e.g. it has too many fragments) then the function may free
5181 * the mbuf and return to the caller.
5182 *
5183 * Returns:
5184 *   0 = Success, !0 = Failure
5185 *   Note the side effect that an mbuf may be freed if it causes a problem.
5186 */
5187static int
5188bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5189{
5190    bus_dma_segment_t segs[32];
5191    struct mbuf *m0;
5192    struct bxe_sw_tx_bd *tx_buf;
5193    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5194    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5195    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5196    struct eth_tx_bd *tx_data_bd;
5197    struct eth_tx_bd *tx_total_pkt_size_bd;
5198    struct eth_tx_start_bd *tx_start_bd;
5199    uint16_t bd_prod, pkt_prod, total_pkt_size;
5200    uint8_t mac_type;
5201    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5202    struct bxe_softc *sc;
5203    uint16_t tx_bd_avail;
5204    struct ether_vlan_header *eh;
5205    uint32_t pbd_e2_parsing_data = 0;
5206    uint8_t hlen = 0;
5207    int tmp_bd;
5208    int i;
5209
5210    sc = fp->sc;
5211
5212    M_ASSERTPKTHDR(*m_head);
5213
5214    m0 = *m_head;
5215    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5216    tx_start_bd = NULL;
5217    tx_data_bd = NULL;
5218    tx_total_pkt_size_bd = NULL;
5219
5220    /* get the H/W pointer for packets and BDs */
5221    pkt_prod = fp->tx_pkt_prod;
5222    bd_prod = fp->tx_bd_prod;
5223
5224    mac_type = UNICAST_ADDRESS;
5225
5226    /* map the mbuf into the next open DMAable memory */
5227    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5228    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5229                                    tx_buf->m_map, m0,
5230                                    segs, &nsegs, BUS_DMA_NOWAIT);
5231
5232    /* mapping errors */
5233    if(__predict_false(error != 0)) {
5234        fp->eth_q_stats.tx_dma_mapping_failure++;
5235        if (error == ENOMEM) {
5236            /* resource issue, try again later */
5237            rc = ENOMEM;
5238        } else if (error == EFBIG) {
5239            /* possibly recoverable with defragmentation */
5240            fp->eth_q_stats.mbuf_defrag_attempts++;
5241            m0 = m_defrag(*m_head, M_NOWAIT);
5242            if (m0 == NULL) {
5243                fp->eth_q_stats.mbuf_defrag_failures++;
5244                rc = ENOBUFS;
5245            } else {
5246                /* defrag successful, try mapping again */
5247                *m_head = m0;
5248                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5249                                                tx_buf->m_map, m0,
5250                                                segs, &nsegs, BUS_DMA_NOWAIT);
5251                if (error) {
5252                    fp->eth_q_stats.tx_dma_mapping_failure++;
5253                    rc = error;
5254                }
5255            }
5256        } else {
5257            /* unknown, unrecoverable mapping error */
5258            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5259            bxe_dump_mbuf(sc, m0, FALSE);
5260            rc = error;
5261        }
5262
5263        goto bxe_tx_encap_continue;
5264    }
5265
5266    tx_bd_avail = bxe_tx_avail(sc, fp);
5267
5268    /* make sure there is enough room in the send queue */
5269    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5270        /* Recoverable, try again later. */
5271        fp->eth_q_stats.tx_hw_queue_full++;
5272        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5273        rc = ENOMEM;
5274        goto bxe_tx_encap_continue;
5275    }
5276
5277    /* capture the current H/W TX chain high watermark */
5278    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5279                        (TX_BD_USABLE - tx_bd_avail))) {
5280        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5281    }
5282
5283    /* make sure it fits in the packet window */
5284    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5285        /*
5286         * The mbuf may be to big for the controller to handle. If the frame
5287         * is a TSO frame we'll need to do an additional check.
5288         */
5289        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5290            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5291                goto bxe_tx_encap_continue; /* OK to send */
5292            } else {
5293                fp->eth_q_stats.tx_window_violation_tso++;
5294            }
5295        } else {
5296            fp->eth_q_stats.tx_window_violation_std++;
5297        }
5298
5299        /* lets try to defragment this mbuf and remap it */
5300        fp->eth_q_stats.mbuf_defrag_attempts++;
5301        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5302
5303        m0 = m_defrag(*m_head, M_NOWAIT);
5304        if (m0 == NULL) {
5305            fp->eth_q_stats.mbuf_defrag_failures++;
5306            /* Ugh, just drop the frame... :( */
5307            rc = ENOBUFS;
5308        } else {
5309            /* defrag successful, try mapping again */
5310            *m_head = m0;
5311            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5312                                            tx_buf->m_map, m0,
5313                                            segs, &nsegs, BUS_DMA_NOWAIT);
5314            if (error) {
5315                fp->eth_q_stats.tx_dma_mapping_failure++;
5316                /* No sense in trying to defrag/copy chain, drop it. :( */
5317                rc = error;
5318            }
5319            else {
5320                /* if the chain is still too long then drop it */
5321                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5322                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5323                    rc = ENODEV;
5324                }
5325            }
5326        }
5327    }
5328
5329bxe_tx_encap_continue:
5330
5331    /* Check for errors */
5332    if (rc) {
5333        if (rc == ENOMEM) {
5334            /* recoverable try again later  */
5335        } else {
5336            fp->eth_q_stats.tx_soft_errors++;
5337            fp->eth_q_stats.mbuf_alloc_tx--;
5338            m_freem(*m_head);
5339            *m_head = NULL;
5340        }
5341
5342        return (rc);
5343    }
5344
5345    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5346    if (m0->m_flags & M_BCAST) {
5347        mac_type = BROADCAST_ADDRESS;
5348    } else if (m0->m_flags & M_MCAST) {
5349        mac_type = MULTICAST_ADDRESS;
5350    }
5351
5352    /* store the mbuf into the mbuf ring */
5353    tx_buf->m        = m0;
5354    tx_buf->first_bd = fp->tx_bd_prod;
5355    tx_buf->flags    = 0;
5356
5357    /* prepare the first transmit (start) BD for the mbuf */
5358    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5359
5360    BLOGD(sc, DBG_TX,
5361          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5362          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5363
5364    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5365    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5366    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5367    total_pkt_size += tx_start_bd->nbytes;
5368    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5369
5370    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5371
5372    /* all frames have at least Start BD + Parsing BD */
5373    nbds = nsegs + 1;
5374    tx_start_bd->nbd = htole16(nbds);
5375
5376    if (m0->m_flags & M_VLANTAG) {
5377        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5378        tx_start_bd->bd_flags.as_bitfield |=
5379            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5380    } else {
5381        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5382        if (IS_VF(sc)) {
5383            /* map ethernet header to find type and header length */
5384            eh = mtod(m0, struct ether_vlan_header *);
5385            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5386        } else {
5387            /* used by FW for packet accounting */
5388            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5389        }
5390    }
5391
5392    /*
5393     * add a parsing BD from the chain. The parsing BD is always added
5394     * though it is only used for TSO and chksum
5395     */
5396    bd_prod = TX_BD_NEXT(bd_prod);
5397
5398    if (m0->m_pkthdr.csum_flags) {
5399        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5400            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5401            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5402        }
5403
5404        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5405            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5406                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5407        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5408            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5409                                                  ETH_TX_BD_FLAGS_IS_UDP |
5410                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5411        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5412                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5413            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5414        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5415            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5416                                                  ETH_TX_BD_FLAGS_IS_UDP);
5417        }
5418    }
5419
5420    if (!CHIP_IS_E1x(sc)) {
5421        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5422        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5423
5424        if (m0->m_pkthdr.csum_flags) {
5425            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5426        }
5427
5428        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5429                 mac_type);
5430    } else {
5431        uint16_t global_data = 0;
5432
5433        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5434        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5435
5436        if (m0->m_pkthdr.csum_flags) {
5437            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5438        }
5439
5440        SET_FLAG(global_data,
5441                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5442        pbd_e1x->global_data |= htole16(global_data);
5443    }
5444
5445    /* setup the parsing BD with TSO specific info */
5446    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5447        fp->eth_q_stats.tx_ofld_frames_lso++;
5448        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5449
5450        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5451            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5452
5453            /* split the first BD into header/data making the fw job easy */
5454            nbds++;
5455            tx_start_bd->nbd = htole16(nbds);
5456            tx_start_bd->nbytes = htole16(hlen);
5457
5458            bd_prod = TX_BD_NEXT(bd_prod);
5459
5460            /* new transmit BD after the tx_parse_bd */
5461            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5462            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5463            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5464            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5465            if (tx_total_pkt_size_bd == NULL) {
5466                tx_total_pkt_size_bd = tx_data_bd;
5467            }
5468
5469            BLOGD(sc, DBG_TX,
5470                  "TSO split header size is %d (%x:%x) nbds %d\n",
5471                  le16toh(tx_start_bd->nbytes),
5472                  le32toh(tx_start_bd->addr_hi),
5473                  le32toh(tx_start_bd->addr_lo),
5474                  nbds);
5475        }
5476
5477        if (!CHIP_IS_E1x(sc)) {
5478            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5479        } else {
5480            bxe_set_pbd_lso(m0, pbd_e1x);
5481        }
5482    }
5483
5484    if (pbd_e2_parsing_data) {
5485        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5486    }
5487
5488    /* prepare remaining BDs, start tx bd contains first seg/frag */
5489    for (i = 1; i < nsegs ; i++) {
5490        bd_prod = TX_BD_NEXT(bd_prod);
5491        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5492        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5493        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5494        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5495        if (tx_total_pkt_size_bd == NULL) {
5496            tx_total_pkt_size_bd = tx_data_bd;
5497        }
5498        total_pkt_size += tx_data_bd->nbytes;
5499    }
5500
5501    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5502
5503    if (tx_total_pkt_size_bd != NULL) {
5504        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5505    }
5506
5507    if (__predict_false(sc->debug & DBG_TX)) {
5508        tmp_bd = tx_buf->first_bd;
5509        for (i = 0; i < nbds; i++)
5510        {
5511            if (i == 0) {
5512                BLOGD(sc, DBG_TX,
5513                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5514                      "bd_flags=0x%x hdr_nbds=%d\n",
5515                      tx_start_bd,
5516                      tmp_bd,
5517                      le16toh(tx_start_bd->nbd),
5518                      le16toh(tx_start_bd->vlan_or_ethertype),
5519                      tx_start_bd->bd_flags.as_bitfield,
5520                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5521            } else if (i == 1) {
5522                if (pbd_e1x) {
5523                    BLOGD(sc, DBG_TX,
5524                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5525                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5526                          "tcp_seq=%u total_hlen_w=%u\n",
5527                          pbd_e1x,
5528                          tmp_bd,
5529                          pbd_e1x->global_data,
5530                          pbd_e1x->ip_hlen_w,
5531                          pbd_e1x->ip_id,
5532                          pbd_e1x->lso_mss,
5533                          pbd_e1x->tcp_flags,
5534                          pbd_e1x->tcp_pseudo_csum,
5535                          pbd_e1x->tcp_send_seq,
5536                          le16toh(pbd_e1x->total_hlen_w));
5537                } else { /* if (pbd_e2) */
5538                    BLOGD(sc, DBG_TX,
5539                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5540                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5541                          pbd_e2,
5542                          tmp_bd,
5543                          pbd_e2->data.mac_addr.dst_hi,
5544                          pbd_e2->data.mac_addr.dst_mid,
5545                          pbd_e2->data.mac_addr.dst_lo,
5546                          pbd_e2->data.mac_addr.src_hi,
5547                          pbd_e2->data.mac_addr.src_mid,
5548                          pbd_e2->data.mac_addr.src_lo,
5549                          pbd_e2->parsing_data);
5550                }
5551            }
5552
5553            if (i != 1) { /* skip parse db as it doesn't hold data */
5554                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5555                BLOGD(sc, DBG_TX,
5556                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5557                      tx_data_bd,
5558                      tmp_bd,
5559                      le16toh(tx_data_bd->nbytes),
5560                      le32toh(tx_data_bd->addr_hi),
5561                      le32toh(tx_data_bd->addr_lo));
5562            }
5563
5564            tmp_bd = TX_BD_NEXT(tmp_bd);
5565        }
5566    }
5567
5568    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5569
5570    /* update TX BD producer index value for next TX */
5571    bd_prod = TX_BD_NEXT(bd_prod);
5572
5573    /*
5574     * If the chain of tx_bd's describing this frame is adjacent to or spans
5575     * an eth_tx_next_bd element then we need to increment the nbds value.
5576     */
5577    if (TX_BD_IDX(bd_prod) < nbds) {
5578        nbds++;
5579    }
5580
5581    /* don't allow reordering of writes for nbd and packets */
5582    mb();
5583
5584    fp->tx_db.data.prod += nbds;
5585
5586    /* producer points to the next free tx_bd at this point */
5587    fp->tx_pkt_prod++;
5588    fp->tx_bd_prod = bd_prod;
5589
5590    DOORBELL(sc, fp->index, fp->tx_db.raw);
5591
5592    fp->eth_q_stats.tx_pkts++;
5593
5594    /* Prevent speculative reads from getting ahead of the status block. */
5595    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5596                      0, 0, BUS_SPACE_BARRIER_READ);
5597
5598    /* Prevent speculative reads from getting ahead of the doorbell. */
5599    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5600                      0, 0, BUS_SPACE_BARRIER_READ);
5601
5602    return (0);
5603}
5604
5605static void
5606bxe_tx_start_locked(struct bxe_softc *sc,
5607                    if_t ifp,
5608                    struct bxe_fastpath *fp)
5609{
5610    struct mbuf *m = NULL;
5611    int tx_count = 0;
5612    uint16_t tx_bd_avail;
5613
5614    BXE_FP_TX_LOCK_ASSERT(fp);
5615
5616    /* keep adding entries while there are frames to send */
5617    while (!if_sendq_empty(ifp)) {
5618
5619        /*
5620         * check for any frames to send
5621         * dequeue can still be NULL even if queue is not empty
5622         */
5623        m = if_dequeue(ifp);
5624        if (__predict_false(m == NULL)) {
5625            break;
5626        }
5627
5628        /* the mbuf now belongs to us */
5629        fp->eth_q_stats.mbuf_alloc_tx++;
5630
5631        /*
5632         * Put the frame into the transmit ring. If we don't have room,
5633         * place the mbuf back at the head of the TX queue, set the
5634         * OACTIVE flag, and wait for the NIC to drain the chain.
5635         */
5636        if (__predict_false(bxe_tx_encap(fp, &m))) {
5637            fp->eth_q_stats.tx_encap_failures++;
5638            if (m != NULL) {
5639                /* mark the TX queue as full and return the frame */
5640                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5641		if_sendq_prepend(ifp, m);
5642                fp->eth_q_stats.mbuf_alloc_tx--;
5643                fp->eth_q_stats.tx_queue_xoff++;
5644            }
5645
5646            /* stop looking for more work */
5647            break;
5648        }
5649
5650        /* the frame was enqueued successfully */
5651        tx_count++;
5652
5653        /* send a copy of the frame to any BPF listeners. */
5654        if_etherbpfmtap(ifp, m);
5655
5656        tx_bd_avail = bxe_tx_avail(sc, fp);
5657
5658        /* handle any completions if we're running low */
5659        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5660            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5661            bxe_txeof(sc, fp);
5662            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5663                break;
5664            }
5665        }
5666    }
5667
5668    /* all TX packets were dequeued and/or the tx ring is full */
5669    if (tx_count > 0) {
5670        /* reset the TX watchdog timeout timer */
5671        fp->watchdog_timer = BXE_TX_TIMEOUT;
5672    }
5673}
5674
5675/* Legacy (non-RSS) dispatch routine */
5676static void
5677bxe_tx_start(if_t ifp)
5678{
5679    struct bxe_softc *sc;
5680    struct bxe_fastpath *fp;
5681
5682    sc = if_getsoftc(ifp);
5683
5684    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5685        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5686        return;
5687    }
5688
5689    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5690        BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
5691        return;
5692    }
5693
5694    if (!sc->link_vars.link_up) {
5695        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5696        return;
5697    }
5698
5699    fp = &sc->fp[0];
5700
5701    BXE_FP_TX_LOCK(fp);
5702    bxe_tx_start_locked(sc, ifp, fp);
5703    BXE_FP_TX_UNLOCK(fp);
5704}
5705
5706#if __FreeBSD_version >= 800000
5707
5708static int
5709bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5710                       if_t                ifp,
5711                       struct bxe_fastpath *fp,
5712                       struct mbuf         *m)
5713{
5714    struct buf_ring *tx_br = fp->tx_br;
5715    struct mbuf *next;
5716    int depth, rc, tx_count;
5717    uint16_t tx_bd_avail;
5718
5719    rc = tx_count = 0;
5720
5721    BXE_FP_TX_LOCK_ASSERT(fp);
5722
5723    if (!tx_br) {
5724        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5725        return (EINVAL);
5726    }
5727
5728    if (!sc->link_vars.link_up ||
5729        (ifp->if_drv_flags &
5730        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
5731        rc = drbr_enqueue_drv(ifp, tx_br, m);
5732        goto bxe_tx_mq_start_locked_exit;
5733    }
5734
5735    /* fetch the depth of the driver queue */
5736    depth = drbr_inuse_drv(ifp, tx_br);
5737    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5738        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5739    }
5740
5741    if (m == NULL) {
5742        /* no new work, check for pending frames */
5743        next = drbr_dequeue_drv(ifp, tx_br);
5744    } else if (drbr_needs_enqueue_drv(ifp, tx_br)) {
5745        /* have both new and pending work, maintain packet order */
5746        rc = drbr_enqueue_drv(ifp, tx_br, m);
5747        if (rc != 0) {
5748            fp->eth_q_stats.tx_soft_errors++;
5749            goto bxe_tx_mq_start_locked_exit;
5750        }
5751        next = drbr_dequeue_drv(ifp, tx_br);
5752    } else {
5753        /* new work only and nothing pending */
5754        next = m;
5755    }
5756
5757    /* keep adding entries while there are frames to send */
5758    while (next != NULL) {
5759
5760        /* the mbuf now belongs to us */
5761        fp->eth_q_stats.mbuf_alloc_tx++;
5762
5763        /*
5764         * Put the frame into the transmit ring. If we don't have room,
5765         * place the mbuf back at the head of the TX queue, set the
5766         * OACTIVE flag, and wait for the NIC to drain the chain.
5767         */
5768        rc = bxe_tx_encap(fp, &next);
5769        if (__predict_false(rc != 0)) {
5770            fp->eth_q_stats.tx_encap_failures++;
5771            if (next != NULL) {
5772                /* mark the TX queue as full and save the frame */
5773                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5774                /* XXX this may reorder the frame */
5775                rc = drbr_enqueue_drv(ifp, tx_br, next);
5776                fp->eth_q_stats.mbuf_alloc_tx--;
5777                fp->eth_q_stats.tx_frames_deferred++;
5778            }
5779
5780            /* stop looking for more work */
5781            break;
5782        }
5783
5784        /* the transmit frame was enqueued successfully */
5785        tx_count++;
5786
5787        /* send a copy of the frame to any BPF listeners */
5788	if_etherbpfmtap(ifp, next);
5789
5790        tx_bd_avail = bxe_tx_avail(sc, fp);
5791
5792        /* handle any completions if we're running low */
5793        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5794            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5795            bxe_txeof(sc, fp);
5796            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5797                break;
5798            }
5799        }
5800
5801        next = drbr_dequeue_drv(ifp, tx_br);
5802    }
5803
5804    /* all TX packets were dequeued and/or the tx ring is full */
5805    if (tx_count > 0) {
5806        /* reset the TX watchdog timeout timer */
5807        fp->watchdog_timer = BXE_TX_TIMEOUT;
5808    }
5809
5810bxe_tx_mq_start_locked_exit:
5811
5812    return (rc);
5813}
5814
5815/* Multiqueue (TSS) dispatch routine. */
5816static int
5817bxe_tx_mq_start(struct ifnet *ifp,
5818                struct mbuf  *m)
5819{
5820    struct bxe_softc *sc = if_getsoftc(ifp);
5821    struct bxe_fastpath *fp;
5822    int fp_index, rc;
5823
5824    fp_index = 0; /* default is the first queue */
5825
5826    /* check if flowid is set */
5827    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
5828        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5829
5830    fp = &sc->fp[fp_index];
5831
5832    if (BXE_FP_TX_TRYLOCK(fp)) {
5833        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5834        BXE_FP_TX_UNLOCK(fp);
5835    } else
5836        rc = drbr_enqueue_drv(ifp, fp->tx_br, m);
5837
5838    return (rc);
5839}
5840
5841static void
5842bxe_mq_flush(struct ifnet *ifp)
5843{
5844    struct bxe_softc *sc = if_getsoftc(ifp);
5845    struct bxe_fastpath *fp;
5846    struct mbuf *m;
5847    int i;
5848
5849    for (i = 0; i < sc->num_queues; i++) {
5850        fp = &sc->fp[i];
5851
5852        if (fp->state != BXE_FP_STATE_OPEN) {
5853            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5854                  fp->index, fp->state);
5855            continue;
5856        }
5857
5858        if (fp->tx_br != NULL) {
5859            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5860            BXE_FP_TX_LOCK(fp);
5861            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5862                m_freem(m);
5863            }
5864            BXE_FP_TX_UNLOCK(fp);
5865        }
5866    }
5867
5868    if_qflush(ifp);
5869}
5870
5871#endif /* FreeBSD_version >= 800000 */
5872
5873static uint16_t
5874bxe_cid_ilt_lines(struct bxe_softc *sc)
5875{
5876    if (IS_SRIOV(sc)) {
5877        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5878    }
5879    return (L2_ILT_LINES(sc));
5880}
5881
5882static void
5883bxe_ilt_set_info(struct bxe_softc *sc)
5884{
5885    struct ilt_client_info *ilt_client;
5886    struct ecore_ilt *ilt = sc->ilt;
5887    uint16_t line = 0;
5888
5889    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5890    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5891
5892    /* CDU */
5893    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5894    ilt_client->client_num = ILT_CLIENT_CDU;
5895    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5896    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5897    ilt_client->start = line;
5898    line += bxe_cid_ilt_lines(sc);
5899
5900    if (CNIC_SUPPORT(sc)) {
5901        line += CNIC_ILT_LINES;
5902    }
5903
5904    ilt_client->end = (line - 1);
5905
5906    BLOGD(sc, DBG_LOAD,
5907          "ilt client[CDU]: start %d, end %d, "
5908          "psz 0x%x, flags 0x%x, hw psz %d\n",
5909          ilt_client->start, ilt_client->end,
5910          ilt_client->page_size,
5911          ilt_client->flags,
5912          ilog2(ilt_client->page_size >> 12));
5913
5914    /* QM */
5915    if (QM_INIT(sc->qm_cid_count)) {
5916        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5917        ilt_client->client_num = ILT_CLIENT_QM;
5918        ilt_client->page_size = QM_ILT_PAGE_SZ;
5919        ilt_client->flags = 0;
5920        ilt_client->start = line;
5921
5922        /* 4 bytes for each cid */
5923        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5924                             QM_ILT_PAGE_SZ);
5925
5926        ilt_client->end = (line - 1);
5927
5928        BLOGD(sc, DBG_LOAD,
5929              "ilt client[QM]: start %d, end %d, "
5930              "psz 0x%x, flags 0x%x, hw psz %d\n",
5931              ilt_client->start, ilt_client->end,
5932              ilt_client->page_size, ilt_client->flags,
5933              ilog2(ilt_client->page_size >> 12));
5934    }
5935
5936    if (CNIC_SUPPORT(sc)) {
5937        /* SRC */
5938        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5939        ilt_client->client_num = ILT_CLIENT_SRC;
5940        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5941        ilt_client->flags = 0;
5942        ilt_client->start = line;
5943        line += SRC_ILT_LINES;
5944        ilt_client->end = (line - 1);
5945
5946        BLOGD(sc, DBG_LOAD,
5947              "ilt client[SRC]: start %d, end %d, "
5948              "psz 0x%x, flags 0x%x, hw psz %d\n",
5949              ilt_client->start, ilt_client->end,
5950              ilt_client->page_size, ilt_client->flags,
5951              ilog2(ilt_client->page_size >> 12));
5952
5953        /* TM */
5954        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5955        ilt_client->client_num = ILT_CLIENT_TM;
5956        ilt_client->page_size = TM_ILT_PAGE_SZ;
5957        ilt_client->flags = 0;
5958        ilt_client->start = line;
5959        line += TM_ILT_LINES;
5960        ilt_client->end = (line - 1);
5961
5962        BLOGD(sc, DBG_LOAD,
5963              "ilt client[TM]: start %d, end %d, "
5964              "psz 0x%x, flags 0x%x, hw psz %d\n",
5965              ilt_client->start, ilt_client->end,
5966              ilt_client->page_size, ilt_client->flags,
5967              ilog2(ilt_client->page_size >> 12));
5968    }
5969
5970    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5971}
5972
5973static void
5974bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5975{
5976    int i;
5977    uint32_t rx_buf_size;
5978
5979    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5980
5981    for (i = 0; i < sc->num_queues; i++) {
5982        if(rx_buf_size <= MCLBYTES){
5983            sc->fp[i].rx_buf_size = rx_buf_size;
5984            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5985        }else if (rx_buf_size <= MJUMPAGESIZE){
5986            sc->fp[i].rx_buf_size = rx_buf_size;
5987            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5988        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5989            sc->fp[i].rx_buf_size = MCLBYTES;
5990            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5991        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5992            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5993            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5994        }else {
5995            sc->fp[i].rx_buf_size = MCLBYTES;
5996            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5997        }
5998    }
5999}
6000
6001static int
6002bxe_alloc_ilt_mem(struct bxe_softc *sc)
6003{
6004    int rc = 0;
6005
6006    if ((sc->ilt =
6007         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6008                                    M_BXE_ILT,
6009                                    (M_NOWAIT | M_ZERO))) == NULL) {
6010        rc = 1;
6011    }
6012
6013    return (rc);
6014}
6015
6016static int
6017bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6018{
6019    int rc = 0;
6020
6021    if ((sc->ilt->lines =
6022         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6023                                    M_BXE_ILT,
6024                                    (M_NOWAIT | M_ZERO))) == NULL) {
6025        rc = 1;
6026    }
6027
6028    return (rc);
6029}
6030
6031static void
6032bxe_free_ilt_mem(struct bxe_softc *sc)
6033{
6034    if (sc->ilt != NULL) {
6035        free(sc->ilt, M_BXE_ILT);
6036        sc->ilt = NULL;
6037    }
6038}
6039
6040static void
6041bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6042{
6043    if (sc->ilt->lines != NULL) {
6044        free(sc->ilt->lines, M_BXE_ILT);
6045        sc->ilt->lines = NULL;
6046    }
6047}
6048
6049static void
6050bxe_free_mem(struct bxe_softc *sc)
6051{
6052    int i;
6053
6054    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6055        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6056        sc->context[i].vcxt = NULL;
6057        sc->context[i].size = 0;
6058    }
6059
6060    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6061
6062    bxe_free_ilt_lines_mem(sc);
6063
6064}
6065
6066static int
6067bxe_alloc_mem(struct bxe_softc *sc)
6068{
6069    int context_size;
6070    int allocated;
6071    int i;
6072
6073    /*
6074     * Allocate memory for CDU context:
6075     * This memory is allocated separately and not in the generic ILT
6076     * functions because CDU differs in few aspects:
6077     * 1. There can be multiple entities allocating memory for context -
6078     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6079     * its own ILT lines.
6080     * 2. Since CDU page-size is not a single 4KB page (which is the case
6081     * for the other ILT clients), to be efficient we want to support
6082     * allocation of sub-page-size in the last entry.
6083     * 3. Context pointers are used by the driver to pass to FW / update
6084     * the context (for the other ILT clients the pointers are used just to
6085     * free the memory during unload).
6086     */
6087    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6088    for (i = 0, allocated = 0; allocated < context_size; i++) {
6089        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6090                                  (context_size - allocated));
6091
6092        if (bxe_dma_alloc(sc, sc->context[i].size,
6093                          &sc->context[i].vcxt_dma,
6094                          "cdu context") != 0) {
6095            bxe_free_mem(sc);
6096            return (-1);
6097        }
6098
6099        sc->context[i].vcxt =
6100            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6101
6102        allocated += sc->context[i].size;
6103    }
6104
6105    bxe_alloc_ilt_lines_mem(sc);
6106
6107    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6108          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6109    {
6110        for (i = 0; i < 4; i++) {
6111            BLOGD(sc, DBG_LOAD,
6112                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6113                  i,
6114                  sc->ilt->clients[i].page_size,
6115                  sc->ilt->clients[i].start,
6116                  sc->ilt->clients[i].end,
6117                  sc->ilt->clients[i].client_num,
6118                  sc->ilt->clients[i].flags);
6119        }
6120    }
6121    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6122        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6123        bxe_free_mem(sc);
6124        return (-1);
6125    }
6126
6127    return (0);
6128}
6129
6130static void
6131bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6132{
6133    struct bxe_softc *sc;
6134    int i;
6135
6136    sc = fp->sc;
6137
6138    if (fp->rx_mbuf_tag == NULL) {
6139        return;
6140    }
6141
6142    /* free all mbufs and unload all maps */
6143    for (i = 0; i < RX_BD_TOTAL; i++) {
6144        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6145            bus_dmamap_sync(fp->rx_mbuf_tag,
6146                            fp->rx_mbuf_chain[i].m_map,
6147                            BUS_DMASYNC_POSTREAD);
6148            bus_dmamap_unload(fp->rx_mbuf_tag,
6149                              fp->rx_mbuf_chain[i].m_map);
6150        }
6151
6152        if (fp->rx_mbuf_chain[i].m != NULL) {
6153            m_freem(fp->rx_mbuf_chain[i].m);
6154            fp->rx_mbuf_chain[i].m = NULL;
6155            fp->eth_q_stats.mbuf_alloc_rx--;
6156        }
6157    }
6158}
6159
6160static void
6161bxe_free_tpa_pool(struct bxe_fastpath *fp)
6162{
6163    struct bxe_softc *sc;
6164    int i, max_agg_queues;
6165
6166    sc = fp->sc;
6167
6168    if (fp->rx_mbuf_tag == NULL) {
6169        return;
6170    }
6171
6172    max_agg_queues = MAX_AGG_QS(sc);
6173
6174    /* release all mbufs and unload all DMA maps in the TPA pool */
6175    for (i = 0; i < max_agg_queues; i++) {
6176        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6177            bus_dmamap_sync(fp->rx_mbuf_tag,
6178                            fp->rx_tpa_info[i].bd.m_map,
6179                            BUS_DMASYNC_POSTREAD);
6180            bus_dmamap_unload(fp->rx_mbuf_tag,
6181                              fp->rx_tpa_info[i].bd.m_map);
6182        }
6183
6184        if (fp->rx_tpa_info[i].bd.m != NULL) {
6185            m_freem(fp->rx_tpa_info[i].bd.m);
6186            fp->rx_tpa_info[i].bd.m = NULL;
6187            fp->eth_q_stats.mbuf_alloc_tpa--;
6188        }
6189    }
6190}
6191
6192static void
6193bxe_free_sge_chain(struct bxe_fastpath *fp)
6194{
6195    struct bxe_softc *sc;
6196    int i;
6197
6198    sc = fp->sc;
6199
6200    if (fp->rx_sge_mbuf_tag == NULL) {
6201        return;
6202    }
6203
6204    /* rree all mbufs and unload all maps */
6205    for (i = 0; i < RX_SGE_TOTAL; i++) {
6206        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6207            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6208                            fp->rx_sge_mbuf_chain[i].m_map,
6209                            BUS_DMASYNC_POSTREAD);
6210            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6211                              fp->rx_sge_mbuf_chain[i].m_map);
6212        }
6213
6214        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6215            m_freem(fp->rx_sge_mbuf_chain[i].m);
6216            fp->rx_sge_mbuf_chain[i].m = NULL;
6217            fp->eth_q_stats.mbuf_alloc_sge--;
6218        }
6219    }
6220}
6221
6222static void
6223bxe_free_fp_buffers(struct bxe_softc *sc)
6224{
6225    struct bxe_fastpath *fp;
6226    int i;
6227
6228    for (i = 0; i < sc->num_queues; i++) {
6229        fp = &sc->fp[i];
6230
6231#if __FreeBSD_version >= 800000
6232        if (fp->tx_br != NULL) {
6233            /* just in case bxe_mq_flush() wasn't called */
6234            if (mtx_initialized(&fp->tx_mtx)) {
6235                struct mbuf *m;
6236
6237                BXE_FP_TX_LOCK(fp);
6238                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6239                    m_freem(m);
6240                BXE_FP_TX_UNLOCK(fp);
6241            }
6242            buf_ring_free(fp->tx_br, M_DEVBUF);
6243            fp->tx_br = NULL;
6244        }
6245#endif
6246
6247        /* free all RX buffers */
6248        bxe_free_rx_bd_chain(fp);
6249        bxe_free_tpa_pool(fp);
6250        bxe_free_sge_chain(fp);
6251
6252        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6253            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6254                  fp->eth_q_stats.mbuf_alloc_rx);
6255        }
6256
6257        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6258            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6259                  fp->eth_q_stats.mbuf_alloc_sge);
6260        }
6261
6262        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6263            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6264                  fp->eth_q_stats.mbuf_alloc_tpa);
6265        }
6266
6267        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6268            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6269                  fp->eth_q_stats.mbuf_alloc_tx);
6270        }
6271
6272        /* XXX verify all mbufs were reclaimed */
6273
6274        if (mtx_initialized(&fp->tx_mtx)) {
6275            mtx_destroy(&fp->tx_mtx);
6276        }
6277
6278        if (mtx_initialized(&fp->rx_mtx)) {
6279            mtx_destroy(&fp->rx_mtx);
6280        }
6281    }
6282}
6283
6284static int
6285bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6286                     uint16_t            prev_index,
6287                     uint16_t            index)
6288{
6289    struct bxe_sw_rx_bd *rx_buf;
6290    struct eth_rx_bd *rx_bd;
6291    bus_dma_segment_t segs[1];
6292    bus_dmamap_t map;
6293    struct mbuf *m;
6294    int nsegs, rc;
6295
6296    rc = 0;
6297
6298    /* allocate the new RX BD mbuf */
6299    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6300    if (__predict_false(m == NULL)) {
6301        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6302        return (ENOBUFS);
6303    }
6304
6305    fp->eth_q_stats.mbuf_alloc_rx++;
6306
6307    /* initialize the mbuf buffer length */
6308    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6309
6310    /* map the mbuf into non-paged pool */
6311    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6312                                 fp->rx_mbuf_spare_map,
6313                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6314    if (__predict_false(rc != 0)) {
6315        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6316        m_freem(m);
6317        fp->eth_q_stats.mbuf_alloc_rx--;
6318        return (rc);
6319    }
6320
6321    /* all mbufs must map to a single segment */
6322    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6323
6324    /* release any existing RX BD mbuf mappings */
6325
6326    if (prev_index != index) {
6327        rx_buf = &fp->rx_mbuf_chain[prev_index];
6328
6329        if (rx_buf->m_map != NULL) {
6330            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6331                            BUS_DMASYNC_POSTREAD);
6332            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6333        }
6334
6335        /*
6336         * We only get here from bxe_rxeof() when the maximum number
6337         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6338         * holds the mbuf in the prev_index so it's OK to NULL it out
6339         * here without concern of a memory leak.
6340         */
6341        fp->rx_mbuf_chain[prev_index].m = NULL;
6342    }
6343
6344    rx_buf = &fp->rx_mbuf_chain[index];
6345
6346    if (rx_buf->m_map != NULL) {
6347        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6348                        BUS_DMASYNC_POSTREAD);
6349        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6350    }
6351
6352    /* save the mbuf and mapping info for a future packet */
6353    map = (prev_index != index) ?
6354              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6355    rx_buf->m_map = fp->rx_mbuf_spare_map;
6356    fp->rx_mbuf_spare_map = map;
6357    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6358                    BUS_DMASYNC_PREREAD);
6359    rx_buf->m = m;
6360
6361    rx_bd = &fp->rx_chain[index];
6362    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6363    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6364
6365    return (rc);
6366}
6367
6368static int
6369bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6370                      int                 queue)
6371{
6372    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6373    bus_dma_segment_t segs[1];
6374    bus_dmamap_t map;
6375    struct mbuf *m;
6376    int nsegs;
6377    int rc = 0;
6378
6379    /* allocate the new TPA mbuf */
6380    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6381    if (__predict_false(m == NULL)) {
6382        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6383        return (ENOBUFS);
6384    }
6385
6386    fp->eth_q_stats.mbuf_alloc_tpa++;
6387
6388    /* initialize the mbuf buffer length */
6389    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6390
6391    /* map the mbuf into non-paged pool */
6392    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6393                                 fp->rx_tpa_info_mbuf_spare_map,
6394                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6395    if (__predict_false(rc != 0)) {
6396        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6397        m_free(m);
6398        fp->eth_q_stats.mbuf_alloc_tpa--;
6399        return (rc);
6400    }
6401
6402    /* all mbufs must map to a single segment */
6403    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6404
6405    /* release any existing TPA mbuf mapping */
6406    if (tpa_info->bd.m_map != NULL) {
6407        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6408                        BUS_DMASYNC_POSTREAD);
6409        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6410    }
6411
6412    /* save the mbuf and mapping info for the TPA mbuf */
6413    map = tpa_info->bd.m_map;
6414    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6415    fp->rx_tpa_info_mbuf_spare_map = map;
6416    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6417                    BUS_DMASYNC_PREREAD);
6418    tpa_info->bd.m = m;
6419    tpa_info->seg = segs[0];
6420
6421    return (rc);
6422}
6423
6424/*
6425 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6426 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6427 * chain.
6428 */
6429static int
6430bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6431                      uint16_t            index)
6432{
6433    struct bxe_sw_rx_bd *sge_buf;
6434    struct eth_rx_sge *sge;
6435    bus_dma_segment_t segs[1];
6436    bus_dmamap_t map;
6437    struct mbuf *m;
6438    int nsegs;
6439    int rc = 0;
6440
6441    /* allocate a new SGE mbuf */
6442    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6443    if (__predict_false(m == NULL)) {
6444        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6445        return (ENOMEM);
6446    }
6447
6448    fp->eth_q_stats.mbuf_alloc_sge++;
6449
6450    /* initialize the mbuf buffer length */
6451    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6452
6453    /* map the SGE mbuf into non-paged pool */
6454    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6455                                 fp->rx_sge_mbuf_spare_map,
6456                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6457    if (__predict_false(rc != 0)) {
6458        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6459        m_freem(m);
6460        fp->eth_q_stats.mbuf_alloc_sge--;
6461        return (rc);
6462    }
6463
6464    /* all mbufs must map to a single segment */
6465    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6466
6467    sge_buf = &fp->rx_sge_mbuf_chain[index];
6468
6469    /* release any existing SGE mbuf mapping */
6470    if (sge_buf->m_map != NULL) {
6471        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6472                        BUS_DMASYNC_POSTREAD);
6473        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6474    }
6475
6476    /* save the mbuf and mapping info for a future packet */
6477    map = sge_buf->m_map;
6478    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6479    fp->rx_sge_mbuf_spare_map = map;
6480    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6481                    BUS_DMASYNC_PREREAD);
6482    sge_buf->m = m;
6483
6484    sge = &fp->rx_sge_chain[index];
6485    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6486    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6487
6488    return (rc);
6489}
6490
6491static __noinline int
6492bxe_alloc_fp_buffers(struct bxe_softc *sc)
6493{
6494    struct bxe_fastpath *fp;
6495    int i, j, rc = 0;
6496    int ring_prod, cqe_ring_prod;
6497    int max_agg_queues;
6498
6499    for (i = 0; i < sc->num_queues; i++) {
6500        fp = &sc->fp[i];
6501
6502#if __FreeBSD_version >= 800000
6503        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
6504                                   M_NOWAIT, &fp->tx_mtx);
6505        if (fp->tx_br == NULL) {
6506            BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
6507            goto bxe_alloc_fp_buffers_error;
6508        }
6509#endif
6510
6511        ring_prod = cqe_ring_prod = 0;
6512        fp->rx_bd_cons = 0;
6513        fp->rx_cq_cons = 0;
6514
6515        /* allocate buffers for the RX BDs in RX BD chain */
6516        for (j = 0; j < sc->max_rx_bufs; j++) {
6517            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6518            if (rc != 0) {
6519                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6520                      i, rc);
6521                goto bxe_alloc_fp_buffers_error;
6522            }
6523
6524            ring_prod     = RX_BD_NEXT(ring_prod);
6525            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6526        }
6527
6528        fp->rx_bd_prod = ring_prod;
6529        fp->rx_cq_prod = cqe_ring_prod;
6530        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6531
6532        max_agg_queues = MAX_AGG_QS(sc);
6533
6534        fp->tpa_enable = TRUE;
6535
6536        /* fill the TPA pool */
6537        for (j = 0; j < max_agg_queues; j++) {
6538            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6539            if (rc != 0) {
6540                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6541                          i, j);
6542                fp->tpa_enable = FALSE;
6543                goto bxe_alloc_fp_buffers_error;
6544            }
6545
6546            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6547        }
6548
6549        if (fp->tpa_enable) {
6550            /* fill the RX SGE chain */
6551            ring_prod = 0;
6552            for (j = 0; j < RX_SGE_USABLE; j++) {
6553                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6554                if (rc != 0) {
6555                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6556                              i, ring_prod);
6557                    fp->tpa_enable = FALSE;
6558                    ring_prod = 0;
6559                    goto bxe_alloc_fp_buffers_error;
6560                }
6561
6562                ring_prod = RX_SGE_NEXT(ring_prod);
6563            }
6564
6565            fp->rx_sge_prod = ring_prod;
6566        }
6567    }
6568
6569    return (0);
6570
6571bxe_alloc_fp_buffers_error:
6572
6573    /* unwind what was already allocated */
6574    bxe_free_rx_bd_chain(fp);
6575    bxe_free_tpa_pool(fp);
6576    bxe_free_sge_chain(fp);
6577
6578    return (ENOBUFS);
6579}
6580
6581static void
6582bxe_free_fw_stats_mem(struct bxe_softc *sc)
6583{
6584    bxe_dma_free(sc, &sc->fw_stats_dma);
6585
6586    sc->fw_stats_num = 0;
6587
6588    sc->fw_stats_req_size = 0;
6589    sc->fw_stats_req = NULL;
6590    sc->fw_stats_req_mapping = 0;
6591
6592    sc->fw_stats_data_size = 0;
6593    sc->fw_stats_data = NULL;
6594    sc->fw_stats_data_mapping = 0;
6595}
6596
6597static int
6598bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6599{
6600    uint8_t num_queue_stats;
6601    int num_groups;
6602
6603    /* number of queues for statistics is number of eth queues */
6604    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6605
6606    /*
6607     * Total number of FW statistics requests =
6608     *   1 for port stats + 1 for PF stats + num of queues
6609     */
6610    sc->fw_stats_num = (2 + num_queue_stats);
6611
6612    /*
6613     * Request is built from stats_query_header and an array of
6614     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6615     * rules. The real number or requests is configured in the
6616     * stats_query_header.
6617     */
6618    num_groups =
6619        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6620         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6621
6622    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6623          sc->fw_stats_num, num_groups);
6624
6625    sc->fw_stats_req_size =
6626        (sizeof(struct stats_query_header) +
6627         (num_groups * sizeof(struct stats_query_cmd_group)));
6628
6629    /*
6630     * Data for statistics requests + stats_counter.
6631     * stats_counter holds per-STORM counters that are incremented when
6632     * STORM has finished with the current request. Memory for FCoE
6633     * offloaded statistics are counted anyway, even if they will not be sent.
6634     * VF stats are not accounted for here as the data of VF stats is stored
6635     * in memory allocated by the VF, not here.
6636     */
6637    sc->fw_stats_data_size =
6638        (sizeof(struct stats_counter) +
6639         sizeof(struct per_port_stats) +
6640         sizeof(struct per_pf_stats) +
6641         /* sizeof(struct fcoe_statistics_params) + */
6642         (sizeof(struct per_queue_stats) * num_queue_stats));
6643
6644    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6645                      &sc->fw_stats_dma, "fw stats") != 0) {
6646        bxe_free_fw_stats_mem(sc);
6647        return (-1);
6648    }
6649
6650    /* set up the shortcuts */
6651
6652    sc->fw_stats_req =
6653        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6654    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6655
6656    sc->fw_stats_data =
6657        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6658                                     sc->fw_stats_req_size);
6659    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6660                                 sc->fw_stats_req_size);
6661
6662    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6663          (uintmax_t)sc->fw_stats_req_mapping);
6664
6665    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6666          (uintmax_t)sc->fw_stats_data_mapping);
6667
6668    return (0);
6669}
6670
6671/*
6672 * Bits map:
6673 * 0-7  - Engine0 load counter.
6674 * 8-15 - Engine1 load counter.
6675 * 16   - Engine0 RESET_IN_PROGRESS bit.
6676 * 17   - Engine1 RESET_IN_PROGRESS bit.
6677 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6678 *        function on the engine
6679 * 19   - Engine1 ONE_IS_LOADED.
6680 * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6681 *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6682 *        for just the one belonging to its engine).
6683 */
6684#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6685#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6686#define BXE_PATH0_LOAD_CNT_SHIFT  0
6687#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6688#define BXE_PATH1_LOAD_CNT_SHIFT  8
6689#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6690#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6691#define BXE_GLOBAL_RESET_BIT      0x00040000
6692
6693/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6694static void
6695bxe_set_reset_global(struct bxe_softc *sc)
6696{
6697    uint32_t val;
6698    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6699    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6700    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6701    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6702}
6703
6704/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6705static void
6706bxe_clear_reset_global(struct bxe_softc *sc)
6707{
6708    uint32_t val;
6709    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6710    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6711    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6712    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6713}
6714
6715/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6716static uint8_t
6717bxe_reset_is_global(struct bxe_softc *sc)
6718{
6719    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6720    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6721    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6722}
6723
6724/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6725static void
6726bxe_set_reset_done(struct bxe_softc *sc)
6727{
6728    uint32_t val;
6729    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6730                                 BXE_PATH0_RST_IN_PROG_BIT;
6731
6732    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6733
6734    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6735    /* Clear the bit */
6736    val &= ~bit;
6737    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6738
6739    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6740}
6741
6742/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6743static void
6744bxe_set_reset_in_progress(struct bxe_softc *sc)
6745{
6746    uint32_t val;
6747    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6748                                 BXE_PATH0_RST_IN_PROG_BIT;
6749
6750    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6751
6752    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6753    /* Set the bit */
6754    val |= bit;
6755    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6756
6757    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6758}
6759
6760/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6761static uint8_t
6762bxe_reset_is_done(struct bxe_softc *sc,
6763                  int              engine)
6764{
6765    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6766    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6767                            BXE_PATH0_RST_IN_PROG_BIT;
6768
6769    /* return false if bit is set */
6770    return (val & bit) ? FALSE : TRUE;
6771}
6772
6773/* get the load status for an engine, should be run under rtnl lock */
6774static uint8_t
6775bxe_get_load_status(struct bxe_softc *sc,
6776                    int              engine)
6777{
6778    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6779                             BXE_PATH0_LOAD_CNT_MASK;
6780    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6781                              BXE_PATH0_LOAD_CNT_SHIFT;
6782    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6783
6784    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6785
6786    val = ((val & mask) >> shift);
6787
6788    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6789
6790    return (val != 0);
6791}
6792
6793/* set pf load mark */
6794/* XXX needs to be under rtnl lock */
6795static void
6796bxe_set_pf_load(struct bxe_softc *sc)
6797{
6798    uint32_t val;
6799    uint32_t val1;
6800    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6801                                  BXE_PATH0_LOAD_CNT_MASK;
6802    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6803                                   BXE_PATH0_LOAD_CNT_SHIFT;
6804
6805    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6806
6807    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6808    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6809
6810    /* get the current counter value */
6811    val1 = ((val & mask) >> shift);
6812
6813    /* set bit of this PF */
6814    val1 |= (1 << SC_ABS_FUNC(sc));
6815
6816    /* clear the old value */
6817    val &= ~mask;
6818
6819    /* set the new one */
6820    val |= ((val1 << shift) & mask);
6821
6822    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6823
6824    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6825}
6826
6827/* clear pf load mark */
6828/* XXX needs to be under rtnl lock */
6829static uint8_t
6830bxe_clear_pf_load(struct bxe_softc *sc)
6831{
6832    uint32_t val1, val;
6833    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6834                                  BXE_PATH0_LOAD_CNT_MASK;
6835    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6836                                   BXE_PATH0_LOAD_CNT_SHIFT;
6837
6838    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6839    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6840    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6841
6842    /* get the current counter value */
6843    val1 = (val & mask) >> shift;
6844
6845    /* clear bit of that PF */
6846    val1 &= ~(1 << SC_ABS_FUNC(sc));
6847
6848    /* clear the old value */
6849    val &= ~mask;
6850
6851    /* set the new one */
6852    val |= ((val1 << shift) & mask);
6853
6854    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6855    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6856    return (val1 != 0);
6857}
6858
6859/* send load requrest to mcp and analyze response */
6860static int
6861bxe_nic_load_request(struct bxe_softc *sc,
6862                     uint32_t         *load_code)
6863{
6864    /* init fw_seq */
6865    sc->fw_seq =
6866        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6867         DRV_MSG_SEQ_NUMBER_MASK);
6868
6869    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6870
6871    /* get the current FW pulse sequence */
6872    sc->fw_drv_pulse_wr_seq =
6873        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6874         DRV_PULSE_SEQ_MASK);
6875
6876    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6877          sc->fw_drv_pulse_wr_seq);
6878
6879    /* load request */
6880    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6881                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6882
6883    /* if the MCP fails to respond we must abort */
6884    if (!(*load_code)) {
6885        BLOGE(sc, "MCP response failure!\n");
6886        return (-1);
6887    }
6888
6889    /* if MCP refused then must abort */
6890    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6891        BLOGE(sc, "MCP refused load request\n");
6892        return (-1);
6893    }
6894
6895    return (0);
6896}
6897
6898/*
6899 * Check whether another PF has already loaded FW to chip. In virtualized
6900 * environments a pf from anoth VM may have already initialized the device
6901 * including loading FW.
6902 */
6903static int
6904bxe_nic_load_analyze_req(struct bxe_softc *sc,
6905                         uint32_t         load_code)
6906{
6907    uint32_t my_fw, loaded_fw;
6908
6909    /* is another pf loaded on this engine? */
6910    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6911        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6912        /* build my FW version dword */
6913        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6914                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6915                 (BCM_5710_FW_REVISION_VERSION << 16) +
6916                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6917
6918        /* read loaded FW from chip */
6919        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6920        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6921              loaded_fw, my_fw);
6922
6923        /* abort nic load if version mismatch */
6924        if (my_fw != loaded_fw) {
6925            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6926                  loaded_fw, my_fw);
6927            return (-1);
6928        }
6929    }
6930
6931    return (0);
6932}
6933
6934/* mark PMF if applicable */
6935static void
6936bxe_nic_load_pmf(struct bxe_softc *sc,
6937                 uint32_t         load_code)
6938{
6939    uint32_t ncsi_oem_data_addr;
6940
6941    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6942        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6943        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6944        /*
6945         * Barrier here for ordering between the writing to sc->port.pmf here
6946         * and reading it from the periodic task.
6947         */
6948        sc->port.pmf = 1;
6949        mb();
6950    } else {
6951        sc->port.pmf = 0;
6952    }
6953
6954    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6955
6956    /* XXX needed? */
6957    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6958        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6959            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6960            if (ncsi_oem_data_addr) {
6961                REG_WR(sc,
6962                       (ncsi_oem_data_addr +
6963                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6964                       0);
6965            }
6966        }
6967    }
6968}
6969
6970static void
6971bxe_read_mf_cfg(struct bxe_softc *sc)
6972{
6973    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6974    int abs_func;
6975    int vn;
6976
6977    if (BXE_NOMCP(sc)) {
6978        return; /* what should be the default bvalue in this case */
6979    }
6980
6981    /*
6982     * The formula for computing the absolute function number is...
6983     * For 2 port configuration (4 functions per port):
6984     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6985     * For 4 port configuration (2 functions per port):
6986     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6987     */
6988    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6989        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6990        if (abs_func >= E1H_FUNC_MAX) {
6991            break;
6992        }
6993        sc->devinfo.mf_info.mf_config[vn] =
6994            MFCFG_RD(sc, func_mf_config[abs_func].config);
6995    }
6996
6997    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6998        FUNC_MF_CFG_FUNC_DISABLED) {
6999        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7000        sc->flags |= BXE_MF_FUNC_DIS;
7001    } else {
7002        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7003        sc->flags &= ~BXE_MF_FUNC_DIS;
7004    }
7005}
7006
7007/* acquire split MCP access lock register */
7008static int bxe_acquire_alr(struct bxe_softc *sc)
7009{
7010    uint32_t j, val;
7011
7012    for (j = 0; j < 1000; j++) {
7013        val = (1UL << 31);
7014        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7015        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7016        if (val & (1L << 31))
7017            break;
7018
7019        DELAY(5000);
7020    }
7021
7022    if (!(val & (1L << 31))) {
7023        BLOGE(sc, "Cannot acquire MCP access lock register\n");
7024        return (-1);
7025    }
7026
7027    return (0);
7028}
7029
7030/* release split MCP access lock register */
7031static void bxe_release_alr(struct bxe_softc *sc)
7032{
7033    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7034}
7035
7036static void
7037bxe_fan_failure(struct bxe_softc *sc)
7038{
7039    int port = SC_PORT(sc);
7040    uint32_t ext_phy_config;
7041
7042    /* mark the failure */
7043    ext_phy_config =
7044        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7045
7046    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7047    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7048    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7049             ext_phy_config);
7050
7051    /* log the failure */
7052    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7053              "the card to prevent permanent damage. "
7054              "Please contact OEM Support for assistance\n");
7055
7056    /* XXX */
7057#if 1
7058    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7059#else
7060    /*
7061     * Schedule device reset (unload)
7062     * This is due to some boards consuming sufficient power when driver is
7063     * up to overheat if fan fails.
7064     */
7065    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7066    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7067#endif
7068}
7069
7070/* this function is called upon a link interrupt */
7071static void
7072bxe_link_attn(struct bxe_softc *sc)
7073{
7074    uint32_t pause_enabled = 0;
7075    struct host_port_stats *pstats;
7076    int cmng_fns;
7077
7078    /* Make sure that we are synced with the current statistics */
7079    bxe_stats_handle(sc, STATS_EVENT_STOP);
7080
7081    elink_link_update(&sc->link_params, &sc->link_vars);
7082
7083    if (sc->link_vars.link_up) {
7084
7085        /* dropless flow control */
7086        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7087            pause_enabled = 0;
7088
7089            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7090                pause_enabled = 1;
7091            }
7092
7093            REG_WR(sc,
7094                   (BAR_USTRORM_INTMEM +
7095                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7096                   pause_enabled);
7097        }
7098
7099        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7100            pstats = BXE_SP(sc, port_stats);
7101            /* reset old mac stats */
7102            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7103        }
7104
7105        if (sc->state == BXE_STATE_OPEN) {
7106            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7107        }
7108    }
7109
7110    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7111        cmng_fns = bxe_get_cmng_fns_mode(sc);
7112
7113        if (cmng_fns != CMNG_FNS_NONE) {
7114            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7115            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7116        } else {
7117            /* rate shaping and fairness are disabled */
7118            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7119        }
7120    }
7121
7122    bxe_link_report_locked(sc);
7123
7124    if (IS_MF(sc)) {
7125        ; // XXX bxe_link_sync_notify(sc);
7126    }
7127}
7128
7129static void
7130bxe_attn_int_asserted(struct bxe_softc *sc,
7131                      uint32_t         asserted)
7132{
7133    int port = SC_PORT(sc);
7134    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7135                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7136    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7137                                        NIG_REG_MASK_INTERRUPT_PORT0;
7138    uint32_t aeu_mask;
7139    uint32_t nig_mask = 0;
7140    uint32_t reg_addr;
7141    uint32_t igu_acked;
7142    uint32_t cnt;
7143
7144    if (sc->attn_state & asserted) {
7145        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7146    }
7147
7148    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7149
7150    aeu_mask = REG_RD(sc, aeu_addr);
7151
7152    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7153          aeu_mask, asserted);
7154
7155    aeu_mask &= ~(asserted & 0x3ff);
7156
7157    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7158
7159    REG_WR(sc, aeu_addr, aeu_mask);
7160
7161    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7162
7163    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7164    sc->attn_state |= asserted;
7165    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7166
7167    if (asserted & ATTN_HARD_WIRED_MASK) {
7168        if (asserted & ATTN_NIG_FOR_FUNC) {
7169
7170	    bxe_acquire_phy_lock(sc);
7171            /* save nig interrupt mask */
7172            nig_mask = REG_RD(sc, nig_int_mask_addr);
7173
7174            /* If nig_mask is not set, no need to call the update function */
7175            if (nig_mask) {
7176                REG_WR(sc, nig_int_mask_addr, 0);
7177
7178                bxe_link_attn(sc);
7179            }
7180
7181            /* handle unicore attn? */
7182        }
7183
7184        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7185            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7186        }
7187
7188        if (asserted & GPIO_2_FUNC) {
7189            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7190        }
7191
7192        if (asserted & GPIO_3_FUNC) {
7193            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7194        }
7195
7196        if (asserted & GPIO_4_FUNC) {
7197            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7198        }
7199
7200        if (port == 0) {
7201            if (asserted & ATTN_GENERAL_ATTN_1) {
7202                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7203                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7204            }
7205            if (asserted & ATTN_GENERAL_ATTN_2) {
7206                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7207                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7208            }
7209            if (asserted & ATTN_GENERAL_ATTN_3) {
7210                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7211                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7212            }
7213        } else {
7214            if (asserted & ATTN_GENERAL_ATTN_4) {
7215                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7216                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7217            }
7218            if (asserted & ATTN_GENERAL_ATTN_5) {
7219                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7220                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7221            }
7222            if (asserted & ATTN_GENERAL_ATTN_6) {
7223                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7224                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7225            }
7226        }
7227    } /* hardwired */
7228
7229    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7230        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7231    } else {
7232        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7233    }
7234
7235    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7236          asserted,
7237          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7238    REG_WR(sc, reg_addr, asserted);
7239
7240    /* now set back the mask */
7241    if (asserted & ATTN_NIG_FOR_FUNC) {
7242        /*
7243         * Verify that IGU ack through BAR was written before restoring
7244         * NIG mask. This loop should exit after 2-3 iterations max.
7245         */
7246        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7247            cnt = 0;
7248
7249            do {
7250                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7251            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7252                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7253
7254            if (!igu_acked) {
7255                BLOGE(sc, "Failed to verify IGU ack on time\n");
7256            }
7257
7258            mb();
7259        }
7260
7261        REG_WR(sc, nig_int_mask_addr, nig_mask);
7262
7263	bxe_release_phy_lock(sc);
7264    }
7265}
7266
7267static void
7268bxe_print_next_block(struct bxe_softc *sc,
7269                     int              idx,
7270                     const char       *blk)
7271{
7272    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7273}
7274
7275static int
7276bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7277                              uint32_t         sig,
7278                              int              par_num,
7279                              uint8_t          print)
7280{
7281    uint32_t cur_bit = 0;
7282    int i = 0;
7283
7284    for (i = 0; sig; i++) {
7285        cur_bit = ((uint32_t)0x1 << i);
7286        if (sig & cur_bit) {
7287            switch (cur_bit) {
7288            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7289                if (print)
7290                    bxe_print_next_block(sc, par_num++, "BRB");
7291                break;
7292            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7293                if (print)
7294                    bxe_print_next_block(sc, par_num++, "PARSER");
7295                break;
7296            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7297                if (print)
7298                    bxe_print_next_block(sc, par_num++, "TSDM");
7299                break;
7300            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7301                if (print)
7302                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7303                break;
7304            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7305                if (print)
7306                    bxe_print_next_block(sc, par_num++, "TCM");
7307                break;
7308            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7309                if (print)
7310                    bxe_print_next_block(sc, par_num++, "TSEMI");
7311                break;
7312            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7313                if (print)
7314                    bxe_print_next_block(sc, par_num++, "XPB");
7315                break;
7316            }
7317
7318            /* Clear the bit */
7319            sig &= ~cur_bit;
7320        }
7321    }
7322
7323    return (par_num);
7324}
7325
7326static int
7327bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7328                              uint32_t         sig,
7329                              int              par_num,
7330                              uint8_t          *global,
7331                              uint8_t          print)
7332{
7333    int i = 0;
7334    uint32_t cur_bit = 0;
7335    for (i = 0; sig; i++) {
7336        cur_bit = ((uint32_t)0x1 << i);
7337        if (sig & cur_bit) {
7338            switch (cur_bit) {
7339            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7340                if (print)
7341                    bxe_print_next_block(sc, par_num++, "PBF");
7342                break;
7343            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7344                if (print)
7345                    bxe_print_next_block(sc, par_num++, "QM");
7346                break;
7347            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7348                if (print)
7349                    bxe_print_next_block(sc, par_num++, "TM");
7350                break;
7351            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7352                if (print)
7353                    bxe_print_next_block(sc, par_num++, "XSDM");
7354                break;
7355            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7356                if (print)
7357                    bxe_print_next_block(sc, par_num++, "XCM");
7358                break;
7359            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7360                if (print)
7361                    bxe_print_next_block(sc, par_num++, "XSEMI");
7362                break;
7363            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7364                if (print)
7365                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7366                break;
7367            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7368                if (print)
7369                    bxe_print_next_block(sc, par_num++, "NIG");
7370                break;
7371            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7372                if (print)
7373                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7374                *global = TRUE;
7375                break;
7376            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7377                if (print)
7378                    bxe_print_next_block(sc, par_num++, "DEBUG");
7379                break;
7380            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7381                if (print)
7382                    bxe_print_next_block(sc, par_num++, "USDM");
7383                break;
7384            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7385                if (print)
7386                    bxe_print_next_block(sc, par_num++, "UCM");
7387                break;
7388            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7389                if (print)
7390                    bxe_print_next_block(sc, par_num++, "USEMI");
7391                break;
7392            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7393                if (print)
7394                    bxe_print_next_block(sc, par_num++, "UPB");
7395                break;
7396            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7397                if (print)
7398                    bxe_print_next_block(sc, par_num++, "CSDM");
7399                break;
7400            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7401                if (print)
7402                    bxe_print_next_block(sc, par_num++, "CCM");
7403                break;
7404            }
7405
7406            /* Clear the bit */
7407            sig &= ~cur_bit;
7408        }
7409    }
7410
7411    return (par_num);
7412}
7413
7414static int
7415bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7416                              uint32_t         sig,
7417                              int              par_num,
7418                              uint8_t          print)
7419{
7420    uint32_t cur_bit = 0;
7421    int i = 0;
7422
7423    for (i = 0; sig; i++) {
7424        cur_bit = ((uint32_t)0x1 << i);
7425        if (sig & cur_bit) {
7426            switch (cur_bit) {
7427            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7428                if (print)
7429                    bxe_print_next_block(sc, par_num++, "CSEMI");
7430                break;
7431            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7432                if (print)
7433                    bxe_print_next_block(sc, par_num++, "PXP");
7434                break;
7435            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7436                if (print)
7437                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7438                break;
7439            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7440                if (print)
7441                    bxe_print_next_block(sc, par_num++, "CFC");
7442                break;
7443            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7444                if (print)
7445                    bxe_print_next_block(sc, par_num++, "CDU");
7446                break;
7447            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7448                if (print)
7449                    bxe_print_next_block(sc, par_num++, "DMAE");
7450                break;
7451            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7452                if (print)
7453                    bxe_print_next_block(sc, par_num++, "IGU");
7454                break;
7455            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7456                if (print)
7457                    bxe_print_next_block(sc, par_num++, "MISC");
7458                break;
7459            }
7460
7461            /* Clear the bit */
7462            sig &= ~cur_bit;
7463        }
7464    }
7465
7466    return (par_num);
7467}
7468
7469static int
7470bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7471                              uint32_t         sig,
7472                              int              par_num,
7473                              uint8_t          *global,
7474                              uint8_t          print)
7475{
7476    uint32_t cur_bit = 0;
7477    int i = 0;
7478
7479    for (i = 0; sig; i++) {
7480        cur_bit = ((uint32_t)0x1 << i);
7481        if (sig & cur_bit) {
7482            switch (cur_bit) {
7483            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7484                if (print)
7485                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7486                *global = TRUE;
7487                break;
7488            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7489                if (print)
7490                    bxe_print_next_block(sc, par_num++,
7491                              "MCP UMP RX");
7492                *global = TRUE;
7493                break;
7494            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7495                if (print)
7496                    bxe_print_next_block(sc, par_num++,
7497                              "MCP UMP TX");
7498                *global = TRUE;
7499                break;
7500            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7501                if (print)
7502                    bxe_print_next_block(sc, par_num++,
7503                              "MCP SCPAD");
7504                *global = TRUE;
7505                break;
7506            }
7507
7508            /* Clear the bit */
7509            sig &= ~cur_bit;
7510        }
7511    }
7512
7513    return (par_num);
7514}
7515
7516static int
7517bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7518                              uint32_t         sig,
7519                              int              par_num,
7520                              uint8_t          print)
7521{
7522    uint32_t cur_bit = 0;
7523    int i = 0;
7524
7525    for (i = 0; sig; i++) {
7526        cur_bit = ((uint32_t)0x1 << i);
7527        if (sig & cur_bit) {
7528            switch (cur_bit) {
7529            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7530                if (print)
7531                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7532                break;
7533            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7534                if (print)
7535                    bxe_print_next_block(sc, par_num++, "ATC");
7536                break;
7537            }
7538
7539            /* Clear the bit */
7540            sig &= ~cur_bit;
7541        }
7542    }
7543
7544    return (par_num);
7545}
7546
7547static uint8_t
7548bxe_parity_attn(struct bxe_softc *sc,
7549                uint8_t          *global,
7550                uint8_t          print,
7551                uint32_t         *sig)
7552{
7553    int par_num = 0;
7554
7555    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7556        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7557        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7558        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7559        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7560        BLOGE(sc, "Parity error: HW block parity attention:\n"
7561                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7562              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7563              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7564              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7565              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7566              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7567
7568        if (print)
7569            BLOGI(sc, "Parity errors detected in blocks: ");
7570
7571        par_num =
7572            bxe_check_blocks_with_parity0(sc, sig[0] &
7573                                          HW_PRTY_ASSERT_SET_0,
7574                                          par_num, print);
7575        par_num =
7576            bxe_check_blocks_with_parity1(sc, sig[1] &
7577                                          HW_PRTY_ASSERT_SET_1,
7578                                          par_num, global, print);
7579        par_num =
7580            bxe_check_blocks_with_parity2(sc, sig[2] &
7581                                          HW_PRTY_ASSERT_SET_2,
7582                                          par_num, print);
7583        par_num =
7584            bxe_check_blocks_with_parity3(sc, sig[3] &
7585                                          HW_PRTY_ASSERT_SET_3,
7586                                          par_num, global, print);
7587        par_num =
7588            bxe_check_blocks_with_parity4(sc, sig[4] &
7589                                          HW_PRTY_ASSERT_SET_4,
7590                                          par_num, print);
7591
7592        if (print)
7593            BLOGI(sc, "\n");
7594
7595        return (TRUE);
7596    }
7597
7598    return (FALSE);
7599}
7600
7601static uint8_t
7602bxe_chk_parity_attn(struct bxe_softc *sc,
7603                    uint8_t          *global,
7604                    uint8_t          print)
7605{
7606    struct attn_route attn = { {0} };
7607    int port = SC_PORT(sc);
7608
7609    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7610    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7611    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7612    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7613
7614    /*
7615     * Since MCP attentions can't be disabled inside the block, we need to
7616     * read AEU registers to see whether they're currently disabled
7617     */
7618    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7619                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7620                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7621                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7622
7623
7624    if (!CHIP_IS_E1x(sc))
7625        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7626
7627    return (bxe_parity_attn(sc, global, print, attn.sig));
7628}
7629
7630static void
7631bxe_attn_int_deasserted4(struct bxe_softc *sc,
7632                         uint32_t         attn)
7633{
7634    uint32_t val;
7635
7636    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7637        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7638        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7639        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7640            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7641        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7642            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7643        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7644            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7645        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7646            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7647        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7648            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7649        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7650            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7651        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7652            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7653        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7654            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7655        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7656            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7657    }
7658
7659    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7660        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7661        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7662        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7663            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7664        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7665            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7666        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7667            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7668        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7669            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7670        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7671            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7672        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7673            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7674    }
7675
7676    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7677                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7678        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7679              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7680                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7681    }
7682}
7683
7684static void
7685bxe_e1h_disable(struct bxe_softc *sc)
7686{
7687    int port = SC_PORT(sc);
7688
7689    bxe_tx_disable(sc);
7690
7691    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7692}
7693
7694static void
7695bxe_e1h_enable(struct bxe_softc *sc)
7696{
7697    int port = SC_PORT(sc);
7698
7699    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7700
7701    // XXX bxe_tx_enable(sc);
7702}
7703
7704/*
7705 * called due to MCP event (on pmf):
7706 *   reread new bandwidth configuration
7707 *   configure FW
7708 *   notify others function about the change
7709 */
7710static void
7711bxe_config_mf_bw(struct bxe_softc *sc)
7712{
7713    if (sc->link_vars.link_up) {
7714        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7715        // XXX bxe_link_sync_notify(sc);
7716    }
7717
7718    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7719}
7720
7721static void
7722bxe_set_mf_bw(struct bxe_softc *sc)
7723{
7724    bxe_config_mf_bw(sc);
7725    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7726}
7727
7728static void
7729bxe_handle_eee_event(struct bxe_softc *sc)
7730{
7731    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7732    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7733}
7734
7735#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7736
7737static void
7738bxe_drv_info_ether_stat(struct bxe_softc *sc)
7739{
7740    struct eth_stats_info *ether_stat =
7741        &sc->sp->drv_info_to_mcp.ether_stat;
7742
7743    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7744            ETH_STAT_INFO_VERSION_LEN);
7745
7746    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7747    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7748                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7749                                          ether_stat->mac_local + MAC_PAD,
7750                                          MAC_PAD, ETH_ALEN);
7751
7752    ether_stat->mtu_size = sc->mtu;
7753
7754    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7755    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7756        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7757    }
7758
7759    // XXX ether_stat->feature_flags |= ???;
7760
7761    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7762
7763    ether_stat->txq_size = sc->tx_ring_size;
7764    ether_stat->rxq_size = sc->rx_ring_size;
7765}
7766
7767static void
7768bxe_handle_drv_info_req(struct bxe_softc *sc)
7769{
7770    enum drv_info_opcode op_code;
7771    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7772
7773    /* if drv_info version supported by MFW doesn't match - send NACK */
7774    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7775        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7776        return;
7777    }
7778
7779    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7780               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7781
7782    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7783
7784    switch (op_code) {
7785    case ETH_STATS_OPCODE:
7786        bxe_drv_info_ether_stat(sc);
7787        break;
7788    case FCOE_STATS_OPCODE:
7789    case ISCSI_STATS_OPCODE:
7790    default:
7791        /* if op code isn't supported - send NACK */
7792        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7793        return;
7794    }
7795
7796    /*
7797     * If we got drv_info attn from MFW then these fields are defined in
7798     * shmem2 for sure
7799     */
7800    SHMEM2_WR(sc, drv_info_host_addr_lo,
7801              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7802    SHMEM2_WR(sc, drv_info_host_addr_hi,
7803              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7804
7805    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7806}
7807
7808static void
7809bxe_dcc_event(struct bxe_softc *sc,
7810              uint32_t         dcc_event)
7811{
7812    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7813
7814    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7815        /*
7816         * This is the only place besides the function initialization
7817         * where the sc->flags can change so it is done without any
7818         * locks
7819         */
7820        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7821            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7822            sc->flags |= BXE_MF_FUNC_DIS;
7823            bxe_e1h_disable(sc);
7824        } else {
7825            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7826            sc->flags &= ~BXE_MF_FUNC_DIS;
7827            bxe_e1h_enable(sc);
7828        }
7829        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7830    }
7831
7832    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7833        bxe_config_mf_bw(sc);
7834        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7835    }
7836
7837    /* Report results to MCP */
7838    if (dcc_event)
7839        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7840    else
7841        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7842}
7843
7844static void
7845bxe_pmf_update(struct bxe_softc *sc)
7846{
7847    int port = SC_PORT(sc);
7848    uint32_t val;
7849
7850    sc->port.pmf = 1;
7851    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7852
7853    /*
7854     * We need the mb() to ensure the ordering between the writing to
7855     * sc->port.pmf here and reading it from the bxe_periodic_task().
7856     */
7857    mb();
7858
7859    /* queue a periodic task */
7860    // XXX schedule task...
7861
7862    // XXX bxe_dcbx_pmf_update(sc);
7863
7864    /* enable nig attention */
7865    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7866    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7867        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7868        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7869    } else if (!CHIP_IS_E1x(sc)) {
7870        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7871        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7872    }
7873
7874    bxe_stats_handle(sc, STATS_EVENT_PMF);
7875}
7876
7877static int
7878bxe_mc_assert(struct bxe_softc *sc)
7879{
7880    char last_idx;
7881    int i, rc = 0;
7882    uint32_t row0, row1, row2, row3;
7883
7884    /* XSTORM */
7885    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7886    if (last_idx)
7887        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7888
7889    /* print the asserts */
7890    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7891
7892        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7893        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7894        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7895        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7896
7897        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7898            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7899                  i, row3, row2, row1, row0);
7900            rc++;
7901        } else {
7902            break;
7903        }
7904    }
7905
7906    /* TSTORM */
7907    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7908    if (last_idx) {
7909        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7910    }
7911
7912    /* print the asserts */
7913    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7914
7915        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7916        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7917        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7918        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7919
7920        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7921            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7922                  i, row3, row2, row1, row0);
7923            rc++;
7924        } else {
7925            break;
7926        }
7927    }
7928
7929    /* CSTORM */
7930    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7931    if (last_idx) {
7932        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7933    }
7934
7935    /* print the asserts */
7936    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7937
7938        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7939        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7940        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7941        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7942
7943        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7944            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7945                  i, row3, row2, row1, row0);
7946            rc++;
7947        } else {
7948            break;
7949        }
7950    }
7951
7952    /* USTORM */
7953    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7954    if (last_idx) {
7955        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7956    }
7957
7958    /* print the asserts */
7959    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7960
7961        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7962        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7963        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7964        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7965
7966        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7967            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7968                  i, row3, row2, row1, row0);
7969            rc++;
7970        } else {
7971            break;
7972        }
7973    }
7974
7975    return (rc);
7976}
7977
7978static void
7979bxe_attn_int_deasserted3(struct bxe_softc *sc,
7980                         uint32_t         attn)
7981{
7982    int func = SC_FUNC(sc);
7983    uint32_t val;
7984
7985    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7986
7987        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7988
7989            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7990            bxe_read_mf_cfg(sc);
7991            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7992                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7993            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7994
7995            if (val & DRV_STATUS_DCC_EVENT_MASK)
7996                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7997
7998            if (val & DRV_STATUS_SET_MF_BW)
7999                bxe_set_mf_bw(sc);
8000
8001            if (val & DRV_STATUS_DRV_INFO_REQ)
8002                bxe_handle_drv_info_req(sc);
8003
8004            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8005                bxe_pmf_update(sc);
8006
8007            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8008                bxe_handle_eee_event(sc);
8009
8010            if (sc->link_vars.periodic_flags &
8011                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8012                /* sync with link */
8013		bxe_acquire_phy_lock(sc);
8014                sc->link_vars.periodic_flags &=
8015                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8016		bxe_release_phy_lock(sc);
8017                if (IS_MF(sc))
8018                    ; // XXX bxe_link_sync_notify(sc);
8019                bxe_link_report(sc);
8020            }
8021
8022            /*
8023             * Always call it here: bxe_link_report() will
8024             * prevent the link indication duplication.
8025             */
8026            bxe_link_status_update(sc);
8027
8028        } else if (attn & BXE_MC_ASSERT_BITS) {
8029
8030            BLOGE(sc, "MC assert!\n");
8031            bxe_mc_assert(sc);
8032            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8033            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8034            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8035            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8036            bxe_panic(sc, ("MC assert!\n"));
8037
8038        } else if (attn & BXE_MCP_ASSERT) {
8039
8040            BLOGE(sc, "MCP assert!\n");
8041            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8042            // XXX bxe_fw_dump(sc);
8043
8044        } else {
8045            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8046        }
8047    }
8048
8049    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8050        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8051        if (attn & BXE_GRC_TIMEOUT) {
8052            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8053            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8054        }
8055        if (attn & BXE_GRC_RSV) {
8056            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8057            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8058        }
8059        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8060    }
8061}
8062
8063static void
8064bxe_attn_int_deasserted2(struct bxe_softc *sc,
8065                         uint32_t         attn)
8066{
8067    int port = SC_PORT(sc);
8068    int reg_offset;
8069    uint32_t val0, mask0, val1, mask1;
8070    uint32_t val;
8071
8072    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8073        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8074        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8075        /* CFC error attention */
8076        if (val & 0x2) {
8077            BLOGE(sc, "FATAL error from CFC\n");
8078        }
8079    }
8080
8081    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8082        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8083        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8084        /* RQ_USDMDP_FIFO_OVERFLOW */
8085        if (val & 0x18000) {
8086            BLOGE(sc, "FATAL error from PXP\n");
8087        }
8088
8089        if (!CHIP_IS_E1x(sc)) {
8090            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8091            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8092        }
8093    }
8094
8095#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8096#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8097
8098    if (attn & AEU_PXP2_HW_INT_BIT) {
8099        /*  CQ47854 workaround do not panic on
8100         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8101         */
8102        if (!CHIP_IS_E1x(sc)) {
8103            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8104            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8105            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8106            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8107            /*
8108             * If the olny PXP2_EOP_ERROR_BIT is set in
8109             * STS0 and STS1 - clear it
8110             *
8111             * probably we lose additional attentions between
8112             * STS0 and STS_CLR0, in this case user will not
8113             * be notified about them
8114             */
8115            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8116                !(val1 & mask1))
8117                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8118
8119            /* print the register, since no one can restore it */
8120            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8121
8122            /*
8123             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8124             * then notify
8125             */
8126            if (val0 & PXP2_EOP_ERROR_BIT) {
8127                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8128
8129                /*
8130                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8131                 * set then clear attention from PXP2 block without panic
8132                 */
8133                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8134                    ((val1 & mask1) == 0))
8135                    attn &= ~AEU_PXP2_HW_INT_BIT;
8136            }
8137        }
8138    }
8139
8140    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8141        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8142                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8143
8144        val = REG_RD(sc, reg_offset);
8145        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8146        REG_WR(sc, reg_offset, val);
8147
8148        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8149              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8150        bxe_panic(sc, ("HW block attention set2\n"));
8151    }
8152}
8153
8154static void
8155bxe_attn_int_deasserted1(struct bxe_softc *sc,
8156                         uint32_t         attn)
8157{
8158    int port = SC_PORT(sc);
8159    int reg_offset;
8160    uint32_t val;
8161
8162    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8163        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8164        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8165        /* DORQ discard attention */
8166        if (val & 0x2) {
8167            BLOGE(sc, "FATAL error from DORQ\n");
8168        }
8169    }
8170
8171    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8172        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8173                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8174
8175        val = REG_RD(sc, reg_offset);
8176        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8177        REG_WR(sc, reg_offset, val);
8178
8179        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8180              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8181        bxe_panic(sc, ("HW block attention set1\n"));
8182    }
8183}
8184
8185static void
8186bxe_attn_int_deasserted0(struct bxe_softc *sc,
8187                         uint32_t         attn)
8188{
8189    int port = SC_PORT(sc);
8190    int reg_offset;
8191    uint32_t val;
8192
8193    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8194                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8195
8196    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8197        val = REG_RD(sc, reg_offset);
8198        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8199        REG_WR(sc, reg_offset, val);
8200
8201        BLOGW(sc, "SPIO5 hw attention\n");
8202
8203        /* Fan failure attention */
8204        elink_hw_reset_phy(&sc->link_params);
8205        bxe_fan_failure(sc);
8206    }
8207
8208    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8209	bxe_acquire_phy_lock(sc);
8210        elink_handle_module_detect_int(&sc->link_params);
8211	bxe_release_phy_lock(sc);
8212    }
8213
8214    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8215        val = REG_RD(sc, reg_offset);
8216        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8217        REG_WR(sc, reg_offset, val);
8218
8219        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8220                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8221    }
8222}
8223
8224static void
8225bxe_attn_int_deasserted(struct bxe_softc *sc,
8226                        uint32_t         deasserted)
8227{
8228    struct attn_route attn;
8229    struct attn_route *group_mask;
8230    int port = SC_PORT(sc);
8231    int index;
8232    uint32_t reg_addr;
8233    uint32_t val;
8234    uint32_t aeu_mask;
8235    uint8_t global = FALSE;
8236
8237    /*
8238     * Need to take HW lock because MCP or other port might also
8239     * try to handle this event.
8240     */
8241    bxe_acquire_alr(sc);
8242
8243    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8244        /* XXX
8245         * In case of parity errors don't handle attentions so that
8246         * other function would "see" parity errors.
8247         */
8248        sc->recovery_state = BXE_RECOVERY_INIT;
8249        // XXX schedule a recovery task...
8250        /* disable HW interrupts */
8251        bxe_int_disable(sc);
8252        bxe_release_alr(sc);
8253        return;
8254    }
8255
8256    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8257    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8258    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8259    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8260    if (!CHIP_IS_E1x(sc)) {
8261        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8262    } else {
8263        attn.sig[4] = 0;
8264    }
8265
8266    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8267          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8268
8269    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8270        if (deasserted & (1 << index)) {
8271            group_mask = &sc->attn_group[index];
8272
8273            BLOGD(sc, DBG_INTR,
8274                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8275                  group_mask->sig[0], group_mask->sig[1],
8276                  group_mask->sig[2], group_mask->sig[3],
8277                  group_mask->sig[4]);
8278
8279            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8280            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8281            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8282            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8283            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8284        }
8285    }
8286
8287    bxe_release_alr(sc);
8288
8289    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8290        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8291                    COMMAND_REG_ATTN_BITS_CLR);
8292    } else {
8293        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8294    }
8295
8296    val = ~deasserted;
8297    BLOGD(sc, DBG_INTR,
8298          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8299          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8300    REG_WR(sc, reg_addr, val);
8301
8302    if (~sc->attn_state & deasserted) {
8303        BLOGE(sc, "IGU error\n");
8304    }
8305
8306    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8307                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8308
8309    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8310
8311    aeu_mask = REG_RD(sc, reg_addr);
8312
8313    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8314          aeu_mask, deasserted);
8315    aeu_mask |= (deasserted & 0x3ff);
8316    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8317
8318    REG_WR(sc, reg_addr, aeu_mask);
8319    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8320
8321    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8322    sc->attn_state &= ~deasserted;
8323    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8324}
8325
8326static void
8327bxe_attn_int(struct bxe_softc *sc)
8328{
8329    /* read local copy of bits */
8330    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8331    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8332    uint32_t attn_state = sc->attn_state;
8333
8334    /* look for changed bits */
8335    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8336    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8337
8338    BLOGD(sc, DBG_INTR,
8339          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8340          attn_bits, attn_ack, asserted, deasserted);
8341
8342    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8343        BLOGE(sc, "BAD attention state\n");
8344    }
8345
8346    /* handle bits that were raised */
8347    if (asserted) {
8348        bxe_attn_int_asserted(sc, asserted);
8349    }
8350
8351    if (deasserted) {
8352        bxe_attn_int_deasserted(sc, deasserted);
8353    }
8354}
8355
8356static uint16_t
8357bxe_update_dsb_idx(struct bxe_softc *sc)
8358{
8359    struct host_sp_status_block *def_sb = sc->def_sb;
8360    uint16_t rc = 0;
8361
8362    mb(); /* status block is written to by the chip */
8363
8364    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8365        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8366        rc |= BXE_DEF_SB_ATT_IDX;
8367    }
8368
8369    if (sc->def_idx != def_sb->sp_sb.running_index) {
8370        sc->def_idx = def_sb->sp_sb.running_index;
8371        rc |= BXE_DEF_SB_IDX;
8372    }
8373
8374    mb();
8375
8376    return (rc);
8377}
8378
8379static inline struct ecore_queue_sp_obj *
8380bxe_cid_to_q_obj(struct bxe_softc *sc,
8381                 uint32_t         cid)
8382{
8383    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8384    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8385}
8386
8387static void
8388bxe_handle_mcast_eqe(struct bxe_softc *sc)
8389{
8390    struct ecore_mcast_ramrod_params rparam;
8391    int rc;
8392
8393    memset(&rparam, 0, sizeof(rparam));
8394
8395    rparam.mcast_obj = &sc->mcast_obj;
8396
8397    BXE_MCAST_LOCK(sc);
8398
8399    /* clear pending state for the last command */
8400    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8401
8402    /* if there are pending mcast commands - send them */
8403    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8404        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8405        if (rc < 0) {
8406            BLOGD(sc, DBG_SP,
8407                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8408        }
8409    }
8410
8411    BXE_MCAST_UNLOCK(sc);
8412}
8413
8414static void
8415bxe_handle_classification_eqe(struct bxe_softc      *sc,
8416                              union event_ring_elem *elem)
8417{
8418    unsigned long ramrod_flags = 0;
8419    int rc = 0;
8420    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8421    struct ecore_vlan_mac_obj *vlan_mac_obj;
8422
8423    /* always push next commands out, don't wait here */
8424    bit_set(&ramrod_flags, RAMROD_CONT);
8425
8426    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8427    case ECORE_FILTER_MAC_PENDING:
8428        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8429        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8430        break;
8431
8432    case ECORE_FILTER_MCAST_PENDING:
8433        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8434        /*
8435         * This is only relevant for 57710 where multicast MACs are
8436         * configured as unicast MACs using the same ramrod.
8437         */
8438        bxe_handle_mcast_eqe(sc);
8439        return;
8440
8441    default:
8442        BLOGE(sc, "Unsupported classification command: %d\n",
8443              elem->message.data.eth_event.echo);
8444        return;
8445    }
8446
8447    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8448
8449    if (rc < 0) {
8450        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8451    } else if (rc > 0) {
8452        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8453    }
8454}
8455
8456static void
8457bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8458                       union event_ring_elem *elem)
8459{
8460    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8461
8462    /* send rx_mode command again if was requested */
8463    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8464                               &sc->sp_state)) {
8465        bxe_set_storm_rx_mode(sc);
8466    }
8467}
8468
8469static void
8470bxe_update_eq_prod(struct bxe_softc *sc,
8471                   uint16_t         prod)
8472{
8473    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8474    wmb(); /* keep prod updates ordered */
8475}
8476
8477static void
8478bxe_eq_int(struct bxe_softc *sc)
8479{
8480    uint16_t hw_cons, sw_cons, sw_prod;
8481    union event_ring_elem *elem;
8482    uint8_t echo;
8483    uint32_t cid;
8484    uint8_t opcode;
8485    int spqe_cnt = 0;
8486    struct ecore_queue_sp_obj *q_obj;
8487    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8488    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8489
8490    hw_cons = le16toh(*sc->eq_cons_sb);
8491
8492    /*
8493     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8494     * when we get to the next-page we need to adjust so the loop
8495     * condition below will be met. The next element is the size of a
8496     * regular element and hence incrementing by 1
8497     */
8498    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8499        hw_cons++;
8500    }
8501
8502    /*
8503     * This function may never run in parallel with itself for a
8504     * specific sc and no need for a read memory barrier here.
8505     */
8506    sw_cons = sc->eq_cons;
8507    sw_prod = sc->eq_prod;
8508
8509    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8510          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8511
8512    for (;
8513         sw_cons != hw_cons;
8514         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8515
8516        elem = &sc->eq[EQ_DESC(sw_cons)];
8517
8518        /* elem CID originates from FW, actually LE */
8519        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8520        opcode = elem->message.opcode;
8521
8522        /* handle eq element */
8523        switch (opcode) {
8524
8525        case EVENT_RING_OPCODE_STAT_QUERY:
8526            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8527                  sc->stats_comp++);
8528            /* nothing to do with stats comp */
8529            goto next_spqe;
8530
8531        case EVENT_RING_OPCODE_CFC_DEL:
8532            /* handle according to cid range */
8533            /* we may want to verify here that the sc state is HALTING */
8534            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8535            q_obj = bxe_cid_to_q_obj(sc, cid);
8536            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8537                break;
8538            }
8539            goto next_spqe;
8540
8541        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8542            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8543            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8544                break;
8545            }
8546            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8547            goto next_spqe;
8548
8549        case EVENT_RING_OPCODE_START_TRAFFIC:
8550            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8551            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8552                break;
8553            }
8554            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8555            goto next_spqe;
8556
8557        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8558            echo = elem->message.data.function_update_event.echo;
8559            if (echo == SWITCH_UPDATE) {
8560                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8561                if (f_obj->complete_cmd(sc, f_obj,
8562                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8563                    break;
8564                }
8565            }
8566            else {
8567                BLOGD(sc, DBG_SP,
8568                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8569            }
8570            goto next_spqe;
8571
8572        case EVENT_RING_OPCODE_FORWARD_SETUP:
8573            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8574            if (q_obj->complete_cmd(sc, q_obj,
8575                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8576                break;
8577            }
8578            goto next_spqe;
8579
8580        case EVENT_RING_OPCODE_FUNCTION_START:
8581            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8582            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8583                break;
8584            }
8585            goto next_spqe;
8586
8587        case EVENT_RING_OPCODE_FUNCTION_STOP:
8588            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8589            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8590                break;
8591            }
8592            goto next_spqe;
8593        }
8594
8595        switch (opcode | sc->state) {
8596        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8597        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8598            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8599            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8600            rss_raw->clear_pending(rss_raw);
8601            break;
8602
8603        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8604        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8605        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8606        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8607        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8608        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8609            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8610            bxe_handle_classification_eqe(sc, elem);
8611            break;
8612
8613        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8614        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8615        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8616            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8617            bxe_handle_mcast_eqe(sc);
8618            break;
8619
8620        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8621        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8622        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8623            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8624            bxe_handle_rx_mode_eqe(sc, elem);
8625            break;
8626
8627        default:
8628            /* unknown event log error and continue */
8629            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8630                  elem->message.opcode, sc->state);
8631        }
8632
8633next_spqe:
8634        spqe_cnt++;
8635    } /* for */
8636
8637    mb();
8638    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8639
8640    sc->eq_cons = sw_cons;
8641    sc->eq_prod = sw_prod;
8642
8643    /* make sure that above mem writes were issued towards the memory */
8644    wmb();
8645
8646    /* update producer */
8647    bxe_update_eq_prod(sc, sc->eq_prod);
8648}
8649
8650static void
8651bxe_handle_sp_tq(void *context,
8652                 int  pending)
8653{
8654    struct bxe_softc *sc = (struct bxe_softc *)context;
8655    uint16_t status;
8656
8657    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8658
8659    /* what work needs to be performed? */
8660    status = bxe_update_dsb_idx(sc);
8661
8662    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8663
8664    /* HW attentions */
8665    if (status & BXE_DEF_SB_ATT_IDX) {
8666        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8667        bxe_attn_int(sc);
8668        status &= ~BXE_DEF_SB_ATT_IDX;
8669    }
8670
8671    /* SP events: STAT_QUERY and others */
8672    if (status & BXE_DEF_SB_IDX) {
8673        /* handle EQ completions */
8674        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8675        bxe_eq_int(sc);
8676        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8677                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8678        status &= ~BXE_DEF_SB_IDX;
8679    }
8680
8681    /* if status is non zero then something went wrong */
8682    if (__predict_false(status)) {
8683        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8684    }
8685
8686    /* ack status block only if something was actually handled */
8687    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8688               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8689
8690    /*
8691     * Must be called after the EQ processing (since eq leads to sriov
8692     * ramrod completion flows).
8693     * This flow may have been scheduled by the arrival of a ramrod
8694     * completion, or by the sriov code rescheduling itself.
8695     */
8696    // XXX bxe_iov_sp_task(sc);
8697
8698}
8699
8700static void
8701bxe_handle_fp_tq(void *context,
8702                 int  pending)
8703{
8704    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8705    struct bxe_softc *sc = fp->sc;
8706    uint8_t more_tx = FALSE;
8707    uint8_t more_rx = FALSE;
8708
8709    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8710
8711    /* XXX
8712     * IFF_DRV_RUNNING state can't be checked here since we process
8713     * slowpath events on a client queue during setup. Instead
8714     * we need to add a "process/continue" flag here that the driver
8715     * can use to tell the task here not to do anything.
8716     */
8717#if 0
8718    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8719        return;
8720    }
8721#endif
8722
8723    /* update the fastpath index */
8724    bxe_update_fp_sb_idx(fp);
8725
8726    /* XXX add loop here if ever support multiple tx CoS */
8727    /* fp->txdata[cos] */
8728    if (bxe_has_tx_work(fp)) {
8729        BXE_FP_TX_LOCK(fp);
8730        more_tx = bxe_txeof(sc, fp);
8731        BXE_FP_TX_UNLOCK(fp);
8732    }
8733
8734    if (bxe_has_rx_work(fp)) {
8735        more_rx = bxe_rxeof(sc, fp);
8736    }
8737
8738    if (more_rx /*|| more_tx*/) {
8739        /* still more work to do */
8740        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8741        return;
8742    }
8743
8744    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8745               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8746}
8747
8748static void
8749bxe_task_fp(struct bxe_fastpath *fp)
8750{
8751    struct bxe_softc *sc = fp->sc;
8752    uint8_t more_tx = FALSE;
8753    uint8_t more_rx = FALSE;
8754
8755    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8756
8757    /* update the fastpath index */
8758    bxe_update_fp_sb_idx(fp);
8759
8760    /* XXX add loop here if ever support multiple tx CoS */
8761    /* fp->txdata[cos] */
8762    if (bxe_has_tx_work(fp)) {
8763        BXE_FP_TX_LOCK(fp);
8764        more_tx = bxe_txeof(sc, fp);
8765        BXE_FP_TX_UNLOCK(fp);
8766    }
8767
8768    if (bxe_has_rx_work(fp)) {
8769        more_rx = bxe_rxeof(sc, fp);
8770    }
8771
8772    if (more_rx /*|| more_tx*/) {
8773        /* still more work to do, bail out if this ISR and process later */
8774        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8775        return;
8776    }
8777
8778    /*
8779     * Here we write the fastpath index taken before doing any tx or rx work.
8780     * It is very well possible other hw events occurred up to this point and
8781     * they were actually processed accordingly above. Since we're going to
8782     * write an older fastpath index, an interrupt is coming which we might
8783     * not do any work in.
8784     */
8785    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8786               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8787}
8788
8789/*
8790 * Legacy interrupt entry point.
8791 *
8792 * Verifies that the controller generated the interrupt and
8793 * then calls a separate routine to handle the various
8794 * interrupt causes: link, RX, and TX.
8795 */
8796static void
8797bxe_intr_legacy(void *xsc)
8798{
8799    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8800    struct bxe_fastpath *fp;
8801    uint16_t status, mask;
8802    int i;
8803
8804    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8805
8806    /*
8807     * 0 for ustorm, 1 for cstorm
8808     * the bits returned from ack_int() are 0-15
8809     * bit 0 = attention status block
8810     * bit 1 = fast path status block
8811     * a mask of 0x2 or more = tx/rx event
8812     * a mask of 1 = slow path event
8813     */
8814
8815    status = bxe_ack_int(sc);
8816
8817    /* the interrupt is not for us */
8818    if (__predict_false(status == 0)) {
8819        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8820        return;
8821    }
8822
8823    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8824
8825    FOR_EACH_ETH_QUEUE(sc, i) {
8826        fp = &sc->fp[i];
8827        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8828        if (status & mask) {
8829            /* acknowledge and disable further fastpath interrupts */
8830            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8831            bxe_task_fp(fp);
8832            status &= ~mask;
8833        }
8834    }
8835
8836    if (__predict_false(status & 0x1)) {
8837        /* acknowledge and disable further slowpath interrupts */
8838        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8839
8840        /* schedule slowpath handler */
8841        taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8842
8843        status &= ~0x1;
8844    }
8845
8846    if (__predict_false(status)) {
8847        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8848    }
8849}
8850
8851/* slowpath interrupt entry point */
8852static void
8853bxe_intr_sp(void *xsc)
8854{
8855    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8856
8857    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8858
8859    /* acknowledge and disable further slowpath interrupts */
8860    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8861
8862    /* schedule slowpath handler */
8863    taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8864}
8865
8866/* fastpath interrupt entry point */
8867static void
8868bxe_intr_fp(void *xfp)
8869{
8870    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8871    struct bxe_softc *sc = fp->sc;
8872
8873    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8874
8875    BLOGD(sc, DBG_INTR,
8876          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8877          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8878
8879    /* acknowledge and disable further fastpath interrupts */
8880    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8881
8882    bxe_task_fp(fp);
8883}
8884
8885/* Release all interrupts allocated by the driver. */
8886static void
8887bxe_interrupt_free(struct bxe_softc *sc)
8888{
8889    int i;
8890
8891    switch (sc->interrupt_mode) {
8892    case INTR_MODE_INTX:
8893        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8894        if (sc->intr[0].resource != NULL) {
8895            bus_release_resource(sc->dev,
8896                                 SYS_RES_IRQ,
8897                                 sc->intr[0].rid,
8898                                 sc->intr[0].resource);
8899        }
8900        break;
8901    case INTR_MODE_MSI:
8902        for (i = 0; i < sc->intr_count; i++) {
8903            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8904            if (sc->intr[i].resource && sc->intr[i].rid) {
8905                bus_release_resource(sc->dev,
8906                                     SYS_RES_IRQ,
8907                                     sc->intr[i].rid,
8908                                     sc->intr[i].resource);
8909            }
8910        }
8911        pci_release_msi(sc->dev);
8912        break;
8913    case INTR_MODE_MSIX:
8914        for (i = 0; i < sc->intr_count; i++) {
8915            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8916            if (sc->intr[i].resource && sc->intr[i].rid) {
8917                bus_release_resource(sc->dev,
8918                                     SYS_RES_IRQ,
8919                                     sc->intr[i].rid,
8920                                     sc->intr[i].resource);
8921            }
8922        }
8923        pci_release_msi(sc->dev);
8924        break;
8925    default:
8926        /* nothing to do as initial allocation failed */
8927        break;
8928    }
8929}
8930
8931/*
8932 * This function determines and allocates the appropriate
8933 * interrupt based on system capabilites and user request.
8934 *
8935 * The user may force a particular interrupt mode, specify
8936 * the number of receive queues, specify the method for
8937 * distribuitng received frames to receive queues, or use
8938 * the default settings which will automatically select the
8939 * best supported combination.  In addition, the OS may or
8940 * may not support certain combinations of these settings.
8941 * This routine attempts to reconcile the settings requested
8942 * by the user with the capabilites available from the system
8943 * to select the optimal combination of features.
8944 *
8945 * Returns:
8946 *   0 = Success, !0 = Failure.
8947 */
8948static int
8949bxe_interrupt_alloc(struct bxe_softc *sc)
8950{
8951    int msix_count = 0;
8952    int msi_count = 0;
8953    int num_requested = 0;
8954    int num_allocated = 0;
8955    int rid, i, j;
8956    int rc;
8957
8958    /* get the number of available MSI/MSI-X interrupts from the OS */
8959    if (sc->interrupt_mode > 0) {
8960        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8961            msix_count = pci_msix_count(sc->dev);
8962        }
8963
8964        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8965            msi_count = pci_msi_count(sc->dev);
8966        }
8967
8968        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8969              msi_count, msix_count);
8970    }
8971
8972    do { /* try allocating MSI-X interrupt resources (at least 2) */
8973        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8974            break;
8975        }
8976
8977        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8978            (msix_count < 2)) {
8979            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8980            break;
8981        }
8982
8983        /* ask for the necessary number of MSI-X vectors */
8984        num_requested = min((sc->num_queues + 1), msix_count);
8985
8986        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8987
8988        num_allocated = num_requested;
8989        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8990            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8991            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8992            break;
8993        }
8994
8995        if (num_allocated < 2) { /* possible? */
8996            BLOGE(sc, "MSI-X allocation less than 2!\n");
8997            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8998            pci_release_msi(sc->dev);
8999            break;
9000        }
9001
9002        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9003              num_requested, num_allocated);
9004
9005        /* best effort so use the number of vectors allocated to us */
9006        sc->intr_count = num_allocated;
9007        sc->num_queues = num_allocated - 1;
9008
9009        rid = 1; /* initial resource identifier */
9010
9011        /* allocate the MSI-X vectors */
9012        for (i = 0; i < num_allocated; i++) {
9013            sc->intr[i].rid = (rid + i);
9014
9015            if ((sc->intr[i].resource =
9016                 bus_alloc_resource_any(sc->dev,
9017                                        SYS_RES_IRQ,
9018                                        &sc->intr[i].rid,
9019                                        RF_ACTIVE)) == NULL) {
9020                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9021                      i, (rid + i));
9022
9023                for (j = (i - 1); j >= 0; j--) {
9024                    bus_release_resource(sc->dev,
9025                                         SYS_RES_IRQ,
9026                                         sc->intr[j].rid,
9027                                         sc->intr[j].resource);
9028                }
9029
9030                sc->intr_count = 0;
9031                sc->num_queues = 0;
9032                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9033                pci_release_msi(sc->dev);
9034                break;
9035            }
9036
9037            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9038        }
9039    } while (0);
9040
9041    do { /* try allocating MSI vector resources (at least 2) */
9042        if (sc->interrupt_mode != INTR_MODE_MSI) {
9043            break;
9044        }
9045
9046        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9047            (msi_count < 1)) {
9048            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9049            break;
9050        }
9051
9052        /* ask for a single MSI vector */
9053        num_requested = 1;
9054
9055        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9056
9057        num_allocated = num_requested;
9058        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9059            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9060            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9061            break;
9062        }
9063
9064        if (num_allocated != 1) { /* possible? */
9065            BLOGE(sc, "MSI allocation is not 1!\n");
9066            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9067            pci_release_msi(sc->dev);
9068            break;
9069        }
9070
9071        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9072              num_requested, num_allocated);
9073
9074        /* best effort so use the number of vectors allocated to us */
9075        sc->intr_count = num_allocated;
9076        sc->num_queues = num_allocated;
9077
9078        rid = 1; /* initial resource identifier */
9079
9080        sc->intr[0].rid = rid;
9081
9082        if ((sc->intr[0].resource =
9083             bus_alloc_resource_any(sc->dev,
9084                                    SYS_RES_IRQ,
9085                                    &sc->intr[0].rid,
9086                                    RF_ACTIVE)) == NULL) {
9087            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9088            sc->intr_count = 0;
9089            sc->num_queues = 0;
9090            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9091            pci_release_msi(sc->dev);
9092            break;
9093        }
9094
9095        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9096    } while (0);
9097
9098    do { /* try allocating INTx vector resources */
9099        if (sc->interrupt_mode != INTR_MODE_INTX) {
9100            break;
9101        }
9102
9103        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9104
9105        /* only one vector for INTx */
9106        sc->intr_count = 1;
9107        sc->num_queues = 1;
9108
9109        rid = 0; /* initial resource identifier */
9110
9111        sc->intr[0].rid = rid;
9112
9113        if ((sc->intr[0].resource =
9114             bus_alloc_resource_any(sc->dev,
9115                                    SYS_RES_IRQ,
9116                                    &sc->intr[0].rid,
9117                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9118            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9119            sc->intr_count = 0;
9120            sc->num_queues = 0;
9121            sc->interrupt_mode = -1; /* Failed! */
9122            break;
9123        }
9124
9125        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9126    } while (0);
9127
9128    if (sc->interrupt_mode == -1) {
9129        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9130        rc = 1;
9131    } else {
9132        BLOGD(sc, DBG_LOAD,
9133              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9134              sc->interrupt_mode, sc->num_queues);
9135        rc = 0;
9136    }
9137
9138    return (rc);
9139}
9140
9141static void
9142bxe_interrupt_detach(struct bxe_softc *sc)
9143{
9144    struct bxe_fastpath *fp;
9145    int i;
9146
9147    /* release interrupt resources */
9148    for (i = 0; i < sc->intr_count; i++) {
9149        if (sc->intr[i].resource && sc->intr[i].tag) {
9150            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9151            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9152        }
9153    }
9154
9155    for (i = 0; i < sc->num_queues; i++) {
9156        fp = &sc->fp[i];
9157        if (fp->tq) {
9158            taskqueue_drain(fp->tq, &fp->tq_task);
9159            taskqueue_free(fp->tq);
9160            fp->tq = NULL;
9161        }
9162    }
9163
9164
9165    if (sc->sp_tq) {
9166        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9167        taskqueue_free(sc->sp_tq);
9168        sc->sp_tq = NULL;
9169    }
9170}
9171
9172/*
9173 * Enables interrupts and attach to the ISR.
9174 *
9175 * When using multiple MSI/MSI-X vectors the first vector
9176 * is used for slowpath operations while all remaining
9177 * vectors are used for fastpath operations.  If only a
9178 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9179 * ISR must look for both slowpath and fastpath completions.
9180 */
9181static int
9182bxe_interrupt_attach(struct bxe_softc *sc)
9183{
9184    struct bxe_fastpath *fp;
9185    int rc = 0;
9186    int i;
9187
9188    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9189             "bxe%d_sp_tq", sc->unit);
9190    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9191    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9192                                      taskqueue_thread_enqueue,
9193                                      &sc->sp_tq);
9194    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9195                            "%s", sc->sp_tq_name);
9196
9197
9198    for (i = 0; i < sc->num_queues; i++) {
9199        fp = &sc->fp[i];
9200        snprintf(fp->tq_name, sizeof(fp->tq_name),
9201                 "bxe%d_fp%d_tq", sc->unit, i);
9202        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9203        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9204                                       taskqueue_thread_enqueue,
9205                                       &fp->tq);
9206        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9207                                "%s", fp->tq_name);
9208    }
9209
9210    /* setup interrupt handlers */
9211    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9212        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9213
9214        /*
9215         * Setup the interrupt handler. Note that we pass the driver instance
9216         * to the interrupt handler for the slowpath.
9217         */
9218        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9219                                 (INTR_TYPE_NET | INTR_MPSAFE),
9220                                 NULL, bxe_intr_sp, sc,
9221                                 &sc->intr[0].tag)) != 0) {
9222            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9223            goto bxe_interrupt_attach_exit;
9224        }
9225
9226        bus_describe_intr(sc->dev, sc->intr[0].resource,
9227                          sc->intr[0].tag, "sp");
9228
9229        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9230
9231        /* initialize the fastpath vectors (note the first was used for sp) */
9232        for (i = 0; i < sc->num_queues; i++) {
9233            fp = &sc->fp[i];
9234            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9235
9236            /*
9237             * Setup the interrupt handler. Note that we pass the
9238             * fastpath context to the interrupt handler in this
9239             * case.
9240             */
9241            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9242                                     (INTR_TYPE_NET | INTR_MPSAFE),
9243                                     NULL, bxe_intr_fp, fp,
9244                                     &sc->intr[i + 1].tag)) != 0) {
9245                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9246                      (i + 1), rc);
9247                goto bxe_interrupt_attach_exit;
9248            }
9249
9250            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9251                              sc->intr[i + 1].tag, "fp%02d", i);
9252
9253            /* bind the fastpath instance to a cpu */
9254            if (sc->num_queues > 1) {
9255                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9256            }
9257
9258            fp->state = BXE_FP_STATE_IRQ;
9259        }
9260    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9261        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9262
9263        /*
9264         * Setup the interrupt handler. Note that we pass the
9265         * driver instance to the interrupt handler which
9266         * will handle both the slowpath and fastpath.
9267         */
9268        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9269                                 (INTR_TYPE_NET | INTR_MPSAFE),
9270                                 NULL, bxe_intr_legacy, sc,
9271                                 &sc->intr[0].tag)) != 0) {
9272            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9273            goto bxe_interrupt_attach_exit;
9274        }
9275
9276    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9277        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9278
9279        /*
9280         * Setup the interrupt handler. Note that we pass the
9281         * driver instance to the interrupt handler which
9282         * will handle both the slowpath and fastpath.
9283         */
9284        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9285                                 (INTR_TYPE_NET | INTR_MPSAFE),
9286                                 NULL, bxe_intr_legacy, sc,
9287                                 &sc->intr[0].tag)) != 0) {
9288            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9289            goto bxe_interrupt_attach_exit;
9290        }
9291    }
9292
9293bxe_interrupt_attach_exit:
9294
9295    return (rc);
9296}
9297
9298static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9299static int  bxe_init_hw_common(struct bxe_softc *sc);
9300static int  bxe_init_hw_port(struct bxe_softc *sc);
9301static int  bxe_init_hw_func(struct bxe_softc *sc);
9302static void bxe_reset_common(struct bxe_softc *sc);
9303static void bxe_reset_port(struct bxe_softc *sc);
9304static void bxe_reset_func(struct bxe_softc *sc);
9305static int  bxe_gunzip_init(struct bxe_softc *sc);
9306static void bxe_gunzip_end(struct bxe_softc *sc);
9307static int  bxe_init_firmware(struct bxe_softc *sc);
9308static void bxe_release_firmware(struct bxe_softc *sc);
9309
9310static struct
9311ecore_func_sp_drv_ops bxe_func_sp_drv = {
9312    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9313    .init_hw_cmn      = bxe_init_hw_common,
9314    .init_hw_port     = bxe_init_hw_port,
9315    .init_hw_func     = bxe_init_hw_func,
9316
9317    .reset_hw_cmn     = bxe_reset_common,
9318    .reset_hw_port    = bxe_reset_port,
9319    .reset_hw_func    = bxe_reset_func,
9320
9321    .gunzip_init      = bxe_gunzip_init,
9322    .gunzip_end       = bxe_gunzip_end,
9323
9324    .init_fw          = bxe_init_firmware,
9325    .release_fw       = bxe_release_firmware,
9326};
9327
9328static void
9329bxe_init_func_obj(struct bxe_softc *sc)
9330{
9331    sc->dmae_ready = 0;
9332
9333    ecore_init_func_obj(sc,
9334                        &sc->func_obj,
9335                        BXE_SP(sc, func_rdata),
9336                        BXE_SP_MAPPING(sc, func_rdata),
9337                        BXE_SP(sc, func_afex_rdata),
9338                        BXE_SP_MAPPING(sc, func_afex_rdata),
9339                        &bxe_func_sp_drv);
9340}
9341
9342static int
9343bxe_init_hw(struct bxe_softc *sc,
9344            uint32_t         load_code)
9345{
9346    struct ecore_func_state_params func_params = { NULL };
9347    int rc;
9348
9349    /* prepare the parameters for function state transitions */
9350    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9351
9352    func_params.f_obj = &sc->func_obj;
9353    func_params.cmd = ECORE_F_CMD_HW_INIT;
9354
9355    func_params.params.hw_init.load_phase = load_code;
9356
9357    /*
9358     * Via a plethora of function pointers, we will eventually reach
9359     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9360     */
9361    rc = ecore_func_state_change(sc, &func_params);
9362
9363    return (rc);
9364}
9365
9366static void
9367bxe_fill(struct bxe_softc *sc,
9368         uint32_t         addr,
9369         int              fill,
9370         uint32_t         len)
9371{
9372    uint32_t i;
9373
9374    if (!(len % 4) && !(addr % 4)) {
9375        for (i = 0; i < len; i += 4) {
9376            REG_WR(sc, (addr + i), fill);
9377        }
9378    } else {
9379        for (i = 0; i < len; i++) {
9380            REG_WR8(sc, (addr + i), fill);
9381        }
9382    }
9383}
9384
9385/* writes FP SP data to FW - data_size in dwords */
9386static void
9387bxe_wr_fp_sb_data(struct bxe_softc *sc,
9388                  int              fw_sb_id,
9389                  uint32_t         *sb_data_p,
9390                  uint32_t         data_size)
9391{
9392    int index;
9393
9394    for (index = 0; index < data_size; index++) {
9395        REG_WR(sc,
9396               (BAR_CSTRORM_INTMEM +
9397                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9398                (sizeof(uint32_t) * index)),
9399               *(sb_data_p + index));
9400    }
9401}
9402
9403static void
9404bxe_zero_fp_sb(struct bxe_softc *sc,
9405               int              fw_sb_id)
9406{
9407    struct hc_status_block_data_e2 sb_data_e2;
9408    struct hc_status_block_data_e1x sb_data_e1x;
9409    uint32_t *sb_data_p;
9410    uint32_t data_size = 0;
9411
9412    if (!CHIP_IS_E1x(sc)) {
9413        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9414        sb_data_e2.common.state = SB_DISABLED;
9415        sb_data_e2.common.p_func.vf_valid = FALSE;
9416        sb_data_p = (uint32_t *)&sb_data_e2;
9417        data_size = (sizeof(struct hc_status_block_data_e2) /
9418                     sizeof(uint32_t));
9419    } else {
9420        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9421        sb_data_e1x.common.state = SB_DISABLED;
9422        sb_data_e1x.common.p_func.vf_valid = FALSE;
9423        sb_data_p = (uint32_t *)&sb_data_e1x;
9424        data_size = (sizeof(struct hc_status_block_data_e1x) /
9425                     sizeof(uint32_t));
9426    }
9427
9428    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9429
9430    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9431             0, CSTORM_STATUS_BLOCK_SIZE);
9432    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9433             0, CSTORM_SYNC_BLOCK_SIZE);
9434}
9435
9436static void
9437bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9438                  struct hc_sp_status_block_data *sp_sb_data)
9439{
9440    int i;
9441
9442    for (i = 0;
9443         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9444         i++) {
9445        REG_WR(sc,
9446               (BAR_CSTRORM_INTMEM +
9447                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9448                (i * sizeof(uint32_t))),
9449               *((uint32_t *)sp_sb_data + i));
9450    }
9451}
9452
9453static void
9454bxe_zero_sp_sb(struct bxe_softc *sc)
9455{
9456    struct hc_sp_status_block_data sp_sb_data;
9457
9458    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9459
9460    sp_sb_data.state           = SB_DISABLED;
9461    sp_sb_data.p_func.vf_valid = FALSE;
9462
9463    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9464
9465    bxe_fill(sc,
9466             (BAR_CSTRORM_INTMEM +
9467              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9468              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9469    bxe_fill(sc,
9470             (BAR_CSTRORM_INTMEM +
9471              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9472              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9473}
9474
9475static void
9476bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9477                             int                       igu_sb_id,
9478                             int                       igu_seg_id)
9479{
9480    hc_sm->igu_sb_id      = igu_sb_id;
9481    hc_sm->igu_seg_id     = igu_seg_id;
9482    hc_sm->timer_value    = 0xFF;
9483    hc_sm->time_to_expire = 0xFFFFFFFF;
9484}
9485
9486static void
9487bxe_map_sb_state_machines(struct hc_index_data *index_data)
9488{
9489    /* zero out state machine indices */
9490
9491    /* rx indices */
9492    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9493
9494    /* tx indices */
9495    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9496    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9497    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9498    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9499
9500    /* map indices */
9501
9502    /* rx indices */
9503    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9504        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9505
9506    /* tx indices */
9507    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9508        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9509    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9510        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9511    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9512        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9513    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9514        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9515}
9516
9517static void
9518bxe_init_sb(struct bxe_softc *sc,
9519            bus_addr_t       busaddr,
9520            int              vfid,
9521            uint8_t          vf_valid,
9522            int              fw_sb_id,
9523            int              igu_sb_id)
9524{
9525    struct hc_status_block_data_e2  sb_data_e2;
9526    struct hc_status_block_data_e1x sb_data_e1x;
9527    struct hc_status_block_sm       *hc_sm_p;
9528    uint32_t *sb_data_p;
9529    int igu_seg_id;
9530    int data_size;
9531
9532    if (CHIP_INT_MODE_IS_BC(sc)) {
9533        igu_seg_id = HC_SEG_ACCESS_NORM;
9534    } else {
9535        igu_seg_id = IGU_SEG_ACCESS_NORM;
9536    }
9537
9538    bxe_zero_fp_sb(sc, fw_sb_id);
9539
9540    if (!CHIP_IS_E1x(sc)) {
9541        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9542        sb_data_e2.common.state = SB_ENABLED;
9543        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9544        sb_data_e2.common.p_func.vf_id = vfid;
9545        sb_data_e2.common.p_func.vf_valid = vf_valid;
9546        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9547        sb_data_e2.common.same_igu_sb_1b = TRUE;
9548        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9549        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9550        hc_sm_p = sb_data_e2.common.state_machine;
9551        sb_data_p = (uint32_t *)&sb_data_e2;
9552        data_size = (sizeof(struct hc_status_block_data_e2) /
9553                     sizeof(uint32_t));
9554        bxe_map_sb_state_machines(sb_data_e2.index_data);
9555    } else {
9556        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9557        sb_data_e1x.common.state = SB_ENABLED;
9558        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9559        sb_data_e1x.common.p_func.vf_id = 0xff;
9560        sb_data_e1x.common.p_func.vf_valid = FALSE;
9561        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9562        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9563        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9564        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9565        hc_sm_p = sb_data_e1x.common.state_machine;
9566        sb_data_p = (uint32_t *)&sb_data_e1x;
9567        data_size = (sizeof(struct hc_status_block_data_e1x) /
9568                     sizeof(uint32_t));
9569        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9570    }
9571
9572    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9573    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9574
9575    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9576
9577    /* write indices to HW - PCI guarantees endianity of regpairs */
9578    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9579}
9580
9581static inline uint8_t
9582bxe_fp_qzone_id(struct bxe_fastpath *fp)
9583{
9584    if (CHIP_IS_E1x(fp->sc)) {
9585        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9586    } else {
9587        return (fp->cl_id);
9588    }
9589}
9590
9591static inline uint32_t
9592bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9593                           struct bxe_fastpath *fp)
9594{
9595    uint32_t offset = BAR_USTRORM_INTMEM;
9596
9597    if (!CHIP_IS_E1x(sc)) {
9598        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9599    } else {
9600        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9601    }
9602
9603    return (offset);
9604}
9605
9606static void
9607bxe_init_eth_fp(struct bxe_softc *sc,
9608                int              idx)
9609{
9610    struct bxe_fastpath *fp = &sc->fp[idx];
9611    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9612    unsigned long q_type = 0;
9613    int cos;
9614
9615    fp->sc    = sc;
9616    fp->index = idx;
9617
9618    snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
9619             "bxe%d_fp%d_tx_lock", sc->unit, idx);
9620    mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
9621
9622    snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
9623             "bxe%d_fp%d_rx_lock", sc->unit, idx);
9624    mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
9625
9626    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9627    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9628
9629    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9630                    (SC_L_ID(sc) + idx) :
9631                    /* want client ID same as IGU SB ID for non-E1 */
9632                    fp->igu_sb_id;
9633    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9634
9635    /* setup sb indices */
9636    if (!CHIP_IS_E1x(sc)) {
9637        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9638        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9639    } else {
9640        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9641        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9642    }
9643
9644    /* init shortcut */
9645    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9646
9647    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9648
9649    /*
9650     * XXX If multiple CoS is ever supported then each fastpath structure
9651     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9652     */
9653    for (cos = 0; cos < sc->max_cos; cos++) {
9654        cids[cos] = idx;
9655    }
9656    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9657
9658    /* nothing more for a VF to do */
9659    if (IS_VF(sc)) {
9660        return;
9661    }
9662
9663    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9664                fp->fw_sb_id, fp->igu_sb_id);
9665
9666    bxe_update_fp_sb_idx(fp);
9667
9668    /* Configure Queue State object */
9669    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9670    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9671
9672    ecore_init_queue_obj(sc,
9673                         &sc->sp_objs[idx].q_obj,
9674                         fp->cl_id,
9675                         cids,
9676                         sc->max_cos,
9677                         SC_FUNC(sc),
9678                         BXE_SP(sc, q_rdata),
9679                         BXE_SP_MAPPING(sc, q_rdata),
9680                         q_type);
9681
9682    /* configure classification DBs */
9683    ecore_init_mac_obj(sc,
9684                       &sc->sp_objs[idx].mac_obj,
9685                       fp->cl_id,
9686                       idx,
9687                       SC_FUNC(sc),
9688                       BXE_SP(sc, mac_rdata),
9689                       BXE_SP_MAPPING(sc, mac_rdata),
9690                       ECORE_FILTER_MAC_PENDING,
9691                       &sc->sp_state,
9692                       ECORE_OBJ_TYPE_RX_TX,
9693                       &sc->macs_pool);
9694
9695    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9696          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9697}
9698
9699static inline void
9700bxe_update_rx_prod(struct bxe_softc    *sc,
9701                   struct bxe_fastpath *fp,
9702                   uint16_t            rx_bd_prod,
9703                   uint16_t            rx_cq_prod,
9704                   uint16_t            rx_sge_prod)
9705{
9706    struct ustorm_eth_rx_producers rx_prods = { 0 };
9707    uint32_t i;
9708
9709    /* update producers */
9710    rx_prods.bd_prod  = rx_bd_prod;
9711    rx_prods.cqe_prod = rx_cq_prod;
9712    rx_prods.sge_prod = rx_sge_prod;
9713
9714    /*
9715     * Make sure that the BD and SGE data is updated before updating the
9716     * producers since FW might read the BD/SGE right after the producer
9717     * is updated.
9718     * This is only applicable for weak-ordered memory model archs such
9719     * as IA-64. The following barrier is also mandatory since FW will
9720     * assumes BDs must have buffers.
9721     */
9722    wmb();
9723
9724    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9725        REG_WR(sc,
9726               (fp->ustorm_rx_prods_offset + (i * 4)),
9727               ((uint32_t *)&rx_prods)[i]);
9728    }
9729
9730    wmb(); /* keep prod updates ordered */
9731
9732    BLOGD(sc, DBG_RX,
9733          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9734          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9735}
9736
9737static void
9738bxe_init_rx_rings(struct bxe_softc *sc)
9739{
9740    struct bxe_fastpath *fp;
9741    int i;
9742
9743    for (i = 0; i < sc->num_queues; i++) {
9744        fp = &sc->fp[i];
9745
9746        fp->rx_bd_cons = 0;
9747
9748        /*
9749         * Activate the BD ring...
9750         * Warning, this will generate an interrupt (to the TSTORM)
9751         * so this can only be done after the chip is initialized
9752         */
9753        bxe_update_rx_prod(sc, fp,
9754                           fp->rx_bd_prod,
9755                           fp->rx_cq_prod,
9756                           fp->rx_sge_prod);
9757
9758        if (i != 0) {
9759            continue;
9760        }
9761
9762        if (CHIP_IS_E1(sc)) {
9763            REG_WR(sc,
9764                   (BAR_USTRORM_INTMEM +
9765                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9766                   U64_LO(fp->rcq_dma.paddr));
9767            REG_WR(sc,
9768                   (BAR_USTRORM_INTMEM +
9769                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9770                   U64_HI(fp->rcq_dma.paddr));
9771        }
9772    }
9773}
9774
9775static void
9776bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9777{
9778    SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
9779    fp->tx_db.data.zero_fill1 = 0;
9780    fp->tx_db.data.prod = 0;
9781
9782    fp->tx_pkt_prod = 0;
9783    fp->tx_pkt_cons = 0;
9784    fp->tx_bd_prod = 0;
9785    fp->tx_bd_cons = 0;
9786    fp->eth_q_stats.tx_pkts = 0;
9787}
9788
9789static inline void
9790bxe_init_tx_rings(struct bxe_softc *sc)
9791{
9792    int i;
9793
9794    for (i = 0; i < sc->num_queues; i++) {
9795        bxe_init_tx_ring_one(&sc->fp[i]);
9796    }
9797}
9798
9799static void
9800bxe_init_def_sb(struct bxe_softc *sc)
9801{
9802    struct host_sp_status_block *def_sb = sc->def_sb;
9803    bus_addr_t mapping = sc->def_sb_dma.paddr;
9804    int igu_sp_sb_index;
9805    int igu_seg_id;
9806    int port = SC_PORT(sc);
9807    int func = SC_FUNC(sc);
9808    int reg_offset, reg_offset_en5;
9809    uint64_t section;
9810    int index, sindex;
9811    struct hc_sp_status_block_data sp_sb_data;
9812
9813    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9814
9815    if (CHIP_INT_MODE_IS_BC(sc)) {
9816        igu_sp_sb_index = DEF_SB_IGU_ID;
9817        igu_seg_id = HC_SEG_ACCESS_DEF;
9818    } else {
9819        igu_sp_sb_index = sc->igu_dsb_id;
9820        igu_seg_id = IGU_SEG_ACCESS_DEF;
9821    }
9822
9823    /* attentions */
9824    section = ((uint64_t)mapping +
9825               offsetof(struct host_sp_status_block, atten_status_block));
9826    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9827    sc->attn_state = 0;
9828
9829    reg_offset = (port) ?
9830                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9831                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9832    reg_offset_en5 = (port) ?
9833                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9834                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9835
9836    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9837        /* take care of sig[0]..sig[4] */
9838        for (sindex = 0; sindex < 4; sindex++) {
9839            sc->attn_group[index].sig[sindex] =
9840                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9841        }
9842
9843        if (!CHIP_IS_E1x(sc)) {
9844            /*
9845             * enable5 is separate from the rest of the registers,
9846             * and the address skip is 4 and not 16 between the
9847             * different groups
9848             */
9849            sc->attn_group[index].sig[4] =
9850                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9851        } else {
9852            sc->attn_group[index].sig[4] = 0;
9853        }
9854    }
9855
9856    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9857        reg_offset = (port) ?
9858                         HC_REG_ATTN_MSG1_ADDR_L :
9859                         HC_REG_ATTN_MSG0_ADDR_L;
9860        REG_WR(sc, reg_offset, U64_LO(section));
9861        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9862    } else if (!CHIP_IS_E1x(sc)) {
9863        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9864        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9865    }
9866
9867    section = ((uint64_t)mapping +
9868               offsetof(struct host_sp_status_block, sp_sb));
9869
9870    bxe_zero_sp_sb(sc);
9871
9872    /* PCI guarantees endianity of regpair */
9873    sp_sb_data.state           = SB_ENABLED;
9874    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9875    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9876    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9877    sp_sb_data.igu_seg_id      = igu_seg_id;
9878    sp_sb_data.p_func.pf_id    = func;
9879    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9880    sp_sb_data.p_func.vf_id    = 0xff;
9881
9882    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9883
9884    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9885}
9886
9887static void
9888bxe_init_sp_ring(struct bxe_softc *sc)
9889{
9890    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9891    sc->spq_prod_idx = 0;
9892    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9893    sc->spq_prod_bd = sc->spq;
9894    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9895}
9896
9897static void
9898bxe_init_eq_ring(struct bxe_softc *sc)
9899{
9900    union event_ring_elem *elem;
9901    int i;
9902
9903    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9904        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9905
9906        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9907                                                 BCM_PAGE_SIZE *
9908                                                 (i % NUM_EQ_PAGES)));
9909        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9910                                                 BCM_PAGE_SIZE *
9911                                                 (i % NUM_EQ_PAGES)));
9912    }
9913
9914    sc->eq_cons    = 0;
9915    sc->eq_prod    = NUM_EQ_DESC;
9916    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9917
9918    atomic_store_rel_long(&sc->eq_spq_left,
9919                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9920                               NUM_EQ_DESC) - 1));
9921}
9922
9923static void
9924bxe_init_internal_common(struct bxe_softc *sc)
9925{
9926    int i;
9927
9928    if (IS_MF_SI(sc)) {
9929        /*
9930         * In switch independent mode, the TSTORM needs to accept
9931         * packets that failed classification, since approximate match
9932         * mac addresses aren't written to NIG LLH.
9933         */
9934        REG_WR8(sc,
9935                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9936                2);
9937    } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
9938        REG_WR8(sc,
9939                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9940                0);
9941    }
9942
9943    /*
9944     * Zero this manually as its initialization is currently missing
9945     * in the initTool.
9946     */
9947    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9948        REG_WR(sc,
9949               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9950               0);
9951    }
9952
9953    if (!CHIP_IS_E1x(sc)) {
9954        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9955                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9956    }
9957}
9958
9959static void
9960bxe_init_internal(struct bxe_softc *sc,
9961                  uint32_t         load_code)
9962{
9963    switch (load_code) {
9964    case FW_MSG_CODE_DRV_LOAD_COMMON:
9965    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9966        bxe_init_internal_common(sc);
9967        /* no break */
9968
9969    case FW_MSG_CODE_DRV_LOAD_PORT:
9970        /* nothing to do */
9971        /* no break */
9972
9973    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9974        /* internal memory per function is initialized inside bxe_pf_init */
9975        break;
9976
9977    default:
9978        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9979        break;
9980    }
9981}
9982
9983static void
9984storm_memset_func_cfg(struct bxe_softc                         *sc,
9985                      struct tstorm_eth_function_common_config *tcfg,
9986                      uint16_t                                  abs_fid)
9987{
9988    uint32_t addr;
9989    size_t size;
9990
9991    addr = (BAR_TSTRORM_INTMEM +
9992            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9993    size = sizeof(struct tstorm_eth_function_common_config);
9994    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9995}
9996
9997static void
9998bxe_func_init(struct bxe_softc            *sc,
9999              struct bxe_func_init_params *p)
10000{
10001    struct tstorm_eth_function_common_config tcfg = { 0 };
10002
10003    if (CHIP_IS_E1x(sc)) {
10004        storm_memset_func_cfg(sc, &tcfg, p->func_id);
10005    }
10006
10007    /* Enable the function in the FW */
10008    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10009    storm_memset_func_en(sc, p->func_id, 1);
10010
10011    /* spq */
10012    if (p->func_flgs & FUNC_FLG_SPQ) {
10013        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10014        REG_WR(sc,
10015               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10016               p->spq_prod);
10017    }
10018}
10019
10020/*
10021 * Calculates the sum of vn_min_rates.
10022 * It's needed for further normalizing of the min_rates.
10023 * Returns:
10024 *   sum of vn_min_rates.
10025 *     or
10026 *   0 - if all the min_rates are 0.
10027 * In the later case fainess algorithm should be deactivated.
10028 * If all min rates are not zero then those that are zeroes will be set to 1.
10029 */
10030static void
10031bxe_calc_vn_min(struct bxe_softc       *sc,
10032                struct cmng_init_input *input)
10033{
10034    uint32_t vn_cfg;
10035    uint32_t vn_min_rate;
10036    int all_zero = 1;
10037    int vn;
10038
10039    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10040        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10041        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10042                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10043
10044        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10045            /* skip hidden VNs */
10046            vn_min_rate = 0;
10047        } else if (!vn_min_rate) {
10048            /* If min rate is zero - set it to 100 */
10049            vn_min_rate = DEF_MIN_RATE;
10050        } else {
10051            all_zero = 0;
10052        }
10053
10054        input->vnic_min_rate[vn] = vn_min_rate;
10055    }
10056
10057    /* if ETS or all min rates are zeros - disable fairness */
10058    if (BXE_IS_ETS_ENABLED(sc)) {
10059        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10060        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10061    } else if (all_zero) {
10062        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10063        BLOGD(sc, DBG_LOAD,
10064              "Fariness disabled (all MIN values are zeroes)\n");
10065    } else {
10066        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10067    }
10068}
10069
10070static inline uint16_t
10071bxe_extract_max_cfg(struct bxe_softc *sc,
10072                    uint32_t         mf_cfg)
10073{
10074    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10075                        FUNC_MF_CFG_MAX_BW_SHIFT);
10076
10077    if (!max_cfg) {
10078        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10079        max_cfg = 100;
10080    }
10081
10082    return (max_cfg);
10083}
10084
10085static void
10086bxe_calc_vn_max(struct bxe_softc       *sc,
10087                int                    vn,
10088                struct cmng_init_input *input)
10089{
10090    uint16_t vn_max_rate;
10091    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10092    uint32_t max_cfg;
10093
10094    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10095        vn_max_rate = 0;
10096    } else {
10097        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10098
10099        if (IS_MF_SI(sc)) {
10100            /* max_cfg in percents of linkspeed */
10101            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10102        } else { /* SD modes */
10103            /* max_cfg is absolute in 100Mb units */
10104            vn_max_rate = (max_cfg * 100);
10105        }
10106    }
10107
10108    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10109
10110    input->vnic_max_rate[vn] = vn_max_rate;
10111}
10112
10113static void
10114bxe_cmng_fns_init(struct bxe_softc *sc,
10115                  uint8_t          read_cfg,
10116                  uint8_t          cmng_type)
10117{
10118    struct cmng_init_input input;
10119    int vn;
10120
10121    memset(&input, 0, sizeof(struct cmng_init_input));
10122
10123    input.port_rate = sc->link_vars.line_speed;
10124
10125    if (cmng_type == CMNG_FNS_MINMAX) {
10126        /* read mf conf from shmem */
10127        if (read_cfg) {
10128            bxe_read_mf_cfg(sc);
10129        }
10130
10131        /* get VN min rate and enable fairness if not 0 */
10132        bxe_calc_vn_min(sc, &input);
10133
10134        /* get VN max rate */
10135        if (sc->port.pmf) {
10136            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10137                bxe_calc_vn_max(sc, vn, &input);
10138            }
10139        }
10140
10141        /* always enable rate shaping and fairness */
10142        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10143
10144        ecore_init_cmng(&input, &sc->cmng);
10145        return;
10146    }
10147
10148    /* rate shaping and fairness are disabled */
10149    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10150}
10151
10152static int
10153bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10154{
10155    if (CHIP_REV_IS_SLOW(sc)) {
10156        return (CMNG_FNS_NONE);
10157    }
10158
10159    if (IS_MF(sc)) {
10160        return (CMNG_FNS_MINMAX);
10161    }
10162
10163    return (CMNG_FNS_NONE);
10164}
10165
10166static void
10167storm_memset_cmng(struct bxe_softc *sc,
10168                  struct cmng_init *cmng,
10169                  uint8_t          port)
10170{
10171    int vn;
10172    int func;
10173    uint32_t addr;
10174    size_t size;
10175
10176    addr = (BAR_XSTRORM_INTMEM +
10177            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10178    size = sizeof(struct cmng_struct_per_port);
10179    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10180
10181    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10182        func = func_by_vn(sc, vn);
10183
10184        addr = (BAR_XSTRORM_INTMEM +
10185                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10186        size = sizeof(struct rate_shaping_vars_per_vn);
10187        ecore_storm_memset_struct(sc, addr, size,
10188                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10189
10190        addr = (BAR_XSTRORM_INTMEM +
10191                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10192        size = sizeof(struct fairness_vars_per_vn);
10193        ecore_storm_memset_struct(sc, addr, size,
10194                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10195    }
10196}
10197
10198static void
10199bxe_pf_init(struct bxe_softc *sc)
10200{
10201    struct bxe_func_init_params func_init = { 0 };
10202    struct event_ring_data eq_data = { { 0 } };
10203    uint16_t flags;
10204
10205    if (!CHIP_IS_E1x(sc)) {
10206        /* reset IGU PF statistics: MSIX + ATTN */
10207        /* PF */
10208        REG_WR(sc,
10209               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10210                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10211                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10212               0);
10213        /* ATTN */
10214        REG_WR(sc,
10215               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10216                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10217                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10218                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10219               0);
10220    }
10221
10222    /* function setup flags */
10223    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10224
10225    /*
10226     * This flag is relevant for E1x only.
10227     * E2 doesn't have a TPA configuration in a function level.
10228     */
10229    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10230
10231    func_init.func_flgs = flags;
10232    func_init.pf_id     = SC_FUNC(sc);
10233    func_init.func_id   = SC_FUNC(sc);
10234    func_init.spq_map   = sc->spq_dma.paddr;
10235    func_init.spq_prod  = sc->spq_prod_idx;
10236
10237    bxe_func_init(sc, &func_init);
10238
10239    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10240
10241    /*
10242     * Congestion management values depend on the link rate.
10243     * There is no active link so initial link rate is set to 10Gbps.
10244     * When the link comes up the congestion management values are
10245     * re-calculated according to the actual link rate.
10246     */
10247    sc->link_vars.line_speed = SPEED_10000;
10248    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10249
10250    /* Only the PMF sets the HW */
10251    if (sc->port.pmf) {
10252        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10253    }
10254
10255    /* init Event Queue - PCI bus guarantees correct endainity */
10256    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10257    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10258    eq_data.producer     = sc->eq_prod;
10259    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10260    eq_data.sb_id        = DEF_SB_ID;
10261    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10262}
10263
10264static void
10265bxe_hc_int_enable(struct bxe_softc *sc)
10266{
10267    int port = SC_PORT(sc);
10268    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10269    uint32_t val = REG_RD(sc, addr);
10270    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10271    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10272                           (sc->intr_count == 1)) ? TRUE : FALSE;
10273    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10274
10275    if (msix) {
10276        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10277                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10278        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10279                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10280        if (single_msix) {
10281            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10282        }
10283    } else if (msi) {
10284        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10285        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10286                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10287                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10288    } else {
10289        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10290                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10291                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10292                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10293
10294        if (!CHIP_IS_E1(sc)) {
10295            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10296                  val, port, addr);
10297
10298            REG_WR(sc, addr, val);
10299
10300            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10301        }
10302    }
10303
10304    if (CHIP_IS_E1(sc)) {
10305        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10306    }
10307
10308    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10309          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10310
10311    REG_WR(sc, addr, val);
10312
10313    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10314    mb();
10315
10316    if (!CHIP_IS_E1(sc)) {
10317        /* init leading/trailing edge */
10318        if (IS_MF(sc)) {
10319            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10320            if (sc->port.pmf) {
10321                /* enable nig and gpio3 attention */
10322                val |= 0x1100;
10323            }
10324        } else {
10325            val = 0xffff;
10326        }
10327
10328        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10329        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10330    }
10331
10332    /* make sure that interrupts are indeed enabled from here on */
10333    mb();
10334}
10335
10336static void
10337bxe_igu_int_enable(struct bxe_softc *sc)
10338{
10339    uint32_t val;
10340    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10341    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10342                           (sc->intr_count == 1)) ? TRUE : FALSE;
10343    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10344
10345    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10346
10347    if (msix) {
10348        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10349                 IGU_PF_CONF_SINGLE_ISR_EN);
10350        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10351                IGU_PF_CONF_ATTN_BIT_EN);
10352        if (single_msix) {
10353            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10354        }
10355    } else if (msi) {
10356        val &= ~IGU_PF_CONF_INT_LINE_EN;
10357        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10358                IGU_PF_CONF_ATTN_BIT_EN |
10359                IGU_PF_CONF_SINGLE_ISR_EN);
10360    } else {
10361        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10362        val |= (IGU_PF_CONF_INT_LINE_EN |
10363                IGU_PF_CONF_ATTN_BIT_EN |
10364                IGU_PF_CONF_SINGLE_ISR_EN);
10365    }
10366
10367    /* clean previous status - need to configure igu prior to ack*/
10368    if ((!msix) || single_msix) {
10369        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10370        bxe_ack_int(sc);
10371    }
10372
10373    val |= IGU_PF_CONF_FUNC_EN;
10374
10375    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10376          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10377
10378    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10379
10380    mb();
10381
10382    /* init leading/trailing edge */
10383    if (IS_MF(sc)) {
10384        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10385        if (sc->port.pmf) {
10386            /* enable nig and gpio3 attention */
10387            val |= 0x1100;
10388        }
10389    } else {
10390        val = 0xffff;
10391    }
10392
10393    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10394    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10395
10396    /* make sure that interrupts are indeed enabled from here on */
10397    mb();
10398}
10399
10400static void
10401bxe_int_enable(struct bxe_softc *sc)
10402{
10403    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10404        bxe_hc_int_enable(sc);
10405    } else {
10406        bxe_igu_int_enable(sc);
10407    }
10408}
10409
10410static void
10411bxe_hc_int_disable(struct bxe_softc *sc)
10412{
10413    int port = SC_PORT(sc);
10414    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10415    uint32_t val = REG_RD(sc, addr);
10416
10417    /*
10418     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10419     * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10420     * block
10421     */
10422    if (CHIP_IS_E1(sc)) {
10423        /*
10424         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10425         * to prevent from HC sending interrupts after we exit the function
10426         */
10427        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10428
10429        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10430                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10431                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10432    } else {
10433        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10434                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10435                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10436                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10437    }
10438
10439    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10440
10441    /* flush all outstanding writes */
10442    mb();
10443
10444    REG_WR(sc, addr, val);
10445    if (REG_RD(sc, addr) != val) {
10446        BLOGE(sc, "proper val not read from HC IGU!\n");
10447    }
10448}
10449
10450static void
10451bxe_igu_int_disable(struct bxe_softc *sc)
10452{
10453    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10454
10455    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10456             IGU_PF_CONF_INT_LINE_EN |
10457             IGU_PF_CONF_ATTN_BIT_EN);
10458
10459    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10460
10461    /* flush all outstanding writes */
10462    mb();
10463
10464    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10465    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10466        BLOGE(sc, "proper val not read from IGU!\n");
10467    }
10468}
10469
10470static void
10471bxe_int_disable(struct bxe_softc *sc)
10472{
10473    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10474        bxe_hc_int_disable(sc);
10475    } else {
10476        bxe_igu_int_disable(sc);
10477    }
10478}
10479
10480static void
10481bxe_nic_init(struct bxe_softc *sc,
10482             int              load_code)
10483{
10484    int i;
10485
10486    for (i = 0; i < sc->num_queues; i++) {
10487        bxe_init_eth_fp(sc, i);
10488    }
10489
10490    rmb(); /* ensure status block indices were read */
10491
10492    bxe_init_rx_rings(sc);
10493    bxe_init_tx_rings(sc);
10494
10495    if (IS_VF(sc)) {
10496        return;
10497    }
10498
10499    /* initialize MOD_ABS interrupts */
10500    elink_init_mod_abs_int(sc, &sc->link_vars,
10501                           sc->devinfo.chip_id,
10502                           sc->devinfo.shmem_base,
10503                           sc->devinfo.shmem2_base,
10504                           SC_PORT(sc));
10505
10506    bxe_init_def_sb(sc);
10507    bxe_update_dsb_idx(sc);
10508    bxe_init_sp_ring(sc);
10509    bxe_init_eq_ring(sc);
10510    bxe_init_internal(sc, load_code);
10511    bxe_pf_init(sc);
10512    bxe_stats_init(sc);
10513
10514    /* flush all before enabling interrupts */
10515    mb();
10516
10517    bxe_int_enable(sc);
10518
10519    /* check for SPIO5 */
10520    bxe_attn_int_deasserted0(sc,
10521                             REG_RD(sc,
10522                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10523                                     SC_PORT(sc)*4)) &
10524                             AEU_INPUTS_ATTN_BITS_SPIO5);
10525}
10526
10527static inline void
10528bxe_init_objs(struct bxe_softc *sc)
10529{
10530    /* mcast rules must be added to tx if tx switching is enabled */
10531    ecore_obj_type o_type =
10532        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10533                                         ECORE_OBJ_TYPE_RX;
10534
10535    /* RX_MODE controlling object */
10536    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10537
10538    /* multicast configuration controlling object */
10539    ecore_init_mcast_obj(sc,
10540                         &sc->mcast_obj,
10541                         sc->fp[0].cl_id,
10542                         sc->fp[0].index,
10543                         SC_FUNC(sc),
10544                         SC_FUNC(sc),
10545                         BXE_SP(sc, mcast_rdata),
10546                         BXE_SP_MAPPING(sc, mcast_rdata),
10547                         ECORE_FILTER_MCAST_PENDING,
10548                         &sc->sp_state,
10549                         o_type);
10550
10551    /* Setup CAM credit pools */
10552    ecore_init_mac_credit_pool(sc,
10553                               &sc->macs_pool,
10554                               SC_FUNC(sc),
10555                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10556                                                 VNICS_PER_PATH(sc));
10557
10558    ecore_init_vlan_credit_pool(sc,
10559                                &sc->vlans_pool,
10560                                SC_ABS_FUNC(sc) >> 1,
10561                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10562                                                  VNICS_PER_PATH(sc));
10563
10564    /* RSS configuration object */
10565    ecore_init_rss_config_obj(sc,
10566                              &sc->rss_conf_obj,
10567                              sc->fp[0].cl_id,
10568                              sc->fp[0].index,
10569                              SC_FUNC(sc),
10570                              SC_FUNC(sc),
10571                              BXE_SP(sc, rss_rdata),
10572                              BXE_SP_MAPPING(sc, rss_rdata),
10573                              ECORE_FILTER_RSS_CONF_PENDING,
10574                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10575}
10576
10577/*
10578 * Initialize the function. This must be called before sending CLIENT_SETUP
10579 * for the first client.
10580 */
10581static inline int
10582bxe_func_start(struct bxe_softc *sc)
10583{
10584    struct ecore_func_state_params func_params = { NULL };
10585    struct ecore_func_start_params *start_params = &func_params.params.start;
10586
10587    /* Prepare parameters for function state transitions */
10588    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10589
10590    func_params.f_obj = &sc->func_obj;
10591    func_params.cmd = ECORE_F_CMD_START;
10592
10593    /* Function parameters */
10594    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10595    start_params->sd_vlan_tag = OVLAN(sc);
10596
10597    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10598        start_params->network_cos_mode = STATIC_COS;
10599    } else { /* CHIP_IS_E1X */
10600        start_params->network_cos_mode = FW_WRR;
10601    }
10602
10603    start_params->gre_tunnel_mode = 0;
10604    start_params->gre_tunnel_rss  = 0;
10605
10606    return (ecore_func_state_change(sc, &func_params));
10607}
10608
10609static int
10610bxe_set_power_state(struct bxe_softc *sc,
10611                    uint8_t          state)
10612{
10613    uint16_t pmcsr;
10614
10615    /* If there is no power capability, silently succeed */
10616    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10617        BLOGW(sc, "No power capability\n");
10618        return (0);
10619    }
10620
10621    pmcsr = pci_read_config(sc->dev,
10622                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10623                            2);
10624
10625    switch (state) {
10626    case PCI_PM_D0:
10627        pci_write_config(sc->dev,
10628                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10629                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10630
10631        if (pmcsr & PCIM_PSTAT_DMASK) {
10632            /* delay required during transition out of D3hot */
10633            DELAY(20000);
10634        }
10635
10636        break;
10637
10638    case PCI_PM_D3hot:
10639        /* XXX if there are other clients above don't shut down the power */
10640
10641        /* don't shut down the power for emulation and FPGA */
10642        if (CHIP_REV_IS_SLOW(sc)) {
10643            return (0);
10644        }
10645
10646        pmcsr &= ~PCIM_PSTAT_DMASK;
10647        pmcsr |= PCIM_PSTAT_D3;
10648
10649        if (sc->wol) {
10650            pmcsr |= PCIM_PSTAT_PMEENABLE;
10651        }
10652
10653        pci_write_config(sc->dev,
10654                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10655                         pmcsr, 4);
10656
10657        /*
10658         * No more memory access after this point until device is brought back
10659         * to D0 state.
10660         */
10661        break;
10662
10663    default:
10664        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10665            state, pmcsr);
10666        return (-1);
10667    }
10668
10669    return (0);
10670}
10671
10672
10673/* return true if succeeded to acquire the lock */
10674static uint8_t
10675bxe_trylock_hw_lock(struct bxe_softc *sc,
10676                    uint32_t         resource)
10677{
10678    uint32_t lock_status;
10679    uint32_t resource_bit = (1 << resource);
10680    int func = SC_FUNC(sc);
10681    uint32_t hw_lock_control_reg;
10682
10683    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10684
10685    /* Validating that the resource is within range */
10686    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10687        BLOGD(sc, DBG_LOAD,
10688              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10689              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10690        return (FALSE);
10691    }
10692
10693    if (func <= 5) {
10694        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10695    } else {
10696        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10697    }
10698
10699    /* try to acquire the lock */
10700    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10701    lock_status = REG_RD(sc, hw_lock_control_reg);
10702    if (lock_status & resource_bit) {
10703        return (TRUE);
10704    }
10705
10706    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10707        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10708        lock_status, resource_bit);
10709
10710    return (FALSE);
10711}
10712
10713/*
10714 * Get the recovery leader resource id according to the engine this function
10715 * belongs to. Currently only only 2 engines is supported.
10716 */
10717static int
10718bxe_get_leader_lock_resource(struct bxe_softc *sc)
10719{
10720    if (SC_PATH(sc)) {
10721        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10722    } else {
10723        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10724    }
10725}
10726
10727/* try to acquire a leader lock for current engine */
10728static uint8_t
10729bxe_trylock_leader_lock(struct bxe_softc *sc)
10730{
10731    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10732}
10733
10734static int
10735bxe_release_leader_lock(struct bxe_softc *sc)
10736{
10737    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10738}
10739
10740/* close gates #2, #3 and #4 */
10741static void
10742bxe_set_234_gates(struct bxe_softc *sc,
10743                  uint8_t          close)
10744{
10745    uint32_t val;
10746
10747    /* gates #2 and #4a are closed/opened for "not E1" only */
10748    if (!CHIP_IS_E1(sc)) {
10749        /* #4 */
10750        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10751        /* #2 */
10752        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10753    }
10754
10755    /* #3 */
10756    if (CHIP_IS_E1x(sc)) {
10757        /* prevent interrupts from HC on both ports */
10758        val = REG_RD(sc, HC_REG_CONFIG_1);
10759        REG_WR(sc, HC_REG_CONFIG_1,
10760               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10761               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10762
10763        val = REG_RD(sc, HC_REG_CONFIG_0);
10764        REG_WR(sc, HC_REG_CONFIG_0,
10765               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10766               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10767    } else {
10768        /* Prevent incomming interrupts in IGU */
10769        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10770
10771        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10772               (!close) ?
10773               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10774               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10775    }
10776
10777    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10778          close ? "closing" : "opening");
10779
10780    wmb();
10781}
10782
10783/* poll for pending writes bit, it should get cleared in no more than 1s */
10784static int
10785bxe_er_poll_igu_vq(struct bxe_softc *sc)
10786{
10787    uint32_t cnt = 1000;
10788    uint32_t pend_bits = 0;
10789
10790    do {
10791        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10792
10793        if (pend_bits == 0) {
10794            break;
10795        }
10796
10797        DELAY(1000);
10798    } while (--cnt > 0);
10799
10800    if (cnt == 0) {
10801        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10802        return (-1);
10803    }
10804
10805    return (0);
10806}
10807
10808#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10809
10810static void
10811bxe_clp_reset_prep(struct bxe_softc *sc,
10812                   uint32_t         *magic_val)
10813{
10814    /* Do some magic... */
10815    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10816    *magic_val = val & SHARED_MF_CLP_MAGIC;
10817    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10818}
10819
10820/* restore the value of the 'magic' bit */
10821static void
10822bxe_clp_reset_done(struct bxe_softc *sc,
10823                   uint32_t         magic_val)
10824{
10825    /* Restore the 'magic' bit value... */
10826    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10827    MFCFG_WR(sc, shared_mf_config.clp_mb,
10828              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10829}
10830
10831/* prepare for MCP reset, takes care of CLP configurations */
10832static void
10833bxe_reset_mcp_prep(struct bxe_softc *sc,
10834                   uint32_t         *magic_val)
10835{
10836    uint32_t shmem;
10837    uint32_t validity_offset;
10838
10839    /* set `magic' bit in order to save MF config */
10840    if (!CHIP_IS_E1(sc)) {
10841        bxe_clp_reset_prep(sc, magic_val);
10842    }
10843
10844    /* get shmem offset */
10845    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10846    validity_offset =
10847        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10848
10849    /* Clear validity map flags */
10850    if (shmem > 0) {
10851        REG_WR(sc, shmem + validity_offset, 0);
10852    }
10853}
10854
10855#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10856#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10857
10858static void
10859bxe_mcp_wait_one(struct bxe_softc *sc)
10860{
10861    /* special handling for emulation and FPGA (10 times longer) */
10862    if (CHIP_REV_IS_SLOW(sc)) {
10863        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10864    } else {
10865        DELAY((MCP_ONE_TIMEOUT) * 1000);
10866    }
10867}
10868
10869/* initialize shmem_base and waits for validity signature to appear */
10870static int
10871bxe_init_shmem(struct bxe_softc *sc)
10872{
10873    int cnt = 0;
10874    uint32_t val = 0;
10875
10876    do {
10877        sc->devinfo.shmem_base     =
10878        sc->link_params.shmem_base =
10879            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10880
10881        if (sc->devinfo.shmem_base) {
10882            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10883            if (val & SHR_MEM_VALIDITY_MB)
10884                return (0);
10885        }
10886
10887        bxe_mcp_wait_one(sc);
10888
10889    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10890
10891    BLOGE(sc, "BAD MCP validity signature\n");
10892
10893    return (-1);
10894}
10895
10896static int
10897bxe_reset_mcp_comp(struct bxe_softc *sc,
10898                   uint32_t         magic_val)
10899{
10900    int rc = bxe_init_shmem(sc);
10901
10902    /* Restore the `magic' bit value */
10903    if (!CHIP_IS_E1(sc)) {
10904        bxe_clp_reset_done(sc, magic_val);
10905    }
10906
10907    return (rc);
10908}
10909
10910static void
10911bxe_pxp_prep(struct bxe_softc *sc)
10912{
10913    if (!CHIP_IS_E1(sc)) {
10914        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10915        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10916        wmb();
10917    }
10918}
10919
10920/*
10921 * Reset the whole chip except for:
10922 *      - PCIE core
10923 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10924 *      - IGU
10925 *      - MISC (including AEU)
10926 *      - GRC
10927 *      - RBCN, RBCP
10928 */
10929static void
10930bxe_process_kill_chip_reset(struct bxe_softc *sc,
10931                            uint8_t          global)
10932{
10933    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10934    uint32_t global_bits2, stay_reset2;
10935
10936    /*
10937     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10938     * (per chip) blocks.
10939     */
10940    global_bits2 =
10941        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10942        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10943
10944    /*
10945     * Don't reset the following blocks.
10946     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10947     *            reset, as in 4 port device they might still be owned
10948     *            by the MCP (there is only one leader per path).
10949     */
10950    not_reset_mask1 =
10951        MISC_REGISTERS_RESET_REG_1_RST_HC |
10952        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10953        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10954
10955    not_reset_mask2 =
10956        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10957        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10958        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10959        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10960        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10961        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10962        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10963        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10964        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10965        MISC_REGISTERS_RESET_REG_2_PGLC |
10966        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10967        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10968        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10969        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10970        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10971        MISC_REGISTERS_RESET_REG_2_UMAC1;
10972
10973    /*
10974     * Keep the following blocks in reset:
10975     *  - all xxMACs are handled by the elink code.
10976     */
10977    stay_reset2 =
10978        MISC_REGISTERS_RESET_REG_2_XMAC |
10979        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10980
10981    /* Full reset masks according to the chip */
10982    reset_mask1 = 0xffffffff;
10983
10984    if (CHIP_IS_E1(sc))
10985        reset_mask2 = 0xffff;
10986    else if (CHIP_IS_E1H(sc))
10987        reset_mask2 = 0x1ffff;
10988    else if (CHIP_IS_E2(sc))
10989        reset_mask2 = 0xfffff;
10990    else /* CHIP_IS_E3 */
10991        reset_mask2 = 0x3ffffff;
10992
10993    /* Don't reset global blocks unless we need to */
10994    if (!global)
10995        reset_mask2 &= ~global_bits2;
10996
10997    /*
10998     * In case of attention in the QM, we need to reset PXP
10999     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11000     * because otherwise QM reset would release 'close the gates' shortly
11001     * before resetting the PXP, then the PSWRQ would send a write
11002     * request to PGLUE. Then when PXP is reset, PGLUE would try to
11003     * read the payload data from PSWWR, but PSWWR would not
11004     * respond. The write queue in PGLUE would stuck, dmae commands
11005     * would not return. Therefore it's important to reset the second
11006     * reset register (containing the
11007     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11008     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11009     * bit).
11010     */
11011    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11012           reset_mask2 & (~not_reset_mask2));
11013
11014    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11015           reset_mask1 & (~not_reset_mask1));
11016
11017    mb();
11018    wmb();
11019
11020    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11021           reset_mask2 & (~stay_reset2));
11022
11023    mb();
11024    wmb();
11025
11026    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11027    wmb();
11028}
11029
11030static int
11031bxe_process_kill(struct bxe_softc *sc,
11032                 uint8_t          global)
11033{
11034    int cnt = 1000;
11035    uint32_t val = 0;
11036    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11037    uint32_t tags_63_32 = 0;
11038
11039    /* Empty the Tetris buffer, wait for 1s */
11040    do {
11041        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11042        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11043        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11044        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11045        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11046        if (CHIP_IS_E3(sc)) {
11047            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11048        }
11049
11050        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11051            ((port_is_idle_0 & 0x1) == 0x1) &&
11052            ((port_is_idle_1 & 0x1) == 0x1) &&
11053            (pgl_exp_rom2 == 0xffffffff) &&
11054            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11055            break;
11056        DELAY(1000);
11057    } while (cnt-- > 0);
11058
11059    if (cnt <= 0) {
11060        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11061                  "are still outstanding read requests after 1s! "
11062                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11063                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11064              sr_cnt, blk_cnt, port_is_idle_0,
11065              port_is_idle_1, pgl_exp_rom2);
11066        return (-1);
11067    }
11068
11069    mb();
11070
11071    /* Close gates #2, #3 and #4 */
11072    bxe_set_234_gates(sc, TRUE);
11073
11074    /* Poll for IGU VQs for 57712 and newer chips */
11075    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11076        return (-1);
11077    }
11078
11079    /* XXX indicate that "process kill" is in progress to MCP */
11080
11081    /* clear "unprepared" bit */
11082    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11083    mb();
11084
11085    /* Make sure all is written to the chip before the reset */
11086    wmb();
11087
11088    /*
11089     * Wait for 1ms to empty GLUE and PCI-E core queues,
11090     * PSWHST, GRC and PSWRD Tetris buffer.
11091     */
11092    DELAY(1000);
11093
11094    /* Prepare to chip reset: */
11095    /* MCP */
11096    if (global) {
11097        bxe_reset_mcp_prep(sc, &val);
11098    }
11099
11100    /* PXP */
11101    bxe_pxp_prep(sc);
11102    mb();
11103
11104    /* reset the chip */
11105    bxe_process_kill_chip_reset(sc, global);
11106    mb();
11107
11108    /* clear errors in PGB */
11109    if (!CHIP_IS_E1(sc))
11110        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11111
11112    /* Recover after reset: */
11113    /* MCP */
11114    if (global && bxe_reset_mcp_comp(sc, val)) {
11115        return (-1);
11116    }
11117
11118    /* XXX add resetting the NO_MCP mode DB here */
11119
11120    /* Open the gates #2, #3 and #4 */
11121    bxe_set_234_gates(sc, FALSE);
11122
11123    /* XXX
11124     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11125     * re-enable attentions
11126     */
11127
11128    return (0);
11129}
11130
11131static int
11132bxe_leader_reset(struct bxe_softc *sc)
11133{
11134    int rc = 0;
11135    uint8_t global = bxe_reset_is_global(sc);
11136    uint32_t load_code;
11137
11138    /*
11139     * If not going to reset MCP, load "fake" driver to reset HW while
11140     * driver is owner of the HW.
11141     */
11142    if (!global && !BXE_NOMCP(sc)) {
11143        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11144                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11145        if (!load_code) {
11146            BLOGE(sc, "MCP response failure, aborting\n");
11147            rc = -1;
11148            goto exit_leader_reset;
11149        }
11150
11151        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11152            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11153            BLOGE(sc, "MCP unexpected response, aborting\n");
11154            rc = -1;
11155            goto exit_leader_reset2;
11156        }
11157
11158        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11159        if (!load_code) {
11160            BLOGE(sc, "MCP response failure, aborting\n");
11161            rc = -1;
11162            goto exit_leader_reset2;
11163        }
11164    }
11165
11166    /* try to recover after the failure */
11167    if (bxe_process_kill(sc, global)) {
11168        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11169        rc = -1;
11170        goto exit_leader_reset2;
11171    }
11172
11173    /*
11174     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11175     * state.
11176     */
11177    bxe_set_reset_done(sc);
11178    if (global) {
11179        bxe_clear_reset_global(sc);
11180    }
11181
11182exit_leader_reset2:
11183
11184    /* unload "fake driver" if it was loaded */
11185    if (!global && !BXE_NOMCP(sc)) {
11186        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11187        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11188    }
11189
11190exit_leader_reset:
11191
11192    sc->is_leader = 0;
11193    bxe_release_leader_lock(sc);
11194
11195    mb();
11196    return (rc);
11197}
11198
11199/*
11200 * prepare INIT transition, parameters configured:
11201 *   - HC configuration
11202 *   - Queue's CDU context
11203 */
11204static void
11205bxe_pf_q_prep_init(struct bxe_softc               *sc,
11206                   struct bxe_fastpath            *fp,
11207                   struct ecore_queue_init_params *init_params)
11208{
11209    uint8_t cos;
11210    int cxt_index, cxt_offset;
11211
11212    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11213    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11214
11215    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11216    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11217
11218    /* HC rate */
11219    init_params->rx.hc_rate =
11220        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11221    init_params->tx.hc_rate =
11222        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11223
11224    /* FW SB ID */
11225    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11226
11227    /* CQ index among the SB indices */
11228    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11229    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11230
11231    /* set maximum number of COSs supported by this queue */
11232    init_params->max_cos = sc->max_cos;
11233
11234    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11235          fp->index, init_params->max_cos);
11236
11237    /* set the context pointers queue object */
11238    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11239        /* XXX change index/cid here if ever support multiple tx CoS */
11240        /* fp->txdata[cos]->cid */
11241        cxt_index = fp->index / ILT_PAGE_CIDS;
11242        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11243        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11244    }
11245}
11246
11247/* set flags that are common for the Tx-only and not normal connections */
11248static unsigned long
11249bxe_get_common_flags(struct bxe_softc    *sc,
11250                     struct bxe_fastpath *fp,
11251                     uint8_t             zero_stats)
11252{
11253    unsigned long flags = 0;
11254
11255    /* PF driver will always initialize the Queue to an ACTIVE state */
11256    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11257
11258    /*
11259     * tx only connections collect statistics (on the same index as the
11260     * parent connection). The statistics are zeroed when the parent
11261     * connection is initialized.
11262     */
11263
11264    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11265    if (zero_stats) {
11266        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11267    }
11268
11269    /*
11270     * tx only connections can support tx-switching, though their
11271     * CoS-ness doesn't survive the loopback
11272     */
11273    if (sc->flags & BXE_TX_SWITCHING) {
11274        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11275    }
11276
11277    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11278
11279    return (flags);
11280}
11281
11282static unsigned long
11283bxe_get_q_flags(struct bxe_softc    *sc,
11284                struct bxe_fastpath *fp,
11285                uint8_t             leading)
11286{
11287    unsigned long flags = 0;
11288
11289    if (IS_MF_SD(sc)) {
11290        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11291    }
11292
11293    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11294        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11295        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11296    }
11297
11298    if (leading) {
11299        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11300        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11301    }
11302
11303    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11304
11305    /* merge with common flags */
11306    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11307}
11308
11309static void
11310bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11311                      struct bxe_fastpath               *fp,
11312                      struct ecore_general_setup_params *gen_init,
11313                      uint8_t                           cos)
11314{
11315    gen_init->stat_id = bxe_stats_id(fp);
11316    gen_init->spcl_id = fp->cl_id;
11317    gen_init->mtu = sc->mtu;
11318    gen_init->cos = cos;
11319}
11320
11321static void
11322bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11323                 struct bxe_fastpath           *fp,
11324                 struct rxq_pause_params       *pause,
11325                 struct ecore_rxq_setup_params *rxq_init)
11326{
11327    uint8_t max_sge = 0;
11328    uint16_t sge_sz = 0;
11329    uint16_t tpa_agg_size = 0;
11330
11331    pause->sge_th_lo = SGE_TH_LO(sc);
11332    pause->sge_th_hi = SGE_TH_HI(sc);
11333
11334    /* validate SGE ring has enough to cross high threshold */
11335    if (sc->dropless_fc &&
11336            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11337            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11338        BLOGW(sc, "sge ring threshold limit\n");
11339    }
11340
11341    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11342    tpa_agg_size = (2 * sc->mtu);
11343    if (tpa_agg_size < sc->max_aggregation_size) {
11344        tpa_agg_size = sc->max_aggregation_size;
11345    }
11346
11347    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11348    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11349                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11350    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11351
11352    /* pause - not for e1 */
11353    if (!CHIP_IS_E1(sc)) {
11354        pause->bd_th_lo = BD_TH_LO(sc);
11355        pause->bd_th_hi = BD_TH_HI(sc);
11356
11357        pause->rcq_th_lo = RCQ_TH_LO(sc);
11358        pause->rcq_th_hi = RCQ_TH_HI(sc);
11359
11360        /* validate rings have enough entries to cross high thresholds */
11361        if (sc->dropless_fc &&
11362            pause->bd_th_hi + FW_PREFETCH_CNT >
11363            sc->rx_ring_size) {
11364            BLOGW(sc, "rx bd ring threshold limit\n");
11365        }
11366
11367        if (sc->dropless_fc &&
11368            pause->rcq_th_hi + FW_PREFETCH_CNT >
11369            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11370            BLOGW(sc, "rcq ring threshold limit\n");
11371        }
11372
11373        pause->pri_map = 1;
11374    }
11375
11376    /* rxq setup */
11377    rxq_init->dscr_map   = fp->rx_dma.paddr;
11378    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11379    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11380    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11381
11382    /*
11383     * This should be a maximum number of data bytes that may be
11384     * placed on the BD (not including paddings).
11385     */
11386    rxq_init->buf_sz = (fp->rx_buf_size -
11387                        IP_HEADER_ALIGNMENT_PADDING);
11388
11389    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11390    rxq_init->tpa_agg_sz      = tpa_agg_size;
11391    rxq_init->sge_buf_sz      = sge_sz;
11392    rxq_init->max_sges_pkt    = max_sge;
11393    rxq_init->rss_engine_id   = SC_FUNC(sc);
11394    rxq_init->mcast_engine_id = SC_FUNC(sc);
11395
11396    /*
11397     * Maximum number or simultaneous TPA aggregation for this Queue.
11398     * For PF Clients it should be the maximum available number.
11399     * VF driver(s) may want to define it to a smaller value.
11400     */
11401    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11402
11403    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11404    rxq_init->fw_sb_id = fp->fw_sb_id;
11405
11406    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11407
11408    /*
11409     * configure silent vlan removal
11410     * if multi function mode is afex, then mask default vlan
11411     */
11412    if (IS_MF_AFEX(sc)) {
11413        rxq_init->silent_removal_value =
11414            sc->devinfo.mf_info.afex_def_vlan_tag;
11415        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11416    }
11417}
11418
11419static void
11420bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11421                 struct bxe_fastpath           *fp,
11422                 struct ecore_txq_setup_params *txq_init,
11423                 uint8_t                       cos)
11424{
11425    /*
11426     * XXX If multiple CoS is ever supported then each fastpath structure
11427     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11428     * fp->txdata[cos]->tx_dma.paddr;
11429     */
11430    txq_init->dscr_map     = fp->tx_dma.paddr;
11431    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11432    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11433    txq_init->fw_sb_id     = fp->fw_sb_id;
11434
11435    /*
11436     * set the TSS leading client id for TX classfication to the
11437     * leading RSS client id
11438     */
11439    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11440}
11441
11442/*
11443 * This function performs 2 steps in a queue state machine:
11444 *   1) RESET->INIT
11445 *   2) INIT->SETUP
11446 */
11447static int
11448bxe_setup_queue(struct bxe_softc    *sc,
11449                struct bxe_fastpath *fp,
11450                uint8_t             leading)
11451{
11452    struct ecore_queue_state_params q_params = { NULL };
11453    struct ecore_queue_setup_params *setup_params =
11454                        &q_params.params.setup;
11455    int rc;
11456
11457    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11458
11459    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11460
11461    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11462
11463    /* we want to wait for completion in this context */
11464    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11465
11466    /* prepare the INIT parameters */
11467    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11468
11469    /* Set the command */
11470    q_params.cmd = ECORE_Q_CMD_INIT;
11471
11472    /* Change the state to INIT */
11473    rc = ecore_queue_state_change(sc, &q_params);
11474    if (rc) {
11475        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11476        return (rc);
11477    }
11478
11479    BLOGD(sc, DBG_LOAD, "init complete\n");
11480
11481    /* now move the Queue to the SETUP state */
11482    memset(setup_params, 0, sizeof(*setup_params));
11483
11484    /* set Queue flags */
11485    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11486
11487    /* set general SETUP parameters */
11488    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11489                          FIRST_TX_COS_INDEX);
11490
11491    bxe_pf_rx_q_prep(sc, fp,
11492                     &setup_params->pause_params,
11493                     &setup_params->rxq_params);
11494
11495    bxe_pf_tx_q_prep(sc, fp,
11496                     &setup_params->txq_params,
11497                     FIRST_TX_COS_INDEX);
11498
11499    /* Set the command */
11500    q_params.cmd = ECORE_Q_CMD_SETUP;
11501
11502    /* change the state to SETUP */
11503    rc = ecore_queue_state_change(sc, &q_params);
11504    if (rc) {
11505        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11506        return (rc);
11507    }
11508
11509    return (rc);
11510}
11511
11512static int
11513bxe_setup_leading(struct bxe_softc *sc)
11514{
11515    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11516}
11517
11518static int
11519bxe_config_rss_pf(struct bxe_softc            *sc,
11520                  struct ecore_rss_config_obj *rss_obj,
11521                  uint8_t                     config_hash)
11522{
11523    struct ecore_config_rss_params params = { NULL };
11524    int i;
11525
11526    /*
11527     * Although RSS is meaningless when there is a single HW queue we
11528     * still need it enabled in order to have HW Rx hash generated.
11529     */
11530
11531    params.rss_obj = rss_obj;
11532
11533    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11534
11535    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11536
11537    /* RSS configuration */
11538    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11539    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11540    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11541    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11542    if (rss_obj->udp_rss_v4) {
11543        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11544    }
11545    if (rss_obj->udp_rss_v6) {
11546        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11547    }
11548
11549    /* Hash bits */
11550    params.rss_result_mask = MULTI_MASK;
11551
11552    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11553
11554    if (config_hash) {
11555        /* RSS keys */
11556        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11557            params.rss_key[i] = arc4random();
11558        }
11559
11560        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11561    }
11562
11563    return (ecore_config_rss(sc, &params));
11564}
11565
11566static int
11567bxe_config_rss_eth(struct bxe_softc *sc,
11568                   uint8_t          config_hash)
11569{
11570    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11571}
11572
11573static int
11574bxe_init_rss_pf(struct bxe_softc *sc)
11575{
11576    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11577    int i;
11578
11579    /*
11580     * Prepare the initial contents of the indirection table if
11581     * RSS is enabled
11582     */
11583    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11584        sc->rss_conf_obj.ind_table[i] =
11585            (sc->fp->cl_id + (i % num_eth_queues));
11586    }
11587
11588    if (sc->udp_rss) {
11589        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11590    }
11591
11592    /*
11593     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11594     * per-port, so if explicit configuration is needed, do it only
11595     * for a PMF.
11596     *
11597     * For 57712 and newer it's a per-function configuration.
11598     */
11599    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11600}
11601
11602static int
11603bxe_set_mac_one(struct bxe_softc          *sc,
11604                uint8_t                   *mac,
11605                struct ecore_vlan_mac_obj *obj,
11606                uint8_t                   set,
11607                int                       mac_type,
11608                unsigned long             *ramrod_flags)
11609{
11610    struct ecore_vlan_mac_ramrod_params ramrod_param;
11611    int rc;
11612
11613    memset(&ramrod_param, 0, sizeof(ramrod_param));
11614
11615    /* fill in general parameters */
11616    ramrod_param.vlan_mac_obj = obj;
11617    ramrod_param.ramrod_flags = *ramrod_flags;
11618
11619    /* fill a user request section if needed */
11620    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11621        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11622
11623        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11624
11625        /* Set the command: ADD or DEL */
11626        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11627                                            ECORE_VLAN_MAC_DEL;
11628    }
11629
11630    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11631
11632    if (rc == ECORE_EXISTS) {
11633        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11634        /* do not treat adding same MAC as error */
11635        rc = 0;
11636    } else if (rc < 0) {
11637        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11638    }
11639
11640    return (rc);
11641}
11642
11643static int
11644bxe_set_eth_mac(struct bxe_softc *sc,
11645                uint8_t          set)
11646{
11647    unsigned long ramrod_flags = 0;
11648
11649    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11650
11651    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11652
11653    /* Eth MAC is set on RSS leading client (fp[0]) */
11654    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11655                            &sc->sp_objs->mac_obj,
11656                            set, ECORE_ETH_MAC, &ramrod_flags));
11657}
11658
11659static int
11660bxe_get_cur_phy_idx(struct bxe_softc *sc)
11661{
11662    uint32_t sel_phy_idx = 0;
11663
11664    if (sc->link_params.num_phys <= 1) {
11665        return (ELINK_INT_PHY);
11666    }
11667
11668    if (sc->link_vars.link_up) {
11669        sel_phy_idx = ELINK_EXT_PHY1;
11670        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11671        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11672            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11673             ELINK_SUPPORTED_FIBRE))
11674            sel_phy_idx = ELINK_EXT_PHY2;
11675    } else {
11676        switch (elink_phy_selection(&sc->link_params)) {
11677        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11678        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11679        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11680               sel_phy_idx = ELINK_EXT_PHY1;
11681               break;
11682        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11683        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11684               sel_phy_idx = ELINK_EXT_PHY2;
11685               break;
11686        }
11687    }
11688
11689    return (sel_phy_idx);
11690}
11691
11692static int
11693bxe_get_link_cfg_idx(struct bxe_softc *sc)
11694{
11695    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11696
11697    /*
11698     * The selected activated PHY is always after swapping (in case PHY
11699     * swapping is enabled). So when swapping is enabled, we need to reverse
11700     * the configuration
11701     */
11702
11703    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11704        if (sel_phy_idx == ELINK_EXT_PHY1)
11705            sel_phy_idx = ELINK_EXT_PHY2;
11706        else if (sel_phy_idx == ELINK_EXT_PHY2)
11707            sel_phy_idx = ELINK_EXT_PHY1;
11708    }
11709
11710    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11711}
11712
11713static void
11714bxe_set_requested_fc(struct bxe_softc *sc)
11715{
11716    /*
11717     * Initialize link parameters structure variables
11718     * It is recommended to turn off RX FC for jumbo frames
11719     * for better performance
11720     */
11721    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11722        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11723    } else {
11724        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11725    }
11726}
11727
11728static void
11729bxe_calc_fc_adv(struct bxe_softc *sc)
11730{
11731    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11732    switch (sc->link_vars.ieee_fc &
11733            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11734    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11735    default:
11736        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11737                                           ADVERTISED_Pause);
11738        break;
11739
11740    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11741        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11742                                          ADVERTISED_Pause);
11743        break;
11744
11745    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11746        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11747        break;
11748    }
11749}
11750
11751static uint16_t
11752bxe_get_mf_speed(struct bxe_softc *sc)
11753{
11754    uint16_t line_speed = sc->link_vars.line_speed;
11755    if (IS_MF(sc)) {
11756        uint16_t maxCfg =
11757            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11758
11759        /* calculate the current MAX line speed limit for the MF devices */
11760        if (IS_MF_SI(sc)) {
11761            line_speed = (line_speed * maxCfg) / 100;
11762        } else { /* SD mode */
11763            uint16_t vn_max_rate = maxCfg * 100;
11764
11765            if (vn_max_rate < line_speed) {
11766                line_speed = vn_max_rate;
11767            }
11768        }
11769    }
11770
11771    return (line_speed);
11772}
11773
11774static void
11775bxe_fill_report_data(struct bxe_softc            *sc,
11776                     struct bxe_link_report_data *data)
11777{
11778    uint16_t line_speed = bxe_get_mf_speed(sc);
11779
11780    memset(data, 0, sizeof(*data));
11781
11782    /* fill the report data with the effective line speed */
11783    data->line_speed = line_speed;
11784
11785    /* Link is down */
11786    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11787        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11788    }
11789
11790    /* Full DUPLEX */
11791    if (sc->link_vars.duplex == DUPLEX_FULL) {
11792        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11793    }
11794
11795    /* Rx Flow Control is ON */
11796    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11797        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11798    }
11799
11800    /* Tx Flow Control is ON */
11801    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11802        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11803    }
11804}
11805
11806/* report link status to OS, should be called under phy_lock */
11807static void
11808bxe_link_report_locked(struct bxe_softc *sc)
11809{
11810    struct bxe_link_report_data cur_data;
11811
11812    /* reread mf_cfg */
11813    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11814        bxe_read_mf_cfg(sc);
11815    }
11816
11817    /* Read the current link report info */
11818    bxe_fill_report_data(sc, &cur_data);
11819
11820    /* Don't report link down or exactly the same link status twice */
11821    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11822        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11823                      &sc->last_reported_link.link_report_flags) &&
11824         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11825                      &cur_data.link_report_flags))) {
11826        return;
11827    }
11828
11829    sc->link_cnt++;
11830
11831    /* report new link params and remember the state for the next time */
11832    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11833
11834    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11835                     &cur_data.link_report_flags)) {
11836        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11837        BLOGI(sc, "NIC Link is Down\n");
11838    } else {
11839        const char *duplex;
11840        const char *flow;
11841
11842        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11843                                   &cur_data.link_report_flags)) {
11844            duplex = "full";
11845        } else {
11846            duplex = "half";
11847        }
11848
11849        /*
11850         * Handle the FC at the end so that only these flags would be
11851         * possibly set. This way we may easily check if there is no FC
11852         * enabled.
11853         */
11854        if (cur_data.link_report_flags) {
11855            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11856                             &cur_data.link_report_flags) &&
11857                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11858                             &cur_data.link_report_flags)) {
11859                flow = "ON - receive & transmit";
11860            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11861                                    &cur_data.link_report_flags) &&
11862                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11863                                     &cur_data.link_report_flags)) {
11864                flow = "ON - receive";
11865            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11866                                     &cur_data.link_report_flags) &&
11867                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11868                                    &cur_data.link_report_flags)) {
11869                flow = "ON - transmit";
11870            } else {
11871                flow = "none"; /* possible? */
11872            }
11873        } else {
11874            flow = "none";
11875        }
11876
11877        if_link_state_change(sc->ifp, LINK_STATE_UP);
11878        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11879              cur_data.line_speed, duplex, flow);
11880    }
11881}
11882
11883static void
11884bxe_link_report(struct bxe_softc *sc)
11885{
11886    bxe_acquire_phy_lock(sc);
11887    bxe_link_report_locked(sc);
11888    bxe_release_phy_lock(sc);
11889}
11890
11891static void
11892bxe_link_status_update(struct bxe_softc *sc)
11893{
11894    if (sc->state != BXE_STATE_OPEN) {
11895        return;
11896    }
11897
11898    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11899        elink_link_status_update(&sc->link_params, &sc->link_vars);
11900    } else {
11901        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11902                                  ELINK_SUPPORTED_10baseT_Full |
11903                                  ELINK_SUPPORTED_100baseT_Half |
11904                                  ELINK_SUPPORTED_100baseT_Full |
11905                                  ELINK_SUPPORTED_1000baseT_Full |
11906                                  ELINK_SUPPORTED_2500baseX_Full |
11907                                  ELINK_SUPPORTED_10000baseT_Full |
11908                                  ELINK_SUPPORTED_TP |
11909                                  ELINK_SUPPORTED_FIBRE |
11910                                  ELINK_SUPPORTED_Autoneg |
11911                                  ELINK_SUPPORTED_Pause |
11912                                  ELINK_SUPPORTED_Asym_Pause);
11913        sc->port.advertising[0] = sc->port.supported[0];
11914
11915        sc->link_params.sc                = sc;
11916        sc->link_params.port              = SC_PORT(sc);
11917        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11918        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11919        sc->link_params.req_line_speed[0] = SPEED_10000;
11920        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11921        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11922
11923        if (CHIP_REV_IS_FPGA(sc)) {
11924            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11925            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11926            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11927                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11928        } else {
11929            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11930            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11931            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11932                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11933        }
11934
11935        sc->link_vars.link_up = 1;
11936
11937        sc->link_vars.duplex    = DUPLEX_FULL;
11938        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11939
11940        if (IS_PF(sc)) {
11941            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11942            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11943            bxe_link_report(sc);
11944        }
11945    }
11946
11947    if (IS_PF(sc)) {
11948        if (sc->link_vars.link_up) {
11949            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11950        } else {
11951            bxe_stats_handle(sc, STATS_EVENT_STOP);
11952        }
11953        bxe_link_report(sc);
11954    } else {
11955        bxe_link_report(sc);
11956        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11957    }
11958}
11959
11960static int
11961bxe_initial_phy_init(struct bxe_softc *sc,
11962                     int              load_mode)
11963{
11964    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11965    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11966    struct elink_params *lp = &sc->link_params;
11967
11968    bxe_set_requested_fc(sc);
11969
11970    if (CHIP_REV_IS_SLOW(sc)) {
11971        uint32_t bond = CHIP_BOND_ID(sc);
11972        uint32_t feat = 0;
11973
11974        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11975            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11976        } else if (bond & 0x4) {
11977            if (CHIP_IS_E3(sc)) {
11978                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11979            } else {
11980                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11981            }
11982        } else if (bond & 0x8) {
11983            if (CHIP_IS_E3(sc)) {
11984                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11985            } else {
11986                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11987            }
11988        }
11989
11990        /* disable EMAC for E3 and above */
11991        if (bond & 0x2) {
11992            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11993        }
11994
11995        sc->link_params.feature_config_flags |= feat;
11996    }
11997
11998    bxe_acquire_phy_lock(sc);
11999
12000    if (load_mode == LOAD_DIAG) {
12001        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12002        /* Prefer doing PHY loopback at 10G speed, if possible */
12003        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12004            if (lp->speed_cap_mask[cfg_idx] &
12005                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12006                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12007            } else {
12008                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12009            }
12010        }
12011    }
12012
12013    if (load_mode == LOAD_LOOPBACK_EXT) {
12014        lp->loopback_mode = ELINK_LOOPBACK_EXT;
12015    }
12016
12017    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12018
12019    bxe_release_phy_lock(sc);
12020
12021    bxe_calc_fc_adv(sc);
12022
12023    if (sc->link_vars.link_up) {
12024        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12025        bxe_link_report(sc);
12026    }
12027
12028    if (!CHIP_REV_IS_SLOW(sc)) {
12029        bxe_periodic_start(sc);
12030    }
12031
12032    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12033    return (rc);
12034}
12035
12036/* must be called under IF_ADDR_LOCK */
12037
12038static int
12039bxe_set_mc_list(struct bxe_softc *sc)
12040{
12041    struct ecore_mcast_ramrod_params rparam = { NULL };
12042    int rc = 0;
12043    int mc_count = 0;
12044    int mcnt, i;
12045    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
12046    unsigned char *mta;
12047    if_t ifp = sc->ifp;
12048
12049    mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */
12050    if (!mc_count)
12051        return (0);
12052
12053    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12054            mc_count, M_DEVBUF, M_NOWAIT);
12055
12056    if(mta == NULL) {
12057        BLOGE(sc, "Failed to allocate temp mcast list\n");
12058        return (-1);
12059    }
12060    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
12061
12062    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
12063    mc_mac_start = mc_mac;
12064
12065    if (!mc_mac) {
12066        free(mta, M_DEVBUF);
12067        BLOGE(sc, "Failed to allocate temp mcast list\n");
12068        return (-1);
12069    }
12070    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12071
12072    /* mta and mcnt not expected to be  different */
12073    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
12074
12075
12076    rparam.mcast_obj = &sc->mcast_obj;
12077    ECORE_LIST_INIT(&rparam.mcast_list);
12078
12079    for(i=0; i< mcnt; i++) {
12080
12081        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
12082        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
12083
12084        BLOGD(sc, DBG_LOAD,
12085              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12086              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12087              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12088
12089        mc_mac++;
12090    }
12091    rparam.mcast_list_len = mc_count;
12092
12093    BXE_MCAST_LOCK(sc);
12094
12095    /* first, clear all configured multicast MACs */
12096    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12097    if (rc < 0) {
12098        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12099        BXE_MCAST_UNLOCK(sc);
12100    	free(mc_mac_start, M_DEVBUF);
12101        free(mta, M_DEVBUF);
12102        return (rc);
12103    }
12104
12105    /* Now add the new MACs */
12106    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12107    if (rc < 0) {
12108        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12109    }
12110
12111    BXE_MCAST_UNLOCK(sc);
12112
12113    free(mc_mac_start, M_DEVBUF);
12114    free(mta, M_DEVBUF);
12115
12116    return (rc);
12117}
12118
12119static int
12120bxe_set_uc_list(struct bxe_softc *sc)
12121{
12122    if_t ifp = sc->ifp;
12123    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12124    struct ifaddr *ifa;
12125    unsigned long ramrod_flags = 0;
12126    int rc;
12127
12128#if __FreeBSD_version < 800000
12129    IF_ADDR_LOCK(ifp);
12130#else
12131    if_addr_rlock(ifp);
12132#endif
12133
12134    /* first schedule a cleanup up of old configuration */
12135    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12136    if (rc < 0) {
12137        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12138#if __FreeBSD_version < 800000
12139        IF_ADDR_UNLOCK(ifp);
12140#else
12141        if_addr_runlock(ifp);
12142#endif
12143        return (rc);
12144    }
12145
12146    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12147    while (ifa) {
12148        if (ifa->ifa_addr->sa_family != AF_LINK) {
12149            ifa = TAILQ_NEXT(ifa, ifa_link);
12150            continue;
12151        }
12152
12153        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12154                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12155        if (rc == -EEXIST) {
12156            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12157            /* do not treat adding same MAC as an error */
12158            rc = 0;
12159        } else if (rc < 0) {
12160            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12161#if __FreeBSD_version < 800000
12162            IF_ADDR_UNLOCK(ifp);
12163#else
12164            if_addr_runlock(ifp);
12165#endif
12166            return (rc);
12167        }
12168
12169        ifa = TAILQ_NEXT(ifa, ifa_link);
12170    }
12171
12172#if __FreeBSD_version < 800000
12173    IF_ADDR_UNLOCK(ifp);
12174#else
12175    if_addr_runlock(ifp);
12176#endif
12177
12178    /* Execute the pending commands */
12179    bit_set(&ramrod_flags, RAMROD_CONT);
12180    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12181                            ECORE_UC_LIST_MAC, &ramrod_flags));
12182}
12183
12184static void
12185bxe_set_rx_mode(struct bxe_softc *sc)
12186{
12187    if_t ifp = sc->ifp;
12188    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12189
12190    if (sc->state != BXE_STATE_OPEN) {
12191        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12192        return;
12193    }
12194
12195    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12196
12197    if (if_getflags(ifp) & IFF_PROMISC) {
12198        rx_mode = BXE_RX_MODE_PROMISC;
12199    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12200               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12201                CHIP_IS_E1(sc))) {
12202        rx_mode = BXE_RX_MODE_ALLMULTI;
12203    } else {
12204        if (IS_PF(sc)) {
12205            /* some multicasts */
12206            if (bxe_set_mc_list(sc) < 0) {
12207                rx_mode = BXE_RX_MODE_ALLMULTI;
12208            }
12209            if (bxe_set_uc_list(sc) < 0) {
12210                rx_mode = BXE_RX_MODE_PROMISC;
12211            }
12212        }
12213    }
12214
12215    sc->rx_mode = rx_mode;
12216
12217    /* schedule the rx_mode command */
12218    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12219        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12220        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12221        return;
12222    }
12223
12224    if (IS_PF(sc)) {
12225        bxe_set_storm_rx_mode(sc);
12226    }
12227}
12228
12229
12230/* update flags in shmem */
12231static void
12232bxe_update_drv_flags(struct bxe_softc *sc,
12233                     uint32_t         flags,
12234                     uint32_t         set)
12235{
12236    uint32_t drv_flags;
12237
12238    if (SHMEM2_HAS(sc, drv_flags)) {
12239        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12240        drv_flags = SHMEM2_RD(sc, drv_flags);
12241
12242        if (set) {
12243            SET_FLAGS(drv_flags, flags);
12244        } else {
12245            RESET_FLAGS(drv_flags, flags);
12246        }
12247
12248        SHMEM2_WR(sc, drv_flags, drv_flags);
12249        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12250
12251        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12252    }
12253}
12254
12255/* periodic timer callout routine, only runs when the interface is up */
12256
12257static void
12258bxe_periodic_callout_func(void *xsc)
12259{
12260    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12261    int i;
12262
12263    if (!BXE_CORE_TRYLOCK(sc)) {
12264        /* just bail and try again next time */
12265
12266        if ((sc->state == BXE_STATE_OPEN) &&
12267            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12268            /* schedule the next periodic callout */
12269            callout_reset(&sc->periodic_callout, hz,
12270                          bxe_periodic_callout_func, sc);
12271        }
12272
12273        return;
12274    }
12275
12276    if ((sc->state != BXE_STATE_OPEN) ||
12277        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12278        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12279        BXE_CORE_UNLOCK(sc);
12280        return;
12281    }
12282
12283    /* Check for TX timeouts on any fastpath. */
12284    FOR_EACH_QUEUE(sc, i) {
12285        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12286            /* Ruh-Roh, chip was reset! */
12287            break;
12288        }
12289    }
12290
12291    if (!CHIP_REV_IS_SLOW(sc)) {
12292        /*
12293         * This barrier is needed to ensure the ordering between the writing
12294         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12295         * the reading here.
12296         */
12297        mb();
12298        if (sc->port.pmf) {
12299	    bxe_acquire_phy_lock(sc);
12300            elink_period_func(&sc->link_params, &sc->link_vars);
12301	    bxe_release_phy_lock(sc);
12302        }
12303    }
12304
12305    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12306        int mb_idx = SC_FW_MB_IDX(sc);
12307        uint32_t drv_pulse;
12308        uint32_t mcp_pulse;
12309
12310        ++sc->fw_drv_pulse_wr_seq;
12311        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12312
12313        drv_pulse = sc->fw_drv_pulse_wr_seq;
12314        bxe_drv_pulse(sc);
12315
12316        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12317                     MCP_PULSE_SEQ_MASK);
12318
12319        /*
12320         * The delta between driver pulse and mcp response should
12321         * be 1 (before mcp response) or 0 (after mcp response).
12322         */
12323        if ((drv_pulse != mcp_pulse) &&
12324            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12325            /* someone lost a heartbeat... */
12326            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12327                  drv_pulse, mcp_pulse);
12328        }
12329    }
12330
12331    /* state is BXE_STATE_OPEN */
12332    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12333
12334    BXE_CORE_UNLOCK(sc);
12335
12336    if ((sc->state == BXE_STATE_OPEN) &&
12337        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12338        /* schedule the next periodic callout */
12339        callout_reset(&sc->periodic_callout, hz,
12340                      bxe_periodic_callout_func, sc);
12341    }
12342}
12343
12344static void
12345bxe_periodic_start(struct bxe_softc *sc)
12346{
12347    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12348    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12349}
12350
12351static void
12352bxe_periodic_stop(struct bxe_softc *sc)
12353{
12354    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12355    callout_drain(&sc->periodic_callout);
12356}
12357
12358/* start the controller */
12359static __noinline int
12360bxe_nic_load(struct bxe_softc *sc,
12361             int              load_mode)
12362{
12363    uint32_t val;
12364    int load_code = 0;
12365    int i, rc = 0;
12366
12367    BXE_CORE_LOCK_ASSERT(sc);
12368
12369    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12370
12371    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12372
12373    if (IS_PF(sc)) {
12374        /* must be called before memory allocation and HW init */
12375        bxe_ilt_set_info(sc);
12376    }
12377
12378    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12379
12380    bxe_set_fp_rx_buf_size(sc);
12381
12382    if (bxe_alloc_fp_buffers(sc) != 0) {
12383        BLOGE(sc, "Failed to allocate fastpath memory\n");
12384        sc->state = BXE_STATE_CLOSED;
12385        rc = ENOMEM;
12386        goto bxe_nic_load_error0;
12387    }
12388
12389    if (bxe_alloc_mem(sc) != 0) {
12390        sc->state = BXE_STATE_CLOSED;
12391        rc = ENOMEM;
12392        goto bxe_nic_load_error0;
12393    }
12394
12395    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12396        sc->state = BXE_STATE_CLOSED;
12397        rc = ENOMEM;
12398        goto bxe_nic_load_error0;
12399    }
12400
12401    if (IS_PF(sc)) {
12402        /* set pf load just before approaching the MCP */
12403        bxe_set_pf_load(sc);
12404
12405        /* if MCP exists send load request and analyze response */
12406        if (!BXE_NOMCP(sc)) {
12407            /* attempt to load pf */
12408            if (bxe_nic_load_request(sc, &load_code) != 0) {
12409                sc->state = BXE_STATE_CLOSED;
12410                rc = ENXIO;
12411                goto bxe_nic_load_error1;
12412            }
12413
12414            /* what did the MCP say? */
12415            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12416                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12417                sc->state = BXE_STATE_CLOSED;
12418                rc = ENXIO;
12419                goto bxe_nic_load_error2;
12420            }
12421        } else {
12422            BLOGI(sc, "Device has no MCP!\n");
12423            load_code = bxe_nic_load_no_mcp(sc);
12424        }
12425
12426        /* mark PMF if applicable */
12427        bxe_nic_load_pmf(sc, load_code);
12428
12429        /* Init Function state controlling object */
12430        bxe_init_func_obj(sc);
12431
12432        /* Initialize HW */
12433        if (bxe_init_hw(sc, load_code) != 0) {
12434            BLOGE(sc, "HW init failed\n");
12435            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12436            sc->state = BXE_STATE_CLOSED;
12437            rc = ENXIO;
12438            goto bxe_nic_load_error2;
12439        }
12440    }
12441
12442    /* set ALWAYS_ALIVE bit in shmem */
12443    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12444    bxe_drv_pulse(sc);
12445    sc->flags |= BXE_NO_PULSE;
12446
12447    /* attach interrupts */
12448    if (bxe_interrupt_attach(sc) != 0) {
12449        sc->state = BXE_STATE_CLOSED;
12450        rc = ENXIO;
12451        goto bxe_nic_load_error2;
12452    }
12453
12454    bxe_nic_init(sc, load_code);
12455
12456    /* Init per-function objects */
12457    if (IS_PF(sc)) {
12458        bxe_init_objs(sc);
12459        // XXX bxe_iov_nic_init(sc);
12460
12461        /* set AFEX default VLAN tag to an invalid value */
12462        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12463        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12464
12465        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12466        rc = bxe_func_start(sc);
12467        if (rc) {
12468            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12469            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12470            sc->state = BXE_STATE_ERROR;
12471            goto bxe_nic_load_error3;
12472        }
12473
12474        /* send LOAD_DONE command to MCP */
12475        if (!BXE_NOMCP(sc)) {
12476            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12477            if (!load_code) {
12478                BLOGE(sc, "MCP response failure, aborting\n");
12479                sc->state = BXE_STATE_ERROR;
12480                rc = ENXIO;
12481                goto bxe_nic_load_error3;
12482            }
12483        }
12484
12485        rc = bxe_setup_leading(sc);
12486        if (rc) {
12487            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12488            sc->state = BXE_STATE_ERROR;
12489            goto bxe_nic_load_error3;
12490        }
12491
12492        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12493            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12494            if (rc) {
12495                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12496                sc->state = BXE_STATE_ERROR;
12497                goto bxe_nic_load_error3;
12498            }
12499        }
12500
12501        rc = bxe_init_rss_pf(sc);
12502        if (rc) {
12503            BLOGE(sc, "PF RSS init failed\n");
12504            sc->state = BXE_STATE_ERROR;
12505            goto bxe_nic_load_error3;
12506        }
12507    }
12508    /* XXX VF */
12509
12510    /* now when Clients are configured we are ready to work */
12511    sc->state = BXE_STATE_OPEN;
12512
12513    /* Configure a ucast MAC */
12514    if (IS_PF(sc)) {
12515        rc = bxe_set_eth_mac(sc, TRUE);
12516    }
12517    if (rc) {
12518        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12519        sc->state = BXE_STATE_ERROR;
12520        goto bxe_nic_load_error3;
12521    }
12522
12523    if (sc->port.pmf) {
12524        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12525        if (rc) {
12526            sc->state = BXE_STATE_ERROR;
12527            goto bxe_nic_load_error3;
12528        }
12529    }
12530
12531    sc->link_params.feature_config_flags &=
12532        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12533
12534    /* start fast path */
12535
12536    /* Initialize Rx filter */
12537    bxe_set_rx_mode(sc);
12538
12539    /* start the Tx */
12540    switch (/* XXX load_mode */LOAD_OPEN) {
12541    case LOAD_NORMAL:
12542    case LOAD_OPEN:
12543        break;
12544
12545    case LOAD_DIAG:
12546    case LOAD_LOOPBACK_EXT:
12547        sc->state = BXE_STATE_DIAG;
12548        break;
12549
12550    default:
12551        break;
12552    }
12553
12554    if (sc->port.pmf) {
12555        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12556    } else {
12557        bxe_link_status_update(sc);
12558    }
12559
12560    /* start the periodic timer callout */
12561    bxe_periodic_start(sc);
12562
12563    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12564        /* mark driver is loaded in shmem2 */
12565        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12566        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12567                  (val |
12568                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12569                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12570    }
12571
12572    /* wait for all pending SP commands to complete */
12573    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12574        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12575        bxe_periodic_stop(sc);
12576        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12577        return (ENXIO);
12578    }
12579
12580    /* Tell the stack the driver is running! */
12581    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12582
12583    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12584
12585    return (0);
12586
12587bxe_nic_load_error3:
12588
12589    if (IS_PF(sc)) {
12590        bxe_int_disable_sync(sc, 1);
12591
12592        /* clean out queued objects */
12593        bxe_squeeze_objects(sc);
12594    }
12595
12596    bxe_interrupt_detach(sc);
12597
12598bxe_nic_load_error2:
12599
12600    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12601        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12602        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12603    }
12604
12605    sc->port.pmf = 0;
12606
12607bxe_nic_load_error1:
12608
12609    /* clear pf_load status, as it was already set */
12610    if (IS_PF(sc)) {
12611        bxe_clear_pf_load(sc);
12612    }
12613
12614bxe_nic_load_error0:
12615
12616    bxe_free_fw_stats_mem(sc);
12617    bxe_free_fp_buffers(sc);
12618    bxe_free_mem(sc);
12619
12620    return (rc);
12621}
12622
12623static int
12624bxe_init_locked(struct bxe_softc *sc)
12625{
12626    int other_engine = SC_PATH(sc) ? 0 : 1;
12627    uint8_t other_load_status, load_status;
12628    uint8_t global = FALSE;
12629    int rc;
12630
12631    BXE_CORE_LOCK_ASSERT(sc);
12632
12633    /* check if the driver is already running */
12634    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12635        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12636        return (0);
12637    }
12638
12639    bxe_set_power_state(sc, PCI_PM_D0);
12640
12641    /*
12642     * If parity occurred during the unload, then attentions and/or
12643     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
12644     * loaded on the current engine to complete the recovery. Parity recovery
12645     * is only relevant for PF driver.
12646     */
12647    if (IS_PF(sc)) {
12648        other_load_status = bxe_get_load_status(sc, other_engine);
12649        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12650
12651        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12652            bxe_chk_parity_attn(sc, &global, TRUE)) {
12653            do {
12654                /*
12655                 * If there are attentions and they are in global blocks, set
12656                 * the GLOBAL_RESET bit regardless whether it will be this
12657                 * function that will complete the recovery or not.
12658                 */
12659                if (global) {
12660                    bxe_set_reset_global(sc);
12661                }
12662
12663                /*
12664                 * Only the first function on the current engine should try
12665                 * to recover in open. In case of attentions in global blocks
12666                 * only the first in the chip should try to recover.
12667                 */
12668                if ((!load_status && (!global || !other_load_status)) &&
12669                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12670                    BLOGI(sc, "Recovered during init\n");
12671                    break;
12672                }
12673
12674                /* recovery has failed... */
12675                bxe_set_power_state(sc, PCI_PM_D3hot);
12676                sc->recovery_state = BXE_RECOVERY_FAILED;
12677
12678                BLOGE(sc, "Recovery flow hasn't properly "
12679                          "completed yet, try again later. "
12680                          "If you still see this message after a "
12681                          "few retries then power cycle is required.\n");
12682
12683                rc = ENXIO;
12684                goto bxe_init_locked_done;
12685            } while (0);
12686        }
12687    }
12688
12689    sc->recovery_state = BXE_RECOVERY_DONE;
12690
12691    rc = bxe_nic_load(sc, LOAD_OPEN);
12692
12693bxe_init_locked_done:
12694
12695    if (rc) {
12696        /* Tell the stack the driver is NOT running! */
12697        BLOGE(sc, "Initialization failed, "
12698                  "stack notified driver is NOT running!\n");
12699	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12700    }
12701
12702    return (rc);
12703}
12704
12705static int
12706bxe_stop_locked(struct bxe_softc *sc)
12707{
12708    BXE_CORE_LOCK_ASSERT(sc);
12709    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12710}
12711
12712/*
12713 * Handles controller initialization when called from an unlocked routine.
12714 * ifconfig calls this function.
12715 *
12716 * Returns:
12717 *   void
12718 */
12719static void
12720bxe_init(void *xsc)
12721{
12722    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12723
12724    BXE_CORE_LOCK(sc);
12725    bxe_init_locked(sc);
12726    BXE_CORE_UNLOCK(sc);
12727}
12728
12729static int
12730bxe_init_ifnet(struct bxe_softc *sc)
12731{
12732    if_t ifp;
12733    int capabilities;
12734
12735    /* ifconfig entrypoint for media type/status reporting */
12736    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12737                 bxe_ifmedia_update,
12738                 bxe_ifmedia_status);
12739
12740    /* set the default interface values */
12741    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12742    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12743    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12744
12745    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12746
12747    /* allocate the ifnet structure */
12748    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12749        BLOGE(sc, "Interface allocation failed!\n");
12750        return (ENXIO);
12751    }
12752
12753    if_setsoftc(ifp, sc);
12754    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12755    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12756    if_setioctlfn(ifp, bxe_ioctl);
12757    if_setstartfn(ifp, bxe_tx_start);
12758    if_setgetcounterfn(ifp, bxe_get_counter);
12759#if __FreeBSD_version >= 800000
12760    if_settransmitfn(ifp, bxe_tx_mq_start);
12761    if_setqflushfn(ifp, bxe_mq_flush);
12762#endif
12763#ifdef FreeBSD8_0
12764    if_settimer(ifp, 0);
12765#endif
12766    if_setinitfn(ifp, bxe_init);
12767    if_setmtu(ifp, sc->mtu);
12768    if_sethwassist(ifp, (CSUM_IP      |
12769                        CSUM_TCP      |
12770                        CSUM_UDP      |
12771                        CSUM_TSO      |
12772                        CSUM_TCP_IPV6 |
12773                        CSUM_UDP_IPV6));
12774
12775    capabilities =
12776#if __FreeBSD_version < 700000
12777        (IFCAP_VLAN_MTU       |
12778         IFCAP_VLAN_HWTAGGING |
12779         IFCAP_HWCSUM         |
12780         IFCAP_JUMBO_MTU      |
12781         IFCAP_LRO);
12782#else
12783        (IFCAP_VLAN_MTU       |
12784         IFCAP_VLAN_HWTAGGING |
12785         IFCAP_VLAN_HWTSO     |
12786         IFCAP_VLAN_HWFILTER  |
12787         IFCAP_VLAN_HWCSUM    |
12788         IFCAP_HWCSUM         |
12789         IFCAP_JUMBO_MTU      |
12790         IFCAP_LRO            |
12791         IFCAP_TSO4           |
12792         IFCAP_TSO6           |
12793         IFCAP_WOL_MAGIC);
12794#endif
12795    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12796    if_setbaudrate(ifp, IF_Gbps(10));
12797/* XXX */
12798    if_setsendqlen(ifp, sc->tx_ring_size);
12799    if_setsendqready(ifp);
12800/* XXX */
12801
12802    sc->ifp = ifp;
12803
12804    /* attach to the Ethernet interface list */
12805    ether_ifattach(ifp, sc->link_params.mac_addr);
12806
12807    return (0);
12808}
12809
12810static void
12811bxe_deallocate_bars(struct bxe_softc *sc)
12812{
12813    int i;
12814
12815    for (i = 0; i < MAX_BARS; i++) {
12816        if (sc->bar[i].resource != NULL) {
12817            bus_release_resource(sc->dev,
12818                                 SYS_RES_MEMORY,
12819                                 sc->bar[i].rid,
12820                                 sc->bar[i].resource);
12821            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12822                  i, PCIR_BAR(i));
12823        }
12824    }
12825}
12826
12827static int
12828bxe_allocate_bars(struct bxe_softc *sc)
12829{
12830    u_int flags;
12831    int i;
12832
12833    memset(sc->bar, 0, sizeof(sc->bar));
12834
12835    for (i = 0; i < MAX_BARS; i++) {
12836
12837        /* memory resources reside at BARs 0, 2, 4 */
12838        /* Run `pciconf -lb` to see mappings */
12839        if ((i != 0) && (i != 2) && (i != 4)) {
12840            continue;
12841        }
12842
12843        sc->bar[i].rid = PCIR_BAR(i);
12844
12845        flags = RF_ACTIVE;
12846        if (i == 0) {
12847            flags |= RF_SHAREABLE;
12848        }
12849
12850        if ((sc->bar[i].resource =
12851             bus_alloc_resource_any(sc->dev,
12852                                    SYS_RES_MEMORY,
12853                                    &sc->bar[i].rid,
12854                                    flags)) == NULL) {
12855            return (0);
12856        }
12857
12858        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12859        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12860        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12861
12862        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
12863              i, PCIR_BAR(i),
12864              (void *)rman_get_start(sc->bar[i].resource),
12865              (void *)rman_get_end(sc->bar[i].resource),
12866              rman_get_size(sc->bar[i].resource),
12867              (void *)sc->bar[i].kva);
12868    }
12869
12870    return (0);
12871}
12872
12873static void
12874bxe_get_function_num(struct bxe_softc *sc)
12875{
12876    uint32_t val = 0;
12877
12878    /*
12879     * Read the ME register to get the function number. The ME register
12880     * holds the relative-function number and absolute-function number. The
12881     * absolute-function number appears only in E2 and above. Before that
12882     * these bits always contained zero, therefore we cannot blindly use them.
12883     */
12884
12885    val = REG_RD(sc, BAR_ME_REGISTER);
12886
12887    sc->pfunc_rel =
12888        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12889    sc->path_id =
12890        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12891
12892    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12893        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12894    } else {
12895        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12896    }
12897
12898    BLOGD(sc, DBG_LOAD,
12899          "Relative function %d, Absolute function %d, Path %d\n",
12900          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12901}
12902
12903static uint32_t
12904bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12905{
12906    uint32_t shmem2_size;
12907    uint32_t offset;
12908    uint32_t mf_cfg_offset_value;
12909
12910    /* Non 57712 */
12911    offset = (SHMEM_RD(sc, func_mb) +
12912              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12913
12914    /* 57712 plus */
12915    if (sc->devinfo.shmem2_base != 0) {
12916        shmem2_size = SHMEM2_RD(sc, size);
12917        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12918            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12919            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12920                offset = mf_cfg_offset_value;
12921            }
12922        }
12923    }
12924
12925    return (offset);
12926}
12927
12928static uint32_t
12929bxe_pcie_capability_read(struct bxe_softc *sc,
12930                         int    reg,
12931                         int    width)
12932{
12933    int pcie_reg;
12934
12935    /* ensure PCIe capability is enabled */
12936    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12937        if (pcie_reg != 0) {
12938            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12939            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12940        }
12941    }
12942
12943    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12944
12945    return (0);
12946}
12947
12948static uint8_t
12949bxe_is_pcie_pending(struct bxe_softc *sc)
12950{
12951    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12952            PCIM_EXP_STA_TRANSACTION_PND);
12953}
12954
12955/*
12956 * Walk the PCI capabiites list for the device to find what features are
12957 * supported. These capabilites may be enabled/disabled by firmware so it's
12958 * best to walk the list rather than make assumptions.
12959 */
12960static void
12961bxe_probe_pci_caps(struct bxe_softc *sc)
12962{
12963    uint16_t link_status;
12964    int reg;
12965
12966    /* check if PCI Power Management is enabled */
12967    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12968        if (reg != 0) {
12969            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12970
12971            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12972            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12973        }
12974    }
12975
12976    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12977
12978    /* handle PCIe 2.0 workarounds for 57710 */
12979    if (CHIP_IS_E1(sc)) {
12980        /* workaround for 57710 errata E4_57710_27462 */
12981        sc->devinfo.pcie_link_speed =
12982            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12983
12984        /* workaround for 57710 errata E4_57710_27488 */
12985        sc->devinfo.pcie_link_width =
12986            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12987        if (sc->devinfo.pcie_link_speed > 1) {
12988            sc->devinfo.pcie_link_width =
12989                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12990        }
12991    } else {
12992        sc->devinfo.pcie_link_speed =
12993            (link_status & PCIM_LINK_STA_SPEED);
12994        sc->devinfo.pcie_link_width =
12995            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12996    }
12997
12998    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12999          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13000
13001    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13002    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13003
13004    /* check if MSI capability is enabled */
13005    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13006        if (reg != 0) {
13007            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13008
13009            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13010            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13011        }
13012    }
13013
13014    /* check if MSI-X capability is enabled */
13015    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13016        if (reg != 0) {
13017            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13018
13019            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13020            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13021        }
13022    }
13023}
13024
13025static int
13026bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13027{
13028    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13029    uint32_t val;
13030
13031    /* get the outer vlan if we're in switch-dependent mode */
13032
13033    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13034    mf_info->ext_id = (uint16_t)val;
13035
13036    mf_info->multi_vnics_mode = 1;
13037
13038    if (!VALID_OVLAN(mf_info->ext_id)) {
13039        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13040        return (1);
13041    }
13042
13043    /* get the capabilities */
13044    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13045        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13046        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13047    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13048               FUNC_MF_CFG_PROTOCOL_FCOE) {
13049        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13050    } else {
13051        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13052    }
13053
13054    mf_info->vnics_per_port =
13055        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13056
13057    return (0);
13058}
13059
13060static uint32_t
13061bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13062{
13063    uint32_t retval = 0;
13064    uint32_t val;
13065
13066    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13067
13068    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13069        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13070            retval |= MF_PROTO_SUPPORT_ETHERNET;
13071        }
13072        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13073            retval |= MF_PROTO_SUPPORT_ISCSI;
13074        }
13075        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13076            retval |= MF_PROTO_SUPPORT_FCOE;
13077        }
13078    }
13079
13080    return (retval);
13081}
13082
13083static int
13084bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13085{
13086    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13087    uint32_t val;
13088
13089    /*
13090     * There is no outer vlan if we're in switch-independent mode.
13091     * If the mac is valid then assume multi-function.
13092     */
13093
13094    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13095
13096    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13097
13098    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13099
13100    mf_info->vnics_per_port =
13101        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13102
13103    return (0);
13104}
13105
13106static int
13107bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13108{
13109    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13110    uint32_t e1hov_tag;
13111    uint32_t func_config;
13112    uint32_t niv_config;
13113
13114    mf_info->multi_vnics_mode = 1;
13115
13116    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13117    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13118    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13119
13120    mf_info->ext_id =
13121        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13122                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13123
13124    mf_info->default_vlan =
13125        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13126                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13127
13128    mf_info->niv_allowed_priorities =
13129        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13130                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13131
13132    mf_info->niv_default_cos =
13133        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13134                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13135
13136    mf_info->afex_vlan_mode =
13137        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13138         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13139
13140    mf_info->niv_mba_enabled =
13141        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13142         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13143
13144    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13145
13146    mf_info->vnics_per_port =
13147        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13148
13149    return (0);
13150}
13151
13152static int
13153bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13154{
13155    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13156    uint32_t mf_cfg1;
13157    uint32_t mf_cfg2;
13158    uint32_t ovlan1;
13159    uint32_t ovlan2;
13160    uint8_t i, j;
13161
13162    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13163          SC_PORT(sc));
13164    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13165          mf_info->mf_config[SC_VN(sc)]);
13166    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13167          mf_info->multi_vnics_mode);
13168    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13169          mf_info->vnics_per_port);
13170    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13171          mf_info->ext_id);
13172    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13173          mf_info->min_bw[0], mf_info->min_bw[1],
13174          mf_info->min_bw[2], mf_info->min_bw[3]);
13175    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13176          mf_info->max_bw[0], mf_info->max_bw[1],
13177          mf_info->max_bw[2], mf_info->max_bw[3]);
13178    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13179          sc->mac_addr_str);
13180
13181    /* various MF mode sanity checks... */
13182
13183    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13184        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13185              SC_PORT(sc));
13186        return (1);
13187    }
13188
13189    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13190        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13191              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13192        return (1);
13193    }
13194
13195    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13196        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13197        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13198            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13199                  SC_VN(sc), OVLAN(sc));
13200            return (1);
13201        }
13202
13203        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13204            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13205                  mf_info->multi_vnics_mode, OVLAN(sc));
13206            return (1);
13207        }
13208
13209        /*
13210         * Verify all functions are either MF or SF mode. If MF, make sure
13211         * sure that all non-hidden functions have a valid ovlan. If SF,
13212         * make sure that all non-hidden functions have an invalid ovlan.
13213         */
13214        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13215            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13216            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13217            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13218                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13219                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13220                BLOGE(sc, "mf_mode=SD function %d MF config "
13221                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13222                      i, mf_info->multi_vnics_mode, ovlan1);
13223                return (1);
13224            }
13225        }
13226
13227        /* Verify all funcs on the same port each have a different ovlan. */
13228        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13229            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13230            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13231            /* iterate from the next function on the port to the max func */
13232            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13233                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13234                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13235                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13236                    VALID_OVLAN(ovlan1) &&
13237                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13238                    VALID_OVLAN(ovlan2) &&
13239                    (ovlan1 == ovlan2)) {
13240                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13241                              "have the same ovlan (%d)\n",
13242                          i, j, ovlan1);
13243                    return (1);
13244                }
13245            }
13246        }
13247    } /* MULTI_FUNCTION_SD */
13248
13249    return (0);
13250}
13251
13252static int
13253bxe_get_mf_cfg_info(struct bxe_softc *sc)
13254{
13255    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13256    uint32_t val, mac_upper;
13257    uint8_t i, vnic;
13258
13259    /* initialize mf_info defaults */
13260    mf_info->vnics_per_port   = 1;
13261    mf_info->multi_vnics_mode = FALSE;
13262    mf_info->path_has_ovlan   = FALSE;
13263    mf_info->mf_mode          = SINGLE_FUNCTION;
13264
13265    if (!CHIP_IS_MF_CAP(sc)) {
13266        return (0);
13267    }
13268
13269    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13270        BLOGE(sc, "Invalid mf_cfg_base!\n");
13271        return (1);
13272    }
13273
13274    /* get the MF mode (switch dependent / independent / single-function) */
13275
13276    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13277
13278    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13279    {
13280    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13281
13282        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13283
13284        /* check for legal upper mac bytes */
13285        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13286            mf_info->mf_mode = MULTI_FUNCTION_SI;
13287        } else {
13288            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13289        }
13290
13291        break;
13292
13293    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13294    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13295
13296        /* get outer vlan configuration */
13297        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13298
13299        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13300            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13301            mf_info->mf_mode = MULTI_FUNCTION_SD;
13302        } else {
13303            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13304        }
13305
13306        break;
13307
13308    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13309
13310        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13311        return (0);
13312
13313    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13314
13315        /*
13316         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13317         * and the MAC address is valid.
13318         */
13319        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13320
13321        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13322            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13323            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13324        } else {
13325            BLOGE(sc, "Invalid config for AFEX mode\n");
13326        }
13327
13328        break;
13329
13330    default:
13331
13332        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13333              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13334
13335        return (1);
13336    }
13337
13338    /* set path mf_mode (which could be different than function mf_mode) */
13339    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13340        mf_info->path_has_ovlan = TRUE;
13341    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13342        /*
13343         * Decide on path multi vnics mode. If we're not in MF mode and in
13344         * 4-port mode, this is good enough to check vnic-0 of the other port
13345         * on the same path
13346         */
13347        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13348            uint8_t other_port = !(PORT_ID(sc) & 1);
13349            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13350
13351            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13352
13353            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13354        }
13355    }
13356
13357    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13358        /* invalid MF config */
13359        if (SC_VN(sc) >= 1) {
13360            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13361            return (1);
13362        }
13363
13364        return (0);
13365    }
13366
13367    /* get the MF configuration */
13368    mf_info->mf_config[SC_VN(sc)] =
13369        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13370
13371    switch(mf_info->mf_mode)
13372    {
13373    case MULTI_FUNCTION_SD:
13374
13375        bxe_get_shmem_mf_cfg_info_sd(sc);
13376        break;
13377
13378    case MULTI_FUNCTION_SI:
13379
13380        bxe_get_shmem_mf_cfg_info_si(sc);
13381        break;
13382
13383    case MULTI_FUNCTION_AFEX:
13384
13385        bxe_get_shmem_mf_cfg_info_niv(sc);
13386        break;
13387
13388    default:
13389
13390        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13391              mf_info->mf_mode);
13392        return (1);
13393    }
13394
13395    /* get the congestion management parameters */
13396
13397    vnic = 0;
13398    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13399        /* get min/max bw */
13400        val = MFCFG_RD(sc, func_mf_config[i].config);
13401        mf_info->min_bw[vnic] =
13402            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13403        mf_info->max_bw[vnic] =
13404            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13405        vnic++;
13406    }
13407
13408    return (bxe_check_valid_mf_cfg(sc));
13409}
13410
13411static int
13412bxe_get_shmem_info(struct bxe_softc *sc)
13413{
13414    int port;
13415    uint32_t mac_hi, mac_lo, val;
13416
13417    port = SC_PORT(sc);
13418    mac_hi = mac_lo = 0;
13419
13420    sc->link_params.sc   = sc;
13421    sc->link_params.port = port;
13422
13423    /* get the hardware config info */
13424    sc->devinfo.hw_config =
13425        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13426    sc->devinfo.hw_config2 =
13427        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13428
13429    sc->link_params.hw_led_mode =
13430        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13431         SHARED_HW_CFG_LED_MODE_SHIFT);
13432
13433    /* get the port feature config */
13434    sc->port.config =
13435        SHMEM_RD(sc, dev_info.port_feature_config[port].config),
13436
13437    /* get the link params */
13438    sc->link_params.speed_cap_mask[0] =
13439        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13440    sc->link_params.speed_cap_mask[1] =
13441        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13442
13443    /* get the lane config */
13444    sc->link_params.lane_config =
13445        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13446
13447    /* get the link config */
13448    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13449    sc->port.link_config[ELINK_INT_PHY] = val;
13450    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13451    sc->port.link_config[ELINK_EXT_PHY1] =
13452        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13453
13454    /* get the override preemphasis flag and enable it or turn it off */
13455    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13456    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13457        sc->link_params.feature_config_flags |=
13458            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13459    } else {
13460        sc->link_params.feature_config_flags &=
13461            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13462    }
13463
13464    /* get the initial value of the link params */
13465    sc->link_params.multi_phy_config =
13466        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13467
13468    /* get external phy info */
13469    sc->port.ext_phy_config =
13470        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13471
13472    /* get the multifunction configuration */
13473    bxe_get_mf_cfg_info(sc);
13474
13475    /* get the mac address */
13476    if (IS_MF(sc)) {
13477        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13478        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13479    } else {
13480        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13481        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13482    }
13483
13484    if ((mac_lo == 0) && (mac_hi == 0)) {
13485        *sc->mac_addr_str = 0;
13486        BLOGE(sc, "No Ethernet address programmed!\n");
13487    } else {
13488        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13489        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13490        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13491        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13492        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13493        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13494        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13495                 "%02x:%02x:%02x:%02x:%02x:%02x",
13496                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13497                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13498                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13499        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13500    }
13501
13502    return (0);
13503}
13504
13505static void
13506bxe_get_tunable_params(struct bxe_softc *sc)
13507{
13508    /* sanity checks */
13509
13510    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13511        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13512        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13513        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13514        bxe_interrupt_mode = INTR_MODE_MSIX;
13515    }
13516
13517    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13518        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13519        bxe_queue_count = 0;
13520    }
13521
13522    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13523        if (bxe_max_rx_bufs == 0) {
13524            bxe_max_rx_bufs = RX_BD_USABLE;
13525        } else {
13526            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13527            bxe_max_rx_bufs = 2048;
13528        }
13529    }
13530
13531    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13532        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13533        bxe_hc_rx_ticks = 25;
13534    }
13535
13536    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13537        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13538        bxe_hc_tx_ticks = 50;
13539    }
13540
13541    if (bxe_max_aggregation_size == 0) {
13542        bxe_max_aggregation_size = TPA_AGG_SIZE;
13543    }
13544
13545    if (bxe_max_aggregation_size > 0xffff) {
13546        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13547              bxe_max_aggregation_size);
13548        bxe_max_aggregation_size = TPA_AGG_SIZE;
13549    }
13550
13551    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13552        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13553        bxe_mrrs = -1;
13554    }
13555
13556    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13557        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13558        bxe_autogreeen = 0;
13559    }
13560
13561    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13562        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13563        bxe_udp_rss = 0;
13564    }
13565
13566    /* pull in user settings */
13567
13568    sc->interrupt_mode       = bxe_interrupt_mode;
13569    sc->max_rx_bufs          = bxe_max_rx_bufs;
13570    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13571    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13572    sc->max_aggregation_size = bxe_max_aggregation_size;
13573    sc->mrrs                 = bxe_mrrs;
13574    sc->autogreeen           = bxe_autogreeen;
13575    sc->udp_rss              = bxe_udp_rss;
13576
13577    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13578        sc->num_queues = 1;
13579    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13580        sc->num_queues =
13581            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13582                MAX_RSS_CHAINS);
13583        if (sc->num_queues > mp_ncpus) {
13584            sc->num_queues = mp_ncpus;
13585        }
13586    }
13587
13588    BLOGD(sc, DBG_LOAD,
13589          "User Config: "
13590          "debug=0x%lx "
13591          "interrupt_mode=%d "
13592          "queue_count=%d "
13593          "hc_rx_ticks=%d "
13594          "hc_tx_ticks=%d "
13595          "rx_budget=%d "
13596          "max_aggregation_size=%d "
13597          "mrrs=%d "
13598          "autogreeen=%d "
13599          "udp_rss=%d\n",
13600          bxe_debug,
13601          sc->interrupt_mode,
13602          sc->num_queues,
13603          sc->hc_rx_ticks,
13604          sc->hc_tx_ticks,
13605          bxe_rx_budget,
13606          sc->max_aggregation_size,
13607          sc->mrrs,
13608          sc->autogreeen,
13609          sc->udp_rss);
13610}
13611
13612static void
13613bxe_media_detect(struct bxe_softc *sc)
13614{
13615    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13616    switch (sc->link_params.phy[phy_idx].media_type) {
13617    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13618    case ELINK_ETH_PHY_XFP_FIBER:
13619        BLOGI(sc, "Found 10Gb Fiber media.\n");
13620        sc->media = IFM_10G_SR;
13621        break;
13622    case ELINK_ETH_PHY_SFP_1G_FIBER:
13623        BLOGI(sc, "Found 1Gb Fiber media.\n");
13624        sc->media = IFM_1000_SX;
13625        break;
13626    case ELINK_ETH_PHY_KR:
13627    case ELINK_ETH_PHY_CX4:
13628        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13629        sc->media = IFM_10G_CX4;
13630        break;
13631    case ELINK_ETH_PHY_DA_TWINAX:
13632        BLOGI(sc, "Found 10Gb Twinax media.\n");
13633        sc->media = IFM_10G_TWINAX;
13634        break;
13635    case ELINK_ETH_PHY_BASE_T:
13636        if (sc->link_params.speed_cap_mask[0] &
13637            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13638            BLOGI(sc, "Found 10GBase-T media.\n");
13639            sc->media = IFM_10G_T;
13640        } else {
13641            BLOGI(sc, "Found 1000Base-T media.\n");
13642            sc->media = IFM_1000_T;
13643        }
13644        break;
13645    case ELINK_ETH_PHY_NOT_PRESENT:
13646        BLOGI(sc, "Media not present.\n");
13647        sc->media = 0;
13648        break;
13649    case ELINK_ETH_PHY_UNSPECIFIED:
13650    default:
13651        BLOGI(sc, "Unknown media!\n");
13652        sc->media = 0;
13653        break;
13654    }
13655}
13656
13657#define GET_FIELD(value, fname)                     \
13658    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13659#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13660#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13661
13662static int
13663bxe_get_igu_cam_info(struct bxe_softc *sc)
13664{
13665    int pfid = SC_FUNC(sc);
13666    int igu_sb_id;
13667    uint32_t val;
13668    uint8_t fid, igu_sb_cnt = 0;
13669
13670    sc->igu_base_sb = 0xff;
13671
13672    if (CHIP_INT_MODE_IS_BC(sc)) {
13673        int vn = SC_VN(sc);
13674        igu_sb_cnt = sc->igu_sb_cnt;
13675        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13676                           FP_SB_MAX_E1x);
13677        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13678                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13679        return (0);
13680    }
13681
13682    /* IGU in normal mode - read CAM */
13683    for (igu_sb_id = 0;
13684         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13685         igu_sb_id++) {
13686        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13687        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13688            continue;
13689        }
13690        fid = IGU_FID(val);
13691        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13692            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13693                continue;
13694            }
13695            if (IGU_VEC(val) == 0) {
13696                /* default status block */
13697                sc->igu_dsb_id = igu_sb_id;
13698            } else {
13699                if (sc->igu_base_sb == 0xff) {
13700                    sc->igu_base_sb = igu_sb_id;
13701                }
13702                igu_sb_cnt++;
13703            }
13704        }
13705    }
13706
13707    /*
13708     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
13709     * that number of CAM entries will not be equal to the value advertised in
13710     * PCI. Driver should use the minimal value of both as the actual status
13711     * block count
13712     */
13713    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13714
13715    if (igu_sb_cnt == 0) {
13716        BLOGE(sc, "CAM configuration error\n");
13717        return (-1);
13718    }
13719
13720    return (0);
13721}
13722
13723/*
13724 * Gather various information from the device config space, the device itself,
13725 * shmem, and the user input.
13726 */
13727static int
13728bxe_get_device_info(struct bxe_softc *sc)
13729{
13730    uint32_t val;
13731    int rc;
13732
13733    /* Get the data for the device */
13734    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13735    sc->devinfo.device_id    = pci_get_device(sc->dev);
13736    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13737    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13738
13739    /* get the chip revision (chip metal comes from pci config space) */
13740    sc->devinfo.chip_id     =
13741    sc->link_params.chip_id =
13742        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13743         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13744         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13745         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13746
13747    /* force 57811 according to MISC register */
13748    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13749        if (CHIP_IS_57810(sc)) {
13750            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13751                                   (sc->devinfo.chip_id & 0x0000ffff));
13752        } else if (CHIP_IS_57810_MF(sc)) {
13753            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13754                                   (sc->devinfo.chip_id & 0x0000ffff));
13755        }
13756        sc->devinfo.chip_id |= 0x1;
13757    }
13758
13759    BLOGD(sc, DBG_LOAD,
13760          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13761          sc->devinfo.chip_id,
13762          ((sc->devinfo.chip_id >> 16) & 0xffff),
13763          ((sc->devinfo.chip_id >> 12) & 0xf),
13764          ((sc->devinfo.chip_id >>  4) & 0xff),
13765          ((sc->devinfo.chip_id >>  0) & 0xf));
13766
13767    val = (REG_RD(sc, 0x2874) & 0x55);
13768    if ((sc->devinfo.chip_id & 0x1) ||
13769        (CHIP_IS_E1(sc) && val) ||
13770        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13771        sc->flags |= BXE_ONE_PORT_FLAG;
13772        BLOGD(sc, DBG_LOAD, "single port device\n");
13773    }
13774
13775    /* set the doorbell size */
13776    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13777
13778    /* determine whether the device is in 2 port or 4 port mode */
13779    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13780    if (CHIP_IS_E2E3(sc)) {
13781        /*
13782         * Read port4mode_en_ovwr[0]:
13783         *   If 1, four port mode is in port4mode_en_ovwr[1].
13784         *   If 0, four port mode is in port4mode_en[0].
13785         */
13786        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13787        if (val & 1) {
13788            val = ((val >> 1) & 1);
13789        } else {
13790            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13791        }
13792
13793        sc->devinfo.chip_port_mode =
13794            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13795
13796        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13797    }
13798
13799    /* get the function and path info for the device */
13800    bxe_get_function_num(sc);
13801
13802    /* get the shared memory base address */
13803    sc->devinfo.shmem_base     =
13804    sc->link_params.shmem_base =
13805        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13806    sc->devinfo.shmem2_base =
13807        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13808                                  MISC_REG_GENERIC_CR_0));
13809
13810    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13811          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13812
13813    if (!sc->devinfo.shmem_base) {
13814        /* this should ONLY prevent upcoming shmem reads */
13815        BLOGI(sc, "MCP not active\n");
13816        sc->flags |= BXE_NO_MCP_FLAG;
13817        return (0);
13818    }
13819
13820    /* make sure the shared memory contents are valid */
13821    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13822    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13823        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13824        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13825        return (0);
13826    }
13827    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13828
13829    /* get the bootcode version */
13830    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13831    snprintf(sc->devinfo.bc_ver_str,
13832             sizeof(sc->devinfo.bc_ver_str),
13833             "%d.%d.%d",
13834             ((sc->devinfo.bc_ver >> 24) & 0xff),
13835             ((sc->devinfo.bc_ver >> 16) & 0xff),
13836             ((sc->devinfo.bc_ver >>  8) & 0xff));
13837    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13838
13839    /* get the bootcode shmem address */
13840    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13841    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13842
13843    /* clean indirect addresses as they're not used */
13844    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13845    if (IS_PF(sc)) {
13846        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13847        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13848        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13849        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13850        if (CHIP_IS_E1x(sc)) {
13851            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13852            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13853            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13854            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13855        }
13856
13857        /*
13858         * Enable internal target-read (in case we are probed after PF
13859         * FLR). Must be done prior to any BAR read access. Only for
13860         * 57712 and up
13861         */
13862        if (!CHIP_IS_E1x(sc)) {
13863            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13864        }
13865    }
13866
13867    /* get the nvram size */
13868    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13869    sc->devinfo.flash_size =
13870        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13871    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13872
13873    /* get PCI capabilites */
13874    bxe_probe_pci_caps(sc);
13875
13876    bxe_set_power_state(sc, PCI_PM_D0);
13877
13878    /* get various configuration parameters from shmem */
13879    bxe_get_shmem_info(sc);
13880
13881    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13882        val = pci_read_config(sc->dev,
13883                              (sc->devinfo.pcie_msix_cap_reg +
13884                               PCIR_MSIX_CTRL),
13885                              2);
13886        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13887    } else {
13888        sc->igu_sb_cnt = 1;
13889    }
13890
13891    sc->igu_base_addr = BAR_IGU_INTMEM;
13892
13893    /* initialize IGU parameters */
13894    if (CHIP_IS_E1x(sc)) {
13895        sc->devinfo.int_block = INT_BLOCK_HC;
13896        sc->igu_dsb_id = DEF_SB_IGU_ID;
13897        sc->igu_base_sb = 0;
13898    } else {
13899        sc->devinfo.int_block = INT_BLOCK_IGU;
13900
13901        /* do not allow device reset during IGU info preocessing */
13902        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13903
13904        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13905
13906        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13907            int tout = 5000;
13908
13909            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13910
13911            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13912            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13913            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13914
13915            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13916                tout--;
13917                DELAY(1000);
13918            }
13919
13920            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13921                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13922                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13923                return (-1);
13924            }
13925        }
13926
13927        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13928            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13929            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13930        } else {
13931            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13932        }
13933
13934        rc = bxe_get_igu_cam_info(sc);
13935
13936        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13937
13938        if (rc) {
13939            return (rc);
13940        }
13941    }
13942
13943    /*
13944     * Get base FW non-default (fast path) status block ID. This value is
13945     * used to initialize the fw_sb_id saved on the fp/queue structure to
13946     * determine the id used by the FW.
13947     */
13948    if (CHIP_IS_E1x(sc)) {
13949        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13950    } else {
13951        /*
13952         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13953         * the same queue are indicated on the same IGU SB). So we prefer
13954         * FW and IGU SBs to be the same value.
13955         */
13956        sc->base_fw_ndsb = sc->igu_base_sb;
13957    }
13958
13959    BLOGD(sc, DBG_LOAD,
13960          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13961          sc->igu_dsb_id, sc->igu_base_sb,
13962          sc->igu_sb_cnt, sc->base_fw_ndsb);
13963
13964    elink_phy_probe(&sc->link_params);
13965
13966    return (0);
13967}
13968
13969static void
13970bxe_link_settings_supported(struct bxe_softc *sc,
13971                            uint32_t         switch_cfg)
13972{
13973    uint32_t cfg_size = 0;
13974    uint32_t idx;
13975    uint8_t port = SC_PORT(sc);
13976
13977    /* aggregation of supported attributes of all external phys */
13978    sc->port.supported[0] = 0;
13979    sc->port.supported[1] = 0;
13980
13981    switch (sc->link_params.num_phys) {
13982    case 1:
13983        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13984        cfg_size = 1;
13985        break;
13986    case 2:
13987        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13988        cfg_size = 1;
13989        break;
13990    case 3:
13991        if (sc->link_params.multi_phy_config &
13992            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13993            sc->port.supported[1] =
13994                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13995            sc->port.supported[0] =
13996                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13997        } else {
13998            sc->port.supported[0] =
13999                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14000            sc->port.supported[1] =
14001                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14002        }
14003        cfg_size = 2;
14004        break;
14005    }
14006
14007    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14008        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14009              SHMEM_RD(sc,
14010                       dev_info.port_hw_config[port].external_phy_config),
14011              SHMEM_RD(sc,
14012                       dev_info.port_hw_config[port].external_phy_config2));
14013        return;
14014    }
14015
14016    if (CHIP_IS_E3(sc))
14017        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14018    else {
14019        switch (switch_cfg) {
14020        case ELINK_SWITCH_CFG_1G:
14021            sc->port.phy_addr =
14022                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14023            break;
14024        case ELINK_SWITCH_CFG_10G:
14025            sc->port.phy_addr =
14026                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14027            break;
14028        default:
14029            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14030                  sc->port.link_config[0]);
14031            return;
14032        }
14033    }
14034
14035    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14036
14037    /* mask what we support according to speed_cap_mask per configuration */
14038    for (idx = 0; idx < cfg_size; idx++) {
14039        if (!(sc->link_params.speed_cap_mask[idx] &
14040              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14041            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14042        }
14043
14044        if (!(sc->link_params.speed_cap_mask[idx] &
14045              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14046            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14047        }
14048
14049        if (!(sc->link_params.speed_cap_mask[idx] &
14050              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14051            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14052        }
14053
14054        if (!(sc->link_params.speed_cap_mask[idx] &
14055              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14056            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14057        }
14058
14059        if (!(sc->link_params.speed_cap_mask[idx] &
14060              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14061            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14062        }
14063
14064        if (!(sc->link_params.speed_cap_mask[idx] &
14065              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14066            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14067        }
14068
14069        if (!(sc->link_params.speed_cap_mask[idx] &
14070              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14071            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14072        }
14073
14074        if (!(sc->link_params.speed_cap_mask[idx] &
14075              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14076            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14077        }
14078    }
14079
14080    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14081          sc->port.supported[0], sc->port.supported[1]);
14082}
14083
14084static void
14085bxe_link_settings_requested(struct bxe_softc *sc)
14086{
14087    uint32_t link_config;
14088    uint32_t idx;
14089    uint32_t cfg_size = 0;
14090
14091    sc->port.advertising[0] = 0;
14092    sc->port.advertising[1] = 0;
14093
14094    switch (sc->link_params.num_phys) {
14095    case 1:
14096    case 2:
14097        cfg_size = 1;
14098        break;
14099    case 3:
14100        cfg_size = 2;
14101        break;
14102    }
14103
14104    for (idx = 0; idx < cfg_size; idx++) {
14105        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14106        link_config = sc->port.link_config[idx];
14107
14108        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14109        case PORT_FEATURE_LINK_SPEED_AUTO:
14110            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14111                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14112                sc->port.advertising[idx] |= sc->port.supported[idx];
14113                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14114                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14115                    sc->port.advertising[idx] |=
14116                        (ELINK_SUPPORTED_100baseT_Half |
14117                         ELINK_SUPPORTED_100baseT_Full);
14118            } else {
14119                /* force 10G, no AN */
14120                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14121                sc->port.advertising[idx] |=
14122                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14123                continue;
14124            }
14125            break;
14126
14127        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14128            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14129                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14130                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14131                                              ADVERTISED_TP);
14132            } else {
14133                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14134                          "speed_cap_mask=0x%08x\n",
14135                      link_config, sc->link_params.speed_cap_mask[idx]);
14136                return;
14137            }
14138            break;
14139
14140        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14141            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14142                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14143                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14144                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14145                                              ADVERTISED_TP);
14146            } else {
14147                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14148                          "speed_cap_mask=0x%08x\n",
14149                      link_config, sc->link_params.speed_cap_mask[idx]);
14150                return;
14151            }
14152            break;
14153
14154        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14155            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14156                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14157                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14158                                              ADVERTISED_TP);
14159            } else {
14160                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14161                          "speed_cap_mask=0x%08x\n",
14162                      link_config, sc->link_params.speed_cap_mask[idx]);
14163                return;
14164            }
14165            break;
14166
14167        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14168            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14169                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14170                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14171                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14172                                              ADVERTISED_TP);
14173            } else {
14174                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14175                          "speed_cap_mask=0x%08x\n",
14176                      link_config, sc->link_params.speed_cap_mask[idx]);
14177                return;
14178            }
14179            break;
14180
14181        case PORT_FEATURE_LINK_SPEED_1G:
14182            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14183                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14184                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14185                                              ADVERTISED_TP);
14186            } else {
14187                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14188                          "speed_cap_mask=0x%08x\n",
14189                      link_config, sc->link_params.speed_cap_mask[idx]);
14190                return;
14191            }
14192            break;
14193
14194        case PORT_FEATURE_LINK_SPEED_2_5G:
14195            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14196                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14197                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14198                                              ADVERTISED_TP);
14199            } else {
14200                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14201                          "speed_cap_mask=0x%08x\n",
14202                      link_config, sc->link_params.speed_cap_mask[idx]);
14203                return;
14204            }
14205            break;
14206
14207        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14208            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14209                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14210                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14211                                              ADVERTISED_FIBRE);
14212            } else {
14213                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14214                          "speed_cap_mask=0x%08x\n",
14215                      link_config, sc->link_params.speed_cap_mask[idx]);
14216                return;
14217            }
14218            break;
14219
14220        case PORT_FEATURE_LINK_SPEED_20G:
14221            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14222            break;
14223
14224        default:
14225            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14226                      "speed_cap_mask=0x%08x\n",
14227                  link_config, sc->link_params.speed_cap_mask[idx]);
14228            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14229            sc->port.advertising[idx] = sc->port.supported[idx];
14230            break;
14231        }
14232
14233        sc->link_params.req_flow_ctrl[idx] =
14234            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14235
14236        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14237            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14238                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14239            } else {
14240                bxe_set_requested_fc(sc);
14241            }
14242        }
14243
14244        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14245                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14246              sc->link_params.req_line_speed[idx],
14247              sc->link_params.req_duplex[idx],
14248              sc->link_params.req_flow_ctrl[idx],
14249              sc->port.advertising[idx]);
14250    }
14251}
14252
14253static void
14254bxe_get_phy_info(struct bxe_softc *sc)
14255{
14256    uint8_t port = SC_PORT(sc);
14257    uint32_t config = sc->port.config;
14258    uint32_t eee_mode;
14259
14260    /* shmem data already read in bxe_get_shmem_info() */
14261
14262    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14263                        "link_config0=0x%08x\n",
14264               sc->link_params.lane_config,
14265               sc->link_params.speed_cap_mask[0],
14266               sc->port.link_config[0]);
14267
14268    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14269    bxe_link_settings_requested(sc);
14270
14271    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14272        sc->link_params.feature_config_flags |=
14273            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14274    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14275        sc->link_params.feature_config_flags &=
14276            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14277    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14278        sc->link_params.feature_config_flags |=
14279            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14280    }
14281
14282    /* configure link feature according to nvram value */
14283    eee_mode =
14284        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14285          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14286         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14287    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14288        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14289                                    ELINK_EEE_MODE_ENABLE_LPI |
14290                                    ELINK_EEE_MODE_OUTPUT_TIME);
14291    } else {
14292        sc->link_params.eee_mode = 0;
14293    }
14294
14295    /* get the media type */
14296    bxe_media_detect(sc);
14297}
14298
14299static void
14300bxe_get_params(struct bxe_softc *sc)
14301{
14302    /* get user tunable params */
14303    bxe_get_tunable_params(sc);
14304
14305    /* select the RX and TX ring sizes */
14306    sc->tx_ring_size = TX_BD_USABLE;
14307    sc->rx_ring_size = RX_BD_USABLE;
14308
14309    /* XXX disable WoL */
14310    sc->wol = 0;
14311}
14312
14313static void
14314bxe_set_modes_bitmap(struct bxe_softc *sc)
14315{
14316    uint32_t flags = 0;
14317
14318    if (CHIP_REV_IS_FPGA(sc)) {
14319        SET_FLAGS(flags, MODE_FPGA);
14320    } else if (CHIP_REV_IS_EMUL(sc)) {
14321        SET_FLAGS(flags, MODE_EMUL);
14322    } else {
14323        SET_FLAGS(flags, MODE_ASIC);
14324    }
14325
14326    if (CHIP_IS_MODE_4_PORT(sc)) {
14327        SET_FLAGS(flags, MODE_PORT4);
14328    } else {
14329        SET_FLAGS(flags, MODE_PORT2);
14330    }
14331
14332    if (CHIP_IS_E2(sc)) {
14333        SET_FLAGS(flags, MODE_E2);
14334    } else if (CHIP_IS_E3(sc)) {
14335        SET_FLAGS(flags, MODE_E3);
14336        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14337            SET_FLAGS(flags, MODE_E3_A0);
14338        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14339            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14340        }
14341    }
14342
14343    if (IS_MF(sc)) {
14344        SET_FLAGS(flags, MODE_MF);
14345        switch (sc->devinfo.mf_info.mf_mode) {
14346        case MULTI_FUNCTION_SD:
14347            SET_FLAGS(flags, MODE_MF_SD);
14348            break;
14349        case MULTI_FUNCTION_SI:
14350            SET_FLAGS(flags, MODE_MF_SI);
14351            break;
14352        case MULTI_FUNCTION_AFEX:
14353            SET_FLAGS(flags, MODE_MF_AFEX);
14354            break;
14355        }
14356    } else {
14357        SET_FLAGS(flags, MODE_SF);
14358    }
14359
14360#if defined(__LITTLE_ENDIAN)
14361    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14362#else /* __BIG_ENDIAN */
14363    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14364#endif
14365
14366    INIT_MODE_FLAGS(sc) = flags;
14367}
14368
14369static int
14370bxe_alloc_hsi_mem(struct bxe_softc *sc)
14371{
14372    struct bxe_fastpath *fp;
14373    bus_addr_t busaddr;
14374    int max_agg_queues;
14375    int max_segments;
14376    bus_size_t max_size;
14377    bus_size_t max_seg_size;
14378    char buf[32];
14379    int rc;
14380    int i, j;
14381
14382    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14383
14384    /* allocate the parent bus DMA tag */
14385    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14386                            1,                        /* alignment */
14387                            0,                        /* boundary limit */
14388                            BUS_SPACE_MAXADDR,        /* restricted low */
14389                            BUS_SPACE_MAXADDR,        /* restricted hi */
14390                            NULL,                     /* addr filter() */
14391                            NULL,                     /* addr filter() arg */
14392                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14393                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14394                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14395                            0,                        /* flags */
14396                            NULL,                     /* lock() */
14397                            NULL,                     /* lock() arg */
14398                            &sc->parent_dma_tag);     /* returned dma tag */
14399    if (rc != 0) {
14400        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14401        return (1);
14402    }
14403
14404    /************************/
14405    /* DEFAULT STATUS BLOCK */
14406    /************************/
14407
14408    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14409                      &sc->def_sb_dma, "default status block") != 0) {
14410        /* XXX */
14411        bus_dma_tag_destroy(sc->parent_dma_tag);
14412        return (1);
14413    }
14414
14415    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14416
14417    /***************/
14418    /* EVENT QUEUE */
14419    /***************/
14420
14421    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14422                      &sc->eq_dma, "event queue") != 0) {
14423        /* XXX */
14424        bxe_dma_free(sc, &sc->def_sb_dma);
14425        sc->def_sb = NULL;
14426        bus_dma_tag_destroy(sc->parent_dma_tag);
14427        return (1);
14428    }
14429
14430    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14431
14432    /*************/
14433    /* SLOW PATH */
14434    /*************/
14435
14436    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14437                      &sc->sp_dma, "slow path") != 0) {
14438        /* XXX */
14439        bxe_dma_free(sc, &sc->eq_dma);
14440        sc->eq = NULL;
14441        bxe_dma_free(sc, &sc->def_sb_dma);
14442        sc->def_sb = NULL;
14443        bus_dma_tag_destroy(sc->parent_dma_tag);
14444        return (1);
14445    }
14446
14447    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14448
14449    /*******************/
14450    /* SLOW PATH QUEUE */
14451    /*******************/
14452
14453    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14454                      &sc->spq_dma, "slow path queue") != 0) {
14455        /* XXX */
14456        bxe_dma_free(sc, &sc->sp_dma);
14457        sc->sp = NULL;
14458        bxe_dma_free(sc, &sc->eq_dma);
14459        sc->eq = NULL;
14460        bxe_dma_free(sc, &sc->def_sb_dma);
14461        sc->def_sb = NULL;
14462        bus_dma_tag_destroy(sc->parent_dma_tag);
14463        return (1);
14464    }
14465
14466    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14467
14468    /***************************/
14469    /* FW DECOMPRESSION BUFFER */
14470    /***************************/
14471
14472    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14473                      "fw decompression buffer") != 0) {
14474        /* XXX */
14475        bxe_dma_free(sc, &sc->spq_dma);
14476        sc->spq = NULL;
14477        bxe_dma_free(sc, &sc->sp_dma);
14478        sc->sp = NULL;
14479        bxe_dma_free(sc, &sc->eq_dma);
14480        sc->eq = NULL;
14481        bxe_dma_free(sc, &sc->def_sb_dma);
14482        sc->def_sb = NULL;
14483        bus_dma_tag_destroy(sc->parent_dma_tag);
14484        return (1);
14485    }
14486
14487    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14488
14489    if ((sc->gz_strm =
14490         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14491        /* XXX */
14492        bxe_dma_free(sc, &sc->gz_buf_dma);
14493        sc->gz_buf = NULL;
14494        bxe_dma_free(sc, &sc->spq_dma);
14495        sc->spq = NULL;
14496        bxe_dma_free(sc, &sc->sp_dma);
14497        sc->sp = NULL;
14498        bxe_dma_free(sc, &sc->eq_dma);
14499        sc->eq = NULL;
14500        bxe_dma_free(sc, &sc->def_sb_dma);
14501        sc->def_sb = NULL;
14502        bus_dma_tag_destroy(sc->parent_dma_tag);
14503        return (1);
14504    }
14505
14506    /*************/
14507    /* FASTPATHS */
14508    /*************/
14509
14510    /* allocate DMA memory for each fastpath structure */
14511    for (i = 0; i < sc->num_queues; i++) {
14512        fp = &sc->fp[i];
14513        fp->sc    = sc;
14514        fp->index = i;
14515
14516        /*******************/
14517        /* FP STATUS BLOCK */
14518        /*******************/
14519
14520        snprintf(buf, sizeof(buf), "fp %d status block", i);
14521        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14522                          &fp->sb_dma, buf) != 0) {
14523            /* XXX unwind and free previous fastpath allocations */
14524            BLOGE(sc, "Failed to alloc %s\n", buf);
14525            return (1);
14526        } else {
14527            if (CHIP_IS_E2E3(sc)) {
14528                fp->status_block.e2_sb =
14529                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14530            } else {
14531                fp->status_block.e1x_sb =
14532                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14533            }
14534        }
14535
14536        /******************/
14537        /* FP TX BD CHAIN */
14538        /******************/
14539
14540        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14541        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14542                          &fp->tx_dma, buf) != 0) {
14543            /* XXX unwind and free previous fastpath allocations */
14544            BLOGE(sc, "Failed to alloc %s\n", buf);
14545            return (1);
14546        } else {
14547            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14548        }
14549
14550        /* link together the tx bd chain pages */
14551        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14552            /* index into the tx bd chain array to last entry per page */
14553            struct eth_tx_next_bd *tx_next_bd =
14554                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14555            /* point to the next page and wrap from last page */
14556            busaddr = (fp->tx_dma.paddr +
14557                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14558            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14559            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14560        }
14561
14562        /******************/
14563        /* FP RX BD CHAIN */
14564        /******************/
14565
14566        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14567        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14568                          &fp->rx_dma, buf) != 0) {
14569            /* XXX unwind and free previous fastpath allocations */
14570            BLOGE(sc, "Failed to alloc %s\n", buf);
14571            return (1);
14572        } else {
14573            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14574        }
14575
14576        /* link together the rx bd chain pages */
14577        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14578            /* index into the rx bd chain array to last entry per page */
14579            struct eth_rx_bd *rx_bd =
14580                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14581            /* point to the next page and wrap from last page */
14582            busaddr = (fp->rx_dma.paddr +
14583                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14584            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14585            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14586        }
14587
14588        /*******************/
14589        /* FP RX RCQ CHAIN */
14590        /*******************/
14591
14592        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14593        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14594                          &fp->rcq_dma, buf) != 0) {
14595            /* XXX unwind and free previous fastpath allocations */
14596            BLOGE(sc, "Failed to alloc %s\n", buf);
14597            return (1);
14598        } else {
14599            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14600        }
14601
14602        /* link together the rcq chain pages */
14603        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14604            /* index into the rcq chain array to last entry per page */
14605            struct eth_rx_cqe_next_page *rx_cqe_next =
14606                (struct eth_rx_cqe_next_page *)
14607                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14608            /* point to the next page and wrap from last page */
14609            busaddr = (fp->rcq_dma.paddr +
14610                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14611            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14612            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14613        }
14614
14615        /*******************/
14616        /* FP RX SGE CHAIN */
14617        /*******************/
14618
14619        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14620        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14621                          &fp->rx_sge_dma, buf) != 0) {
14622            /* XXX unwind and free previous fastpath allocations */
14623            BLOGE(sc, "Failed to alloc %s\n", buf);
14624            return (1);
14625        } else {
14626            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14627        }
14628
14629        /* link together the sge chain pages */
14630        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14631            /* index into the rcq chain array to last entry per page */
14632            struct eth_rx_sge *rx_sge =
14633                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14634            /* point to the next page and wrap from last page */
14635            busaddr = (fp->rx_sge_dma.paddr +
14636                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14637            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14638            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14639        }
14640
14641        /***********************/
14642        /* FP TX MBUF DMA MAPS */
14643        /***********************/
14644
14645        /* set required sizes before mapping to conserve resources */
14646        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14647            max_size     = BXE_TSO_MAX_SIZE;
14648            max_segments = BXE_TSO_MAX_SEGMENTS;
14649            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14650        } else {
14651            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14652            max_segments = BXE_MAX_SEGMENTS;
14653            max_seg_size = MCLBYTES;
14654        }
14655
14656        /* create a dma tag for the tx mbufs */
14657        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14658                                1,                  /* alignment */
14659                                0,                  /* boundary limit */
14660                                BUS_SPACE_MAXADDR,  /* restricted low */
14661                                BUS_SPACE_MAXADDR,  /* restricted hi */
14662                                NULL,               /* addr filter() */
14663                                NULL,               /* addr filter() arg */
14664                                max_size,           /* max map size */
14665                                max_segments,       /* num discontinuous */
14666                                max_seg_size,       /* max seg size */
14667                                0,                  /* flags */
14668                                NULL,               /* lock() */
14669                                NULL,               /* lock() arg */
14670                                &fp->tx_mbuf_tag);  /* returned dma tag */
14671        if (rc != 0) {
14672            /* XXX unwind and free previous fastpath allocations */
14673            BLOGE(sc, "Failed to create dma tag for "
14674                      "'fp %d tx mbufs' (%d)\n", i, rc);
14675            return (1);
14676        }
14677
14678        /* create dma maps for each of the tx mbuf clusters */
14679        for (j = 0; j < TX_BD_TOTAL; j++) {
14680            if (bus_dmamap_create(fp->tx_mbuf_tag,
14681                                  BUS_DMA_NOWAIT,
14682                                  &fp->tx_mbuf_chain[j].m_map)) {
14683                /* XXX unwind and free previous fastpath allocations */
14684                BLOGE(sc, "Failed to create dma map for "
14685                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14686                return (1);
14687            }
14688        }
14689
14690        /***********************/
14691        /* FP RX MBUF DMA MAPS */
14692        /***********************/
14693
14694        /* create a dma tag for the rx mbufs */
14695        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14696                                1,                  /* alignment */
14697                                0,                  /* boundary limit */
14698                                BUS_SPACE_MAXADDR,  /* restricted low */
14699                                BUS_SPACE_MAXADDR,  /* restricted hi */
14700                                NULL,               /* addr filter() */
14701                                NULL,               /* addr filter() arg */
14702                                MJUM9BYTES,         /* max map size */
14703                                1,                  /* num discontinuous */
14704                                MJUM9BYTES,         /* max seg size */
14705                                0,                  /* flags */
14706                                NULL,               /* lock() */
14707                                NULL,               /* lock() arg */
14708                                &fp->rx_mbuf_tag);  /* returned dma tag */
14709        if (rc != 0) {
14710            /* XXX unwind and free previous fastpath allocations */
14711            BLOGE(sc, "Failed to create dma tag for "
14712                      "'fp %d rx mbufs' (%d)\n", i, rc);
14713            return (1);
14714        }
14715
14716        /* create dma maps for each of the rx mbuf clusters */
14717        for (j = 0; j < RX_BD_TOTAL; j++) {
14718            if (bus_dmamap_create(fp->rx_mbuf_tag,
14719                                  BUS_DMA_NOWAIT,
14720                                  &fp->rx_mbuf_chain[j].m_map)) {
14721                /* XXX unwind and free previous fastpath allocations */
14722                BLOGE(sc, "Failed to create dma map for "
14723                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14724                return (1);
14725            }
14726        }
14727
14728        /* create dma map for the spare rx mbuf cluster */
14729        if (bus_dmamap_create(fp->rx_mbuf_tag,
14730                              BUS_DMA_NOWAIT,
14731                              &fp->rx_mbuf_spare_map)) {
14732            /* XXX unwind and free previous fastpath allocations */
14733            BLOGE(sc, "Failed to create dma map for "
14734                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14735            return (1);
14736        }
14737
14738        /***************************/
14739        /* FP RX SGE MBUF DMA MAPS */
14740        /***************************/
14741
14742        /* create a dma tag for the rx sge mbufs */
14743        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14744                                1,                  /* alignment */
14745                                0,                  /* boundary limit */
14746                                BUS_SPACE_MAXADDR,  /* restricted low */
14747                                BUS_SPACE_MAXADDR,  /* restricted hi */
14748                                NULL,               /* addr filter() */
14749                                NULL,               /* addr filter() arg */
14750                                BCM_PAGE_SIZE,      /* max map size */
14751                                1,                  /* num discontinuous */
14752                                BCM_PAGE_SIZE,      /* max seg size */
14753                                0,                  /* flags */
14754                                NULL,               /* lock() */
14755                                NULL,               /* lock() arg */
14756                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14757        if (rc != 0) {
14758            /* XXX unwind and free previous fastpath allocations */
14759            BLOGE(sc, "Failed to create dma tag for "
14760                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14761            return (1);
14762        }
14763
14764        /* create dma maps for the rx sge mbuf clusters */
14765        for (j = 0; j < RX_SGE_TOTAL; j++) {
14766            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14767                                  BUS_DMA_NOWAIT,
14768                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14769                /* XXX unwind and free previous fastpath allocations */
14770                BLOGE(sc, "Failed to create dma map for "
14771                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14772                return (1);
14773            }
14774        }
14775
14776        /* create dma map for the spare rx sge mbuf cluster */
14777        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14778                              BUS_DMA_NOWAIT,
14779                              &fp->rx_sge_mbuf_spare_map)) {
14780            /* XXX unwind and free previous fastpath allocations */
14781            BLOGE(sc, "Failed to create dma map for "
14782                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14783            return (1);
14784        }
14785
14786        /***************************/
14787        /* FP RX TPA MBUF DMA MAPS */
14788        /***************************/
14789
14790        /* create dma maps for the rx tpa mbuf clusters */
14791        max_agg_queues = MAX_AGG_QS(sc);
14792
14793        for (j = 0; j < max_agg_queues; j++) {
14794            if (bus_dmamap_create(fp->rx_mbuf_tag,
14795                                  BUS_DMA_NOWAIT,
14796                                  &fp->rx_tpa_info[j].bd.m_map)) {
14797                /* XXX unwind and free previous fastpath allocations */
14798                BLOGE(sc, "Failed to create dma map for "
14799                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14800                return (1);
14801            }
14802        }
14803
14804        /* create dma map for the spare rx tpa mbuf cluster */
14805        if (bus_dmamap_create(fp->rx_mbuf_tag,
14806                              BUS_DMA_NOWAIT,
14807                              &fp->rx_tpa_info_mbuf_spare_map)) {
14808            /* XXX unwind and free previous fastpath allocations */
14809            BLOGE(sc, "Failed to create dma map for "
14810                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14811            return (1);
14812        }
14813
14814        bxe_init_sge_ring_bit_mask(fp);
14815    }
14816
14817    return (0);
14818}
14819
14820static void
14821bxe_free_hsi_mem(struct bxe_softc *sc)
14822{
14823    struct bxe_fastpath *fp;
14824    int max_agg_queues;
14825    int i, j;
14826
14827    if (sc->parent_dma_tag == NULL) {
14828        return; /* assume nothing was allocated */
14829    }
14830
14831    for (i = 0; i < sc->num_queues; i++) {
14832        fp = &sc->fp[i];
14833
14834        /*******************/
14835        /* FP STATUS BLOCK */
14836        /*******************/
14837
14838        bxe_dma_free(sc, &fp->sb_dma);
14839        memset(&fp->status_block, 0, sizeof(fp->status_block));
14840
14841        /******************/
14842        /* FP TX BD CHAIN */
14843        /******************/
14844
14845        bxe_dma_free(sc, &fp->tx_dma);
14846        fp->tx_chain = NULL;
14847
14848        /******************/
14849        /* FP RX BD CHAIN */
14850        /******************/
14851
14852        bxe_dma_free(sc, &fp->rx_dma);
14853        fp->rx_chain = NULL;
14854
14855        /*******************/
14856        /* FP RX RCQ CHAIN */
14857        /*******************/
14858
14859        bxe_dma_free(sc, &fp->rcq_dma);
14860        fp->rcq_chain = NULL;
14861
14862        /*******************/
14863        /* FP RX SGE CHAIN */
14864        /*******************/
14865
14866        bxe_dma_free(sc, &fp->rx_sge_dma);
14867        fp->rx_sge_chain = NULL;
14868
14869        /***********************/
14870        /* FP TX MBUF DMA MAPS */
14871        /***********************/
14872
14873        if (fp->tx_mbuf_tag != NULL) {
14874            for (j = 0; j < TX_BD_TOTAL; j++) {
14875                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14876                    bus_dmamap_unload(fp->tx_mbuf_tag,
14877                                      fp->tx_mbuf_chain[j].m_map);
14878                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14879                                       fp->tx_mbuf_chain[j].m_map);
14880                }
14881            }
14882
14883            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14884            fp->tx_mbuf_tag = NULL;
14885        }
14886
14887        /***********************/
14888        /* FP RX MBUF DMA MAPS */
14889        /***********************/
14890
14891        if (fp->rx_mbuf_tag != NULL) {
14892            for (j = 0; j < RX_BD_TOTAL; j++) {
14893                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14894                    bus_dmamap_unload(fp->rx_mbuf_tag,
14895                                      fp->rx_mbuf_chain[j].m_map);
14896                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14897                                       fp->rx_mbuf_chain[j].m_map);
14898                }
14899            }
14900
14901            if (fp->rx_mbuf_spare_map != NULL) {
14902                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14903                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14904            }
14905
14906            /***************************/
14907            /* FP RX TPA MBUF DMA MAPS */
14908            /***************************/
14909
14910            max_agg_queues = MAX_AGG_QS(sc);
14911
14912            for (j = 0; j < max_agg_queues; j++) {
14913                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14914                    bus_dmamap_unload(fp->rx_mbuf_tag,
14915                                      fp->rx_tpa_info[j].bd.m_map);
14916                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14917                                       fp->rx_tpa_info[j].bd.m_map);
14918                }
14919            }
14920
14921            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14922                bus_dmamap_unload(fp->rx_mbuf_tag,
14923                                  fp->rx_tpa_info_mbuf_spare_map);
14924                bus_dmamap_destroy(fp->rx_mbuf_tag,
14925                                   fp->rx_tpa_info_mbuf_spare_map);
14926            }
14927
14928            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14929            fp->rx_mbuf_tag = NULL;
14930        }
14931
14932        /***************************/
14933        /* FP RX SGE MBUF DMA MAPS */
14934        /***************************/
14935
14936        if (fp->rx_sge_mbuf_tag != NULL) {
14937            for (j = 0; j < RX_SGE_TOTAL; j++) {
14938                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14939                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14940                                      fp->rx_sge_mbuf_chain[j].m_map);
14941                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14942                                       fp->rx_sge_mbuf_chain[j].m_map);
14943                }
14944            }
14945
14946            if (fp->rx_sge_mbuf_spare_map != NULL) {
14947                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14948                                  fp->rx_sge_mbuf_spare_map);
14949                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14950                                   fp->rx_sge_mbuf_spare_map);
14951            }
14952
14953            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14954            fp->rx_sge_mbuf_tag = NULL;
14955        }
14956    }
14957
14958    /***************************/
14959    /* FW DECOMPRESSION BUFFER */
14960    /***************************/
14961
14962    bxe_dma_free(sc, &sc->gz_buf_dma);
14963    sc->gz_buf = NULL;
14964    free(sc->gz_strm, M_DEVBUF);
14965    sc->gz_strm = NULL;
14966
14967    /*******************/
14968    /* SLOW PATH QUEUE */
14969    /*******************/
14970
14971    bxe_dma_free(sc, &sc->spq_dma);
14972    sc->spq = NULL;
14973
14974    /*************/
14975    /* SLOW PATH */
14976    /*************/
14977
14978    bxe_dma_free(sc, &sc->sp_dma);
14979    sc->sp = NULL;
14980
14981    /***************/
14982    /* EVENT QUEUE */
14983    /***************/
14984
14985    bxe_dma_free(sc, &sc->eq_dma);
14986    sc->eq = NULL;
14987
14988    /************************/
14989    /* DEFAULT STATUS BLOCK */
14990    /************************/
14991
14992    bxe_dma_free(sc, &sc->def_sb_dma);
14993    sc->def_sb = NULL;
14994
14995    bus_dma_tag_destroy(sc->parent_dma_tag);
14996    sc->parent_dma_tag = NULL;
14997}
14998
14999/*
15000 * Previous driver DMAE transaction may have occurred when pre-boot stage
15001 * ended and boot began. This would invalidate the addresses of the
15002 * transaction, resulting in was-error bit set in the PCI causing all
15003 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
15004 * the interrupt which detected this from the pglueb and the was-done bit
15005 */
15006static void
15007bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15008{
15009    uint32_t val;
15010
15011    if (!CHIP_IS_E1x(sc)) {
15012        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15013        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15014            BLOGD(sc, DBG_LOAD,
15015                  "Clearing 'was-error' bit that was set in pglueb");
15016            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15017        }
15018    }
15019}
15020
15021static int
15022bxe_prev_mcp_done(struct bxe_softc *sc)
15023{
15024    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15025                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15026    if (!rc) {
15027        BLOGE(sc, "MCP response failure, aborting\n");
15028        return (-1);
15029    }
15030
15031    return (0);
15032}
15033
15034static struct bxe_prev_list_node *
15035bxe_prev_path_get_entry(struct bxe_softc *sc)
15036{
15037    struct bxe_prev_list_node *tmp;
15038
15039    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15040        if ((sc->pcie_bus == tmp->bus) &&
15041            (sc->pcie_device == tmp->slot) &&
15042            (SC_PATH(sc) == tmp->path)) {
15043            return (tmp);
15044        }
15045    }
15046
15047    return (NULL);
15048}
15049
15050static uint8_t
15051bxe_prev_is_path_marked(struct bxe_softc *sc)
15052{
15053    struct bxe_prev_list_node *tmp;
15054    int rc = FALSE;
15055
15056    mtx_lock(&bxe_prev_mtx);
15057
15058    tmp = bxe_prev_path_get_entry(sc);
15059    if (tmp) {
15060        if (tmp->aer) {
15061            BLOGD(sc, DBG_LOAD,
15062                  "Path %d/%d/%d was marked by AER\n",
15063                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15064        } else {
15065            rc = TRUE;
15066            BLOGD(sc, DBG_LOAD,
15067                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15068                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15069        }
15070    }
15071
15072    mtx_unlock(&bxe_prev_mtx);
15073
15074    return (rc);
15075}
15076
15077static int
15078bxe_prev_mark_path(struct bxe_softc *sc,
15079                   uint8_t          after_undi)
15080{
15081    struct bxe_prev_list_node *tmp;
15082
15083    mtx_lock(&bxe_prev_mtx);
15084
15085    /* Check whether the entry for this path already exists */
15086    tmp = bxe_prev_path_get_entry(sc);
15087    if (tmp) {
15088        if (!tmp->aer) {
15089            BLOGD(sc, DBG_LOAD,
15090                  "Re-marking AER in path %d/%d/%d\n",
15091                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15092        } else {
15093            BLOGD(sc, DBG_LOAD,
15094                  "Removing AER indication from path %d/%d/%d\n",
15095                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15096            tmp->aer = 0;
15097        }
15098
15099        mtx_unlock(&bxe_prev_mtx);
15100        return (0);
15101    }
15102
15103    mtx_unlock(&bxe_prev_mtx);
15104
15105    /* Create an entry for this path and add it */
15106    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15107                 (M_NOWAIT | M_ZERO));
15108    if (!tmp) {
15109        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15110        return (-1);
15111    }
15112
15113    tmp->bus  = sc->pcie_bus;
15114    tmp->slot = sc->pcie_device;
15115    tmp->path = SC_PATH(sc);
15116    tmp->aer  = 0;
15117    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15118
15119    mtx_lock(&bxe_prev_mtx);
15120
15121    BLOGD(sc, DBG_LOAD,
15122          "Marked path %d/%d/%d - finished previous unload\n",
15123          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15124    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15125
15126    mtx_unlock(&bxe_prev_mtx);
15127
15128    return (0);
15129}
15130
15131static int
15132bxe_do_flr(struct bxe_softc *sc)
15133{
15134    int i;
15135
15136    /* only E2 and onwards support FLR */
15137    if (CHIP_IS_E1x(sc)) {
15138        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15139        return (-1);
15140    }
15141
15142    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15143    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15144        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15145              sc->devinfo.bc_ver);
15146        return (-1);
15147    }
15148
15149    /* Wait for Transaction Pending bit clean */
15150    for (i = 0; i < 4; i++) {
15151        if (i) {
15152            DELAY(((1 << (i - 1)) * 100) * 1000);
15153        }
15154
15155        if (!bxe_is_pcie_pending(sc)) {
15156            goto clear;
15157        }
15158    }
15159
15160    BLOGE(sc, "PCIE transaction is not cleared, "
15161              "proceeding with reset anyway\n");
15162
15163clear:
15164
15165    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15166    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15167
15168    return (0);
15169}
15170
15171struct bxe_mac_vals {
15172    uint32_t xmac_addr;
15173    uint32_t xmac_val;
15174    uint32_t emac_addr;
15175    uint32_t emac_val;
15176    uint32_t umac_addr;
15177    uint32_t umac_val;
15178    uint32_t bmac_addr;
15179    uint32_t bmac_val[2];
15180};
15181
15182static void
15183bxe_prev_unload_close_mac(struct bxe_softc *sc,
15184                          struct bxe_mac_vals *vals)
15185{
15186    uint32_t val, base_addr, offset, mask, reset_reg;
15187    uint8_t mac_stopped = FALSE;
15188    uint8_t port = SC_PORT(sc);
15189    uint32_t wb_data[2];
15190
15191    /* reset addresses as they also mark which values were changed */
15192    vals->bmac_addr = 0;
15193    vals->umac_addr = 0;
15194    vals->xmac_addr = 0;
15195    vals->emac_addr = 0;
15196
15197    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15198
15199    if (!CHIP_IS_E3(sc)) {
15200        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15201        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15202        if ((mask & reset_reg) && val) {
15203            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15204            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15205                                    : NIG_REG_INGRESS_BMAC0_MEM;
15206            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15207                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15208
15209            /*
15210             * use rd/wr since we cannot use dmae. This is safe
15211             * since MCP won't access the bus due to the request
15212             * to unload, and no function on the path can be
15213             * loaded at this time.
15214             */
15215            wb_data[0] = REG_RD(sc, base_addr + offset);
15216            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15217            vals->bmac_addr = base_addr + offset;
15218            vals->bmac_val[0] = wb_data[0];
15219            vals->bmac_val[1] = wb_data[1];
15220            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15221            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15222            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15223        }
15224
15225        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15226        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15227        vals->emac_val = REG_RD(sc, vals->emac_addr);
15228        REG_WR(sc, vals->emac_addr, 0);
15229        mac_stopped = TRUE;
15230    } else {
15231        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15232            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15233            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15234            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15235            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15236            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15237            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15238            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15239            REG_WR(sc, vals->xmac_addr, 0);
15240            mac_stopped = TRUE;
15241        }
15242
15243        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15244        if (mask & reset_reg) {
15245            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15246            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15247            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15248            vals->umac_val = REG_RD(sc, vals->umac_addr);
15249            REG_WR(sc, vals->umac_addr, 0);
15250            mac_stopped = TRUE;
15251        }
15252    }
15253
15254    if (mac_stopped) {
15255        DELAY(20000);
15256    }
15257}
15258
15259#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15260#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15261#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15262#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15263
15264static void
15265bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15266                         uint8_t          port,
15267                         uint8_t          inc)
15268{
15269    uint16_t rcq, bd;
15270    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15271
15272    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15273    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15274
15275    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15276    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15277
15278    BLOGD(sc, DBG_LOAD,
15279          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15280          port, bd, rcq);
15281}
15282
15283static int
15284bxe_prev_unload_common(struct bxe_softc *sc)
15285{
15286    uint32_t reset_reg, tmp_reg = 0, rc;
15287    uint8_t prev_undi = FALSE;
15288    struct bxe_mac_vals mac_vals;
15289    uint32_t timer_count = 1000;
15290    uint32_t prev_brb;
15291
15292    /*
15293     * It is possible a previous function received 'common' answer,
15294     * but hasn't loaded yet, therefore creating a scenario of
15295     * multiple functions receiving 'common' on the same path.
15296     */
15297    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15298
15299    memset(&mac_vals, 0, sizeof(mac_vals));
15300
15301    if (bxe_prev_is_path_marked(sc)) {
15302        return (bxe_prev_mcp_done(sc));
15303    }
15304
15305    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15306
15307    /* Reset should be performed after BRB is emptied */
15308    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15309        /* Close the MAC Rx to prevent BRB from filling up */
15310        bxe_prev_unload_close_mac(sc, &mac_vals);
15311
15312        /* close LLH filters towards the BRB */
15313        elink_set_rx_filter(&sc->link_params, 0);
15314
15315        /*
15316         * Check if the UNDI driver was previously loaded.
15317         * UNDI driver initializes CID offset for normal bell to 0x7
15318         */
15319        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15320            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15321            if (tmp_reg == 0x7) {
15322                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15323                prev_undi = TRUE;
15324                /* clear the UNDI indication */
15325                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15326                /* clear possible idle check errors */
15327                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15328            }
15329        }
15330
15331        /* wait until BRB is empty */
15332        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15333        while (timer_count) {
15334            prev_brb = tmp_reg;
15335
15336            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15337            if (!tmp_reg) {
15338                break;
15339            }
15340
15341            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15342
15343            /* reset timer as long as BRB actually gets emptied */
15344            if (prev_brb > tmp_reg) {
15345                timer_count = 1000;
15346            } else {
15347                timer_count--;
15348            }
15349
15350            /* If UNDI resides in memory, manually increment it */
15351            if (prev_undi) {
15352                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15353            }
15354
15355            DELAY(10);
15356        }
15357
15358        if (!timer_count) {
15359            BLOGE(sc, "Failed to empty BRB\n");
15360        }
15361    }
15362
15363    /* No packets are in the pipeline, path is ready for reset */
15364    bxe_reset_common(sc);
15365
15366    if (mac_vals.xmac_addr) {
15367        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15368    }
15369    if (mac_vals.umac_addr) {
15370        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15371    }
15372    if (mac_vals.emac_addr) {
15373        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15374    }
15375    if (mac_vals.bmac_addr) {
15376        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15377        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15378    }
15379
15380    rc = bxe_prev_mark_path(sc, prev_undi);
15381    if (rc) {
15382        bxe_prev_mcp_done(sc);
15383        return (rc);
15384    }
15385
15386    return (bxe_prev_mcp_done(sc));
15387}
15388
15389static int
15390bxe_prev_unload_uncommon(struct bxe_softc *sc)
15391{
15392    int rc;
15393
15394    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15395
15396    /* Test if previous unload process was already finished for this path */
15397    if (bxe_prev_is_path_marked(sc)) {
15398        return (bxe_prev_mcp_done(sc));
15399    }
15400
15401    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15402
15403    /*
15404     * If function has FLR capabilities, and existing FW version matches
15405     * the one required, then FLR will be sufficient to clean any residue
15406     * left by previous driver
15407     */
15408    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15409    if (!rc) {
15410        /* fw version is good */
15411        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15412        rc = bxe_do_flr(sc);
15413    }
15414
15415    if (!rc) {
15416        /* FLR was performed */
15417        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15418        return (0);
15419    }
15420
15421    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15422
15423    /* Close the MCP request, return failure*/
15424    rc = bxe_prev_mcp_done(sc);
15425    if (!rc) {
15426        rc = BXE_PREV_WAIT_NEEDED;
15427    }
15428
15429    return (rc);
15430}
15431
15432static int
15433bxe_prev_unload(struct bxe_softc *sc)
15434{
15435    int time_counter = 10;
15436    uint32_t fw, hw_lock_reg, hw_lock_val;
15437    uint32_t rc = 0;
15438
15439    /*
15440     * Clear HW from errors which may have resulted from an interrupted
15441     * DMAE transaction.
15442     */
15443    bxe_prev_interrupted_dmae(sc);
15444
15445    /* Release previously held locks */
15446    hw_lock_reg =
15447        (SC_FUNC(sc) <= 5) ?
15448            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15449            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15450
15451    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15452    if (hw_lock_val) {
15453        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15454            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15455            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15456                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15457        }
15458        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15459        REG_WR(sc, hw_lock_reg, 0xffffffff);
15460    } else {
15461        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15462    }
15463
15464    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15465        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15466        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15467    }
15468
15469    do {
15470        /* Lock MCP using an unload request */
15471        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15472        if (!fw) {
15473            BLOGE(sc, "MCP response failure, aborting\n");
15474            rc = -1;
15475            break;
15476        }
15477
15478        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15479            rc = bxe_prev_unload_common(sc);
15480            break;
15481        }
15482
15483        /* non-common reply from MCP night require looping */
15484        rc = bxe_prev_unload_uncommon(sc);
15485        if (rc != BXE_PREV_WAIT_NEEDED) {
15486            break;
15487        }
15488
15489        DELAY(20000);
15490    } while (--time_counter);
15491
15492    if (!time_counter || rc) {
15493        BLOGE(sc, "Failed to unload previous driver!"
15494            " time_counter %d rc %d\n", time_counter, rc);
15495        rc = -1;
15496    }
15497
15498    return (rc);
15499}
15500
15501void
15502bxe_dcbx_set_state(struct bxe_softc *sc,
15503                   uint8_t          dcb_on,
15504                   uint32_t         dcbx_enabled)
15505{
15506    if (!CHIP_IS_E1x(sc)) {
15507        sc->dcb_state = dcb_on;
15508        sc->dcbx_enabled = dcbx_enabled;
15509    } else {
15510        sc->dcb_state = FALSE;
15511        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15512    }
15513    BLOGD(sc, DBG_LOAD,
15514          "DCB state [%s:%s]\n",
15515          dcb_on ? "ON" : "OFF",
15516          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15517          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15518          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15519          "on-chip with negotiation" : "invalid");
15520}
15521
15522/* must be called after sriov-enable */
15523static int
15524bxe_set_qm_cid_count(struct bxe_softc *sc)
15525{
15526    int cid_count = BXE_L2_MAX_CID(sc);
15527
15528    if (IS_SRIOV(sc)) {
15529        cid_count += BXE_VF_CIDS;
15530    }
15531
15532    if (CNIC_SUPPORT(sc)) {
15533        cid_count += CNIC_CID_MAX;
15534    }
15535
15536    return (roundup(cid_count, QM_CID_ROUND));
15537}
15538
15539static void
15540bxe_init_multi_cos(struct bxe_softc *sc)
15541{
15542    int pri, cos;
15543
15544    uint32_t pri_map = 0; /* XXX change to user config */
15545
15546    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15547        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15548        if (cos < sc->max_cos) {
15549            sc->prio_to_cos[pri] = cos;
15550        } else {
15551            BLOGW(sc, "Invalid COS %d for priority %d "
15552                      "(max COS is %d), setting to 0\n",
15553                  cos, pri, (sc->max_cos - 1));
15554            sc->prio_to_cos[pri] = 0;
15555        }
15556    }
15557}
15558
15559static int
15560bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15561{
15562    struct bxe_softc *sc;
15563    int error, result;
15564
15565    result = 0;
15566    error = sysctl_handle_int(oidp, &result, 0, req);
15567
15568    if (error || !req->newptr) {
15569        return (error);
15570    }
15571
15572    if (result == 1) {
15573        uint32_t  temp;
15574        sc = (struct bxe_softc *)arg1;
15575
15576        BLOGI(sc, "... dumping driver state ...\n");
15577        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15578        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15579    }
15580
15581    return (error);
15582}
15583
15584static int
15585bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS)
15586{
15587    struct bxe_softc *sc;
15588    int error, result;
15589
15590    result = 0;
15591    error = sysctl_handle_int(oidp, &result, 0, req);
15592
15593    if (error || !req->newptr) {
15594        return (error);
15595    }
15596
15597    if (result == 1) {
15598        sc = (struct bxe_softc *)arg1;
15599
15600        BLOGI(sc, "... grcdump start ...\n");
15601        bxe_grc_dump(sc);
15602        BLOGI(sc, "... grcdump done ...\n");
15603    }
15604
15605    return (error);
15606}
15607
15608static int
15609bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15610{
15611    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15612    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15613    uint32_t *offset;
15614    uint64_t value = 0;
15615    int index = (int)arg2;
15616
15617    if (index >= BXE_NUM_ETH_STATS) {
15618        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15619        return (-1);
15620    }
15621
15622    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15623
15624    switch (bxe_eth_stats_arr[index].size) {
15625    case 4:
15626        value = (uint64_t)*offset;
15627        break;
15628    case 8:
15629        value = HILO_U64(*offset, *(offset + 1));
15630        break;
15631    default:
15632        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15633              index, bxe_eth_stats_arr[index].size);
15634        return (-1);
15635    }
15636
15637    return (sysctl_handle_64(oidp, &value, 0, req));
15638}
15639
15640static int
15641bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15642{
15643    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15644    uint32_t *eth_stats;
15645    uint32_t *offset;
15646    uint64_t value = 0;
15647    uint32_t q_stat = (uint32_t)arg2;
15648    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15649    uint32_t index = (q_stat & 0xffff);
15650
15651    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15652
15653    if (index >= BXE_NUM_ETH_Q_STATS) {
15654        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15655        return (-1);
15656    }
15657
15658    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15659
15660    switch (bxe_eth_q_stats_arr[index].size) {
15661    case 4:
15662        value = (uint64_t)*offset;
15663        break;
15664    case 8:
15665        value = HILO_U64(*offset, *(offset + 1));
15666        break;
15667    default:
15668        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15669              index, bxe_eth_q_stats_arr[index].size);
15670        return (-1);
15671    }
15672
15673    return (sysctl_handle_64(oidp, &value, 0, req));
15674}
15675
15676static void
15677bxe_add_sysctls(struct bxe_softc *sc)
15678{
15679    struct sysctl_ctx_list *ctx;
15680    struct sysctl_oid_list *children;
15681    struct sysctl_oid *queue_top, *queue;
15682    struct sysctl_oid_list *queue_top_children, *queue_children;
15683    char queue_num_buf[32];
15684    uint32_t q_stat;
15685    int i, j;
15686
15687    ctx = device_get_sysctl_ctx(sc->dev);
15688    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15689
15690    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15691                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15692                      "version");
15693
15694    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15695                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15696                      "bootcode version");
15697
15698    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15699             BCM_5710_FW_MAJOR_VERSION,
15700             BCM_5710_FW_MINOR_VERSION,
15701             BCM_5710_FW_REVISION_VERSION,
15702             BCM_5710_FW_ENGINEERING_VERSION);
15703    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15704                      CTLFLAG_RD, sc->fw_ver_str, 0,
15705                      "firmware version");
15706
15707    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15708        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15709         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15710         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15711         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15712                                                                "Unknown"));
15713    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15714                      CTLFLAG_RD, sc->mf_mode_str, 0,
15715                      "multifunction mode");
15716
15717    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15718                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15719                    "multifunction vnics per port");
15720
15721    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15722                      CTLFLAG_RD, sc->mac_addr_str, 0,
15723                      "mac address");
15724
15725    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15726        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15727         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15728         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15729                                              "???GT/s"),
15730        sc->devinfo.pcie_link_width);
15731    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15732                      CTLFLAG_RD, sc->pci_link_str, 0,
15733                      "pci link status");
15734
15735    sc->debug = bxe_debug;
15736    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15737                    CTLFLAG_RW, &sc->debug,
15738                    "debug logging mode");
15739
15740    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump",
15741                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15742                    bxe_sysctl_trigger_grcdump, "IU",
15743                    "set by driver when a grcdump is needed");
15744
15745    sc->grcdump_done = 0;
15746    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15747                   CTLFLAG_RW, &sc->grcdump_done, 0,
15748                   "set by driver when grcdump is done");
15749
15750    sc->rx_budget = bxe_rx_budget;
15751    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15752                    CTLFLAG_RW, &sc->rx_budget, 0,
15753                    "rx processing budget");
15754
15755    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15756                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15757                    bxe_sysctl_state, "IU", "dump driver state");
15758
15759    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15760        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15761                        bxe_eth_stats_arr[i].string,
15762                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15763                        bxe_sysctl_eth_stat, "LU",
15764                        bxe_eth_stats_arr[i].string);
15765    }
15766
15767    /* add a new parent node for all queues "dev.bxe.#.queue" */
15768    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15769                                CTLFLAG_RD, NULL, "queue");
15770    queue_top_children = SYSCTL_CHILDREN(queue_top);
15771
15772    for (i = 0; i < sc->num_queues; i++) {
15773        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15774        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15775        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15776                                queue_num_buf, CTLFLAG_RD, NULL,
15777                                "single queue");
15778        queue_children = SYSCTL_CHILDREN(queue);
15779
15780        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15781            q_stat = ((i << 16) | j);
15782            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15783                            bxe_eth_q_stats_arr[j].string,
15784                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15785                            bxe_sysctl_eth_q_stat, "LU",
15786                            bxe_eth_q_stats_arr[j].string);
15787        }
15788    }
15789}
15790
15791/*
15792 * Device attach function.
15793 *
15794 * Allocates device resources, performs secondary chip identification, and
15795 * initializes driver instance variables. This function is called from driver
15796 * load after a successful probe.
15797 *
15798 * Returns:
15799 *   0 = Success, >0 = Failure
15800 */
15801static int
15802bxe_attach(device_t dev)
15803{
15804    struct bxe_softc *sc;
15805
15806    sc = device_get_softc(dev);
15807
15808    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15809
15810    sc->state = BXE_STATE_CLOSED;
15811
15812    sc->dev  = dev;
15813    sc->unit = device_get_unit(dev);
15814
15815    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15816
15817    sc->pcie_bus    = pci_get_bus(dev);
15818    sc->pcie_device = pci_get_slot(dev);
15819    sc->pcie_func   = pci_get_function(dev);
15820
15821    /* enable bus master capability */
15822    pci_enable_busmaster(dev);
15823
15824    /* get the BARs */
15825    if (bxe_allocate_bars(sc) != 0) {
15826        return (ENXIO);
15827    }
15828
15829    /* initialize the mutexes */
15830    bxe_init_mutexes(sc);
15831
15832    /* prepare the periodic callout */
15833    callout_init(&sc->periodic_callout, 0);
15834
15835    /* prepare the chip taskqueue */
15836    sc->chip_tq_flags = CHIP_TQ_NONE;
15837    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15838             "bxe%d_chip_tq", sc->unit);
15839    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15840    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15841                                   taskqueue_thread_enqueue,
15842                                   &sc->chip_tq);
15843    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15844                            "%s", sc->chip_tq_name);
15845
15846    /* get device info and set params */
15847    if (bxe_get_device_info(sc) != 0) {
15848        BLOGE(sc, "getting device info\n");
15849        bxe_deallocate_bars(sc);
15850        pci_disable_busmaster(dev);
15851        return (ENXIO);
15852    }
15853
15854    /* get final misc params */
15855    bxe_get_params(sc);
15856
15857    /* set the default MTU (changed via ifconfig) */
15858    sc->mtu = ETHERMTU;
15859
15860    bxe_set_modes_bitmap(sc);
15861
15862    /* XXX
15863     * If in AFEX mode and the function is configured for FCoE
15864     * then bail... no L2 allowed.
15865     */
15866
15867    /* get phy settings from shmem and 'and' against admin settings */
15868    bxe_get_phy_info(sc);
15869
15870    /* initialize the FreeBSD ifnet interface */
15871    if (bxe_init_ifnet(sc) != 0) {
15872        bxe_release_mutexes(sc);
15873        bxe_deallocate_bars(sc);
15874        pci_disable_busmaster(dev);
15875        return (ENXIO);
15876    }
15877
15878    if (bxe_add_cdev(sc) != 0) {
15879        if (sc->ifp != NULL) {
15880            ether_ifdetach(sc->ifp);
15881        }
15882        ifmedia_removeall(&sc->ifmedia);
15883        bxe_release_mutexes(sc);
15884        bxe_deallocate_bars(sc);
15885        pci_disable_busmaster(dev);
15886        return (ENXIO);
15887    }
15888
15889    /* allocate device interrupts */
15890    if (bxe_interrupt_alloc(sc) != 0) {
15891        bxe_del_cdev(sc);
15892        if (sc->ifp != NULL) {
15893            ether_ifdetach(sc->ifp);
15894        }
15895        ifmedia_removeall(&sc->ifmedia);
15896        bxe_release_mutexes(sc);
15897        bxe_deallocate_bars(sc);
15898        pci_disable_busmaster(dev);
15899        return (ENXIO);
15900    }
15901
15902    /* allocate ilt */
15903    if (bxe_alloc_ilt_mem(sc) != 0) {
15904        bxe_interrupt_free(sc);
15905        bxe_del_cdev(sc);
15906        if (sc->ifp != NULL) {
15907            ether_ifdetach(sc->ifp);
15908        }
15909        ifmedia_removeall(&sc->ifmedia);
15910        bxe_release_mutexes(sc);
15911        bxe_deallocate_bars(sc);
15912        pci_disable_busmaster(dev);
15913        return (ENXIO);
15914    }
15915
15916    /* allocate the host hardware/software hsi structures */
15917    if (bxe_alloc_hsi_mem(sc) != 0) {
15918        bxe_free_ilt_mem(sc);
15919        bxe_interrupt_free(sc);
15920        bxe_del_cdev(sc);
15921        if (sc->ifp != NULL) {
15922            ether_ifdetach(sc->ifp);
15923        }
15924        ifmedia_removeall(&sc->ifmedia);
15925        bxe_release_mutexes(sc);
15926        bxe_deallocate_bars(sc);
15927        pci_disable_busmaster(dev);
15928        return (ENXIO);
15929    }
15930
15931    /* need to reset chip if UNDI was active */
15932    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
15933        /* init fw_seq */
15934        sc->fw_seq =
15935            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
15936             DRV_MSG_SEQ_NUMBER_MASK);
15937        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
15938        bxe_prev_unload(sc);
15939    }
15940
15941#if 1
15942    /* XXX */
15943    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15944#else
15945    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
15946        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
15947        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
15948        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
15949        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
15950        bxe_dcbx_init_params(sc);
15951    } else {
15952        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15953    }
15954#endif
15955
15956    /* calculate qm_cid_count */
15957    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
15958    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
15959
15960    sc->max_cos = 1;
15961    bxe_init_multi_cos(sc);
15962
15963    bxe_add_sysctls(sc);
15964
15965    return (0);
15966}
15967
15968/*
15969 * Device detach function.
15970 *
15971 * Stops the controller, resets the controller, and releases resources.
15972 *
15973 * Returns:
15974 *   0 = Success, >0 = Failure
15975 */
15976static int
15977bxe_detach(device_t dev)
15978{
15979    struct bxe_softc *sc;
15980    if_t ifp;
15981
15982    sc = device_get_softc(dev);
15983
15984    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
15985
15986    ifp = sc->ifp;
15987    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
15988        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
15989        return(EBUSY);
15990    }
15991
15992    bxe_del_cdev(sc);
15993
15994    /* stop the periodic callout */
15995    bxe_periodic_stop(sc);
15996
15997    /* stop the chip taskqueue */
15998    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
15999    if (sc->chip_tq) {
16000        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16001        taskqueue_free(sc->chip_tq);
16002        sc->chip_tq = NULL;
16003    }
16004
16005    /* stop and reset the controller if it was open */
16006    if (sc->state != BXE_STATE_CLOSED) {
16007        BXE_CORE_LOCK(sc);
16008        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16009        BXE_CORE_UNLOCK(sc);
16010    }
16011
16012    /* release the network interface */
16013    if (ifp != NULL) {
16014        ether_ifdetach(ifp);
16015    }
16016    ifmedia_removeall(&sc->ifmedia);
16017
16018    /* XXX do the following based on driver state... */
16019
16020    /* free the host hardware/software hsi structures */
16021    bxe_free_hsi_mem(sc);
16022
16023    /* free ilt */
16024    bxe_free_ilt_mem(sc);
16025
16026    /* release the interrupts */
16027    bxe_interrupt_free(sc);
16028
16029    /* Release the mutexes*/
16030    bxe_release_mutexes(sc);
16031
16032    /* Release the PCIe BAR mapped memory */
16033    bxe_deallocate_bars(sc);
16034
16035    /* Release the FreeBSD interface. */
16036    if (sc->ifp != NULL) {
16037        if_free(sc->ifp);
16038    }
16039
16040    pci_disable_busmaster(dev);
16041
16042    return (0);
16043}
16044
16045/*
16046 * Device shutdown function.
16047 *
16048 * Stops and resets the controller.
16049 *
16050 * Returns:
16051 *   Nothing
16052 */
16053static int
16054bxe_shutdown(device_t dev)
16055{
16056    struct bxe_softc *sc;
16057
16058    sc = device_get_softc(dev);
16059
16060    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16061
16062    /* stop the periodic callout */
16063    bxe_periodic_stop(sc);
16064
16065    BXE_CORE_LOCK(sc);
16066    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16067    BXE_CORE_UNLOCK(sc);
16068
16069    return (0);
16070}
16071
16072void
16073bxe_igu_ack_sb(struct bxe_softc *sc,
16074               uint8_t          igu_sb_id,
16075               uint8_t          segment,
16076               uint16_t         index,
16077               uint8_t          op,
16078               uint8_t          update)
16079{
16080    uint32_t igu_addr = sc->igu_base_addr;
16081    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16082    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16083}
16084
16085static void
16086bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16087                     uint8_t          func,
16088                     uint8_t          idu_sb_id,
16089                     uint8_t          is_pf)
16090{
16091    uint32_t data, ctl, cnt = 100;
16092    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16093    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16094    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16095    uint32_t sb_bit =  1 << (idu_sb_id%32);
16096    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16097    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16098
16099    /* Not supported in BC mode */
16100    if (CHIP_INT_MODE_IS_BC(sc)) {
16101        return;
16102    }
16103
16104    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16105             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16106            IGU_REGULAR_CLEANUP_SET |
16107            IGU_REGULAR_BCLEANUP);
16108
16109    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16110           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16111           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16112
16113    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16114            data, igu_addr_data);
16115    REG_WR(sc, igu_addr_data, data);
16116
16117    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16118                      BUS_SPACE_BARRIER_WRITE);
16119    mb();
16120
16121    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16122            ctl, igu_addr_ctl);
16123    REG_WR(sc, igu_addr_ctl, ctl);
16124
16125    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16126                      BUS_SPACE_BARRIER_WRITE);
16127    mb();
16128
16129    /* wait for clean up to finish */
16130    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16131        DELAY(20000);
16132    }
16133
16134    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16135        BLOGD(sc, DBG_LOAD,
16136              "Unable to finish IGU cleanup: "
16137              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16138              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16139    }
16140}
16141
16142static void
16143bxe_igu_clear_sb(struct bxe_softc *sc,
16144                 uint8_t          idu_sb_id)
16145{
16146    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16147}
16148
16149
16150
16151
16152
16153
16154
16155/*******************/
16156/* ECORE CALLBACKS */
16157/*******************/
16158
16159static void
16160bxe_reset_common(struct bxe_softc *sc)
16161{
16162    uint32_t val = 0x1400;
16163
16164    /* reset_common */
16165    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16166
16167    if (CHIP_IS_E3(sc)) {
16168        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16169        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16170    }
16171
16172    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16173}
16174
16175static void
16176bxe_common_init_phy(struct bxe_softc *sc)
16177{
16178    uint32_t shmem_base[2];
16179    uint32_t shmem2_base[2];
16180
16181    /* Avoid common init in case MFW supports LFA */
16182    if (SHMEM2_RD(sc, size) >
16183        (uint32_t)offsetof(struct shmem2_region,
16184                           lfa_host_addr[SC_PORT(sc)])) {
16185        return;
16186    }
16187
16188    shmem_base[0]  = sc->devinfo.shmem_base;
16189    shmem2_base[0] = sc->devinfo.shmem2_base;
16190
16191    if (!CHIP_IS_E1x(sc)) {
16192        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16193        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16194    }
16195
16196    bxe_acquire_phy_lock(sc);
16197    elink_common_init_phy(sc, shmem_base, shmem2_base,
16198                          sc->devinfo.chip_id, 0);
16199    bxe_release_phy_lock(sc);
16200}
16201
16202static void
16203bxe_pf_disable(struct bxe_softc *sc)
16204{
16205    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16206
16207    val &= ~IGU_PF_CONF_FUNC_EN;
16208
16209    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16210    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16211    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16212}
16213
16214static void
16215bxe_init_pxp(struct bxe_softc *sc)
16216{
16217    uint16_t devctl;
16218    int r_order, w_order;
16219
16220    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16221
16222    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16223
16224    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16225
16226    if (sc->mrrs == -1) {
16227        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16228    } else {
16229        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16230        r_order = sc->mrrs;
16231    }
16232
16233    ecore_init_pxp_arb(sc, r_order, w_order);
16234}
16235
16236static uint32_t
16237bxe_get_pretend_reg(struct bxe_softc *sc)
16238{
16239    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16240    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16241    return (base + (SC_ABS_FUNC(sc)) * stride);
16242}
16243
16244/*
16245 * Called only on E1H or E2.
16246 * When pretending to be PF, the pretend value is the function number 0..7.
16247 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16248 * combination.
16249 */
16250static int
16251bxe_pretend_func(struct bxe_softc *sc,
16252                 uint16_t         pretend_func_val)
16253{
16254    uint32_t pretend_reg;
16255
16256    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16257        return (-1);
16258    }
16259
16260    /* get my own pretend register */
16261    pretend_reg = bxe_get_pretend_reg(sc);
16262    REG_WR(sc, pretend_reg, pretend_func_val);
16263    REG_RD(sc, pretend_reg);
16264    return (0);
16265}
16266
16267static void
16268bxe_iov_init_dmae(struct bxe_softc *sc)
16269{
16270    return;
16271}
16272
16273static void
16274bxe_iov_init_dq(struct bxe_softc *sc)
16275{
16276    return;
16277}
16278
16279/* send a NIG loopback debug packet */
16280static void
16281bxe_lb_pckt(struct bxe_softc *sc)
16282{
16283    uint32_t wb_write[3];
16284
16285    /* Ethernet source and destination addresses */
16286    wb_write[0] = 0x55555555;
16287    wb_write[1] = 0x55555555;
16288    wb_write[2] = 0x20;     /* SOP */
16289    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16290
16291    /* NON-IP protocol */
16292    wb_write[0] = 0x09000000;
16293    wb_write[1] = 0x55555555;
16294    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16295    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16296}
16297
16298/*
16299 * Some of the internal memories are not directly readable from the driver.
16300 * To test them we send debug packets.
16301 */
16302static int
16303bxe_int_mem_test(struct bxe_softc *sc)
16304{
16305    int factor;
16306    int count, i;
16307    uint32_t val = 0;
16308
16309    if (CHIP_REV_IS_FPGA(sc)) {
16310        factor = 120;
16311    } else if (CHIP_REV_IS_EMUL(sc)) {
16312        factor = 200;
16313    } else {
16314        factor = 1;
16315    }
16316
16317    /* disable inputs of parser neighbor blocks */
16318    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16319    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16320    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16321    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16322
16323    /*  write 0 to parser credits for CFC search request */
16324    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16325
16326    /* send Ethernet packet */
16327    bxe_lb_pckt(sc);
16328
16329    /* TODO do i reset NIG statistic? */
16330    /* Wait until NIG register shows 1 packet of size 0x10 */
16331    count = 1000 * factor;
16332    while (count) {
16333        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16334        val = *BXE_SP(sc, wb_data[0]);
16335        if (val == 0x10) {
16336            break;
16337        }
16338
16339        DELAY(10000);
16340        count--;
16341    }
16342
16343    if (val != 0x10) {
16344        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16345        return (-1);
16346    }
16347
16348    /* wait until PRS register shows 1 packet */
16349    count = (1000 * factor);
16350    while (count) {
16351        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16352        if (val == 1) {
16353            break;
16354        }
16355
16356        DELAY(10000);
16357        count--;
16358    }
16359
16360    if (val != 0x1) {
16361        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16362        return (-2);
16363    }
16364
16365    /* Reset and init BRB, PRS */
16366    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16367    DELAY(50000);
16368    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16369    DELAY(50000);
16370    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16371    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16372
16373    /* Disable inputs of parser neighbor blocks */
16374    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16375    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16376    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16377    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16378
16379    /* Write 0 to parser credits for CFC search request */
16380    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16381
16382    /* send 10 Ethernet packets */
16383    for (i = 0; i < 10; i++) {
16384        bxe_lb_pckt(sc);
16385    }
16386
16387    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16388    count = (1000 * factor);
16389    while (count) {
16390        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16391        val = *BXE_SP(sc, wb_data[0]);
16392        if (val == 0xb0) {
16393            break;
16394        }
16395
16396        DELAY(10000);
16397        count--;
16398    }
16399
16400    if (val != 0xb0) {
16401        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16402        return (-3);
16403    }
16404
16405    /* Wait until PRS register shows 2 packets */
16406    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16407    if (val != 2) {
16408        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16409    }
16410
16411    /* Write 1 to parser credits for CFC search request */
16412    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16413
16414    /* Wait until PRS register shows 3 packets */
16415    DELAY(10000 * factor);
16416
16417    /* Wait until NIG register shows 1 packet of size 0x10 */
16418    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16419    if (val != 3) {
16420        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16421    }
16422
16423    /* clear NIG EOP FIFO */
16424    for (i = 0; i < 11; i++) {
16425        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16426    }
16427
16428    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16429    if (val != 1) {
16430        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16431        return (-4);
16432    }
16433
16434    /* Reset and init BRB, PRS, NIG */
16435    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16436    DELAY(50000);
16437    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16438    DELAY(50000);
16439    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16440    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16441    if (!CNIC_SUPPORT(sc)) {
16442        /* set NIC mode */
16443        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16444    }
16445
16446    /* Enable inputs of parser neighbor blocks */
16447    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16448    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16449    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16450    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16451
16452    return (0);
16453}
16454
16455static void
16456bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16457{
16458    int is_required;
16459    uint32_t val;
16460    int port;
16461
16462    is_required = 0;
16463    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16464           SHARED_HW_CFG_FAN_FAILURE_MASK);
16465
16466    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16467        is_required = 1;
16468    }
16469    /*
16470     * The fan failure mechanism is usually related to the PHY type since
16471     * the power consumption of the board is affected by the PHY. Currently,
16472     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16473     */
16474    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16475        for (port = PORT_0; port < PORT_MAX; port++) {
16476            is_required |= elink_fan_failure_det_req(sc,
16477                                                     sc->devinfo.shmem_base,
16478                                                     sc->devinfo.shmem2_base,
16479                                                     port);
16480        }
16481    }
16482
16483    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16484
16485    if (is_required == 0) {
16486        return;
16487    }
16488
16489    /* Fan failure is indicated by SPIO 5 */
16490    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16491
16492    /* set to active low mode */
16493    val = REG_RD(sc, MISC_REG_SPIO_INT);
16494    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16495    REG_WR(sc, MISC_REG_SPIO_INT, val);
16496
16497    /* enable interrupt to signal the IGU */
16498    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16499    val |= MISC_SPIO_SPIO5;
16500    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16501}
16502
16503static void
16504bxe_enable_blocks_attention(struct bxe_softc *sc)
16505{
16506    uint32_t val;
16507
16508    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16509    if (!CHIP_IS_E1x(sc)) {
16510        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16511    } else {
16512        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16513    }
16514    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16515    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16516    /*
16517     * mask read length error interrupts in brb for parser
16518     * (parsing unit and 'checksum and crc' unit)
16519     * these errors are legal (PU reads fixed length and CAC can cause
16520     * read length error on truncated packets)
16521     */
16522    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16523    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16524    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16525    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16526    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16527    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16528/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16529/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16530    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16531    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16532    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16533/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16534/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16535    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16536    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16537    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16538    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16539/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16540/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16541
16542    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16543           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16544           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16545    if (!CHIP_IS_E1x(sc)) {
16546        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16547                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16548    }
16549    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16550
16551    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16552    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16553    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16554/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16555
16556    if (!CHIP_IS_E1x(sc)) {
16557        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16558        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16559    }
16560
16561    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16562    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16563/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16564    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16565}
16566
16567/**
16568 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16569 *
16570 * @sc:     driver handle
16571 */
16572static int
16573bxe_init_hw_common(struct bxe_softc *sc)
16574{
16575    uint8_t abs_func_id;
16576    uint32_t val;
16577
16578    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16579          SC_ABS_FUNC(sc));
16580
16581    /*
16582     * take the RESET lock to protect undi_unload flow from accessing
16583     * registers while we are resetting the chip
16584     */
16585    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16586
16587    bxe_reset_common(sc);
16588
16589    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16590
16591    val = 0xfffc;
16592    if (CHIP_IS_E3(sc)) {
16593        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16594        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16595    }
16596
16597    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16598
16599    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16600
16601    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16602    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16603
16604    if (!CHIP_IS_E1x(sc)) {
16605        /*
16606         * 4-port mode or 2-port mode we need to turn off master-enable for
16607         * everyone. After that we turn it back on for self. So, we disregard
16608         * multi-function, and always disable all functions on the given path,
16609         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16610         */
16611        for (abs_func_id = SC_PATH(sc);
16612             abs_func_id < (E2_FUNC_MAX * 2);
16613             abs_func_id += 2) {
16614            if (abs_func_id == SC_ABS_FUNC(sc)) {
16615                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16616                continue;
16617            }
16618
16619            bxe_pretend_func(sc, abs_func_id);
16620
16621            /* clear pf enable */
16622            bxe_pf_disable(sc);
16623
16624            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16625        }
16626    }
16627
16628    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16629
16630    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16631
16632    if (CHIP_IS_E1(sc)) {
16633        /*
16634         * enable HW interrupt from PXP on USDM overflow
16635         * bit 16 on INT_MASK_0
16636         */
16637        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16638    }
16639
16640    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16641    bxe_init_pxp(sc);
16642
16643#ifdef __BIG_ENDIAN
16644    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16645    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16646    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16647    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16648    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16649    /* make sure this value is 0 */
16650    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16651
16652    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16653    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16654    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16655    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16656    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16657#endif
16658
16659    ecore_ilt_init_page_size(sc, INITOP_SET);
16660
16661    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16662        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16663    }
16664
16665    /* let the HW do it's magic... */
16666    DELAY(100000);
16667
16668    /* finish PXP init */
16669    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16670    if (val != 1) {
16671        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16672            val);
16673        return (-1);
16674    }
16675    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16676    if (val != 1) {
16677        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16678        return (-1);
16679    }
16680
16681    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16682
16683    /*
16684     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16685     * entries with value "0" and valid bit on. This needs to be done by the
16686     * first PF that is loaded in a path (i.e. common phase)
16687     */
16688    if (!CHIP_IS_E1x(sc)) {
16689/*
16690 * In E2 there is a bug in the timers block that can cause function 6 / 7
16691 * (i.e. vnic3) to start even if it is marked as "scan-off".
16692 * This occurs when a different function (func2,3) is being marked
16693 * as "scan-off". Real-life scenario for example: if a driver is being
16694 * load-unloaded while func6,7 are down. This will cause the timer to access
16695 * the ilt, translate to a logical address and send a request to read/write.
16696 * Since the ilt for the function that is down is not valid, this will cause
16697 * a translation error which is unrecoverable.
16698 * The Workaround is intended to make sure that when this happens nothing
16699 * fatal will occur. The workaround:
16700 *  1.  First PF driver which loads on a path will:
16701 *      a.  After taking the chip out of reset, by using pretend,
16702 *          it will write "0" to the following registers of
16703 *          the other vnics.
16704 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16705 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16706 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16707 *          And for itself it will write '1' to
16708 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16709 *          dmae-operations (writing to pram for example.)
16710 *          note: can be done for only function 6,7 but cleaner this
16711 *            way.
16712 *      b.  Write zero+valid to the entire ILT.
16713 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16714 *          VNIC3 (of that port). The range allocated will be the
16715 *          entire ILT. This is needed to prevent  ILT range error.
16716 *  2.  Any PF driver load flow:
16717 *      a.  ILT update with the physical addresses of the allocated
16718 *          logical pages.
16719 *      b.  Wait 20msec. - note that this timeout is needed to make
16720 *          sure there are no requests in one of the PXP internal
16721 *          queues with "old" ILT addresses.
16722 *      c.  PF enable in the PGLC.
16723 *      d.  Clear the was_error of the PF in the PGLC. (could have
16724 *          occurred while driver was down)
16725 *      e.  PF enable in the CFC (WEAK + STRONG)
16726 *      f.  Timers scan enable
16727 *  3.  PF driver unload flow:
16728 *      a.  Clear the Timers scan_en.
16729 *      b.  Polling for scan_on=0 for that PF.
16730 *      c.  Clear the PF enable bit in the PXP.
16731 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16732 *      e.  Write zero+valid to all ILT entries (The valid bit must
16733 *          stay set)
16734 *      f.  If this is VNIC 3 of a port then also init
16735 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16736 *          to the last enrty in the ILT.
16737 *
16738 *      Notes:
16739 *      Currently the PF error in the PGLC is non recoverable.
16740 *      In the future the there will be a recovery routine for this error.
16741 *      Currently attention is masked.
16742 *      Having an MCP lock on the load/unload process does not guarantee that
16743 *      there is no Timer disable during Func6/7 enable. This is because the
16744 *      Timers scan is currently being cleared by the MCP on FLR.
16745 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16746 *      there is error before clearing it. But the flow above is simpler and
16747 *      more general.
16748 *      All ILT entries are written by zero+valid and not just PF6/7
16749 *      ILT entries since in the future the ILT entries allocation for
16750 *      PF-s might be dynamic.
16751 */
16752        struct ilt_client_info ilt_cli;
16753        struct ecore_ilt ilt;
16754
16755        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16756        memset(&ilt, 0, sizeof(struct ecore_ilt));
16757
16758        /* initialize dummy TM client */
16759        ilt_cli.start      = 0;
16760        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16761        ilt_cli.client_num = ILT_CLIENT_TM;
16762
16763        /*
16764         * Step 1: set zeroes to all ilt page entries with valid bit on
16765         * Step 2: set the timers first/last ilt entry to point
16766         * to the entire range to prevent ILT range error for 3rd/4th
16767         * vnic (this code assumes existence of the vnic)
16768         *
16769         * both steps performed by call to ecore_ilt_client_init_op()
16770         * with dummy TM client
16771         *
16772         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16773         * and his brother are split registers
16774         */
16775
16776        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16777        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16778        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16779
16780        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16781        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16782        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16783    }
16784
16785    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16786    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16787
16788    if (!CHIP_IS_E1x(sc)) {
16789        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16790                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16791
16792        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16793        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16794
16795        /* let the HW do it's magic... */
16796        do {
16797            DELAY(200000);
16798            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16799        } while (factor-- && (val != 1));
16800
16801        if (val != 1) {
16802            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16803            return (-1);
16804        }
16805    }
16806
16807    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16808
16809    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16810
16811    bxe_iov_init_dmae(sc);
16812
16813    /* clean the DMAE memory */
16814    sc->dmae_ready = 1;
16815    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16816
16817    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16818
16819    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16820
16821    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16822
16823    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16824
16825    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16826    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16827    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16828    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16829
16830    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16831
16832    /* QM queues pointers table */
16833    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16834
16835    /* soft reset pulse */
16836    REG_WR(sc, QM_REG_SOFT_RESET, 1);
16837    REG_WR(sc, QM_REG_SOFT_RESET, 0);
16838
16839    if (CNIC_SUPPORT(sc))
16840        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
16841
16842    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
16843    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
16844    if (!CHIP_REV_IS_SLOW(sc)) {
16845        /* enable hw interrupt from doorbell Q */
16846        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16847    }
16848
16849    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16850
16851    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16852    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
16853
16854    if (!CHIP_IS_E1(sc)) {
16855        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
16856    }
16857
16858    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
16859        if (IS_MF_AFEX(sc)) {
16860            /*
16861             * configure that AFEX and VLAN headers must be
16862             * received in AFEX mode
16863             */
16864            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
16865            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
16866            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
16867            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
16868            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
16869        } else {
16870            /*
16871             * Bit-map indicating which L2 hdrs may appear
16872             * after the basic Ethernet header
16873             */
16874            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
16875                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16876        }
16877    }
16878
16879    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
16880    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
16881    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
16882    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
16883
16884    if (!CHIP_IS_E1x(sc)) {
16885        /* reset VFC memories */
16886        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16887               VFC_MEMORIES_RST_REG_CAM_RST |
16888               VFC_MEMORIES_RST_REG_RAM_RST);
16889        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16890               VFC_MEMORIES_RST_REG_CAM_RST |
16891               VFC_MEMORIES_RST_REG_RAM_RST);
16892
16893        DELAY(20000);
16894    }
16895
16896    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
16897    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
16898    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
16899    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
16900
16901    /* sync semi rtc */
16902    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
16903           0x80000000);
16904    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
16905           0x80000000);
16906
16907    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
16908    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
16909    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
16910
16911    if (!CHIP_IS_E1x(sc)) {
16912        if (IS_MF_AFEX(sc)) {
16913            /*
16914             * configure that AFEX and VLAN headers must be
16915             * sent in AFEX mode
16916             */
16917            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
16918            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
16919            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
16920            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
16921            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
16922        } else {
16923            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
16924                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16925        }
16926    }
16927
16928    REG_WR(sc, SRC_REG_SOFT_RST, 1);
16929
16930    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
16931
16932    if (CNIC_SUPPORT(sc)) {
16933        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
16934        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
16935        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
16936        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
16937        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
16938        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
16939        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
16940        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
16941        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
16942        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
16943    }
16944    REG_WR(sc, SRC_REG_SOFT_RST, 0);
16945
16946    if (sizeof(union cdu_context) != 1024) {
16947        /* we currently assume that a context is 1024 bytes */
16948        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
16949              (long)sizeof(union cdu_context));
16950    }
16951
16952    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
16953    val = (4 << 24) + (0 << 12) + 1024;
16954    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
16955
16956    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
16957
16958    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
16959    /* enable context validation interrupt from CFC */
16960    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16961
16962    /* set the thresholds to prevent CFC/CDU race */
16963    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
16964    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
16965
16966    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
16967        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
16968    }
16969
16970    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
16971    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
16972
16973    /* Reset PCIE errors for debug */
16974    REG_WR(sc, 0x2814, 0xffffffff);
16975    REG_WR(sc, 0x3820, 0xffffffff);
16976
16977    if (!CHIP_IS_E1x(sc)) {
16978        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
16979               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
16980                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
16981        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
16982               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
16983                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
16984                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
16985        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
16986               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
16987                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
16988                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
16989    }
16990
16991    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
16992
16993    if (!CHIP_IS_E1(sc)) {
16994        /* in E3 this done in per-port section */
16995        if (!CHIP_IS_E3(sc))
16996            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
16997    }
16998
16999    if (CHIP_IS_E1H(sc)) {
17000        /* not applicable for E2 (and above ...) */
17001        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17002    }
17003
17004    if (CHIP_REV_IS_SLOW(sc)) {
17005        DELAY(200000);
17006    }
17007
17008    /* finish CFC init */
17009    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17010    if (val != 1) {
17011        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17012        return (-1);
17013    }
17014    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17015    if (val != 1) {
17016        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17017        return (-1);
17018    }
17019    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17020    if (val != 1) {
17021        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17022        return (-1);
17023    }
17024    REG_WR(sc, CFC_REG_DEBUG0, 0);
17025
17026    if (CHIP_IS_E1(sc)) {
17027        /* read NIG statistic to see if this is our first up since powerup */
17028        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17029        val = *BXE_SP(sc, wb_data[0]);
17030
17031        /* do internal memory self test */
17032        if ((val == 0) && bxe_int_mem_test(sc)) {
17033            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17034            return (-1);
17035        }
17036    }
17037
17038    bxe_setup_fan_failure_detection(sc);
17039
17040    /* clear PXP2 attentions */
17041    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17042
17043    bxe_enable_blocks_attention(sc);
17044
17045    if (!CHIP_REV_IS_SLOW(sc)) {
17046        ecore_enable_blocks_parity(sc);
17047    }
17048
17049    if (!BXE_NOMCP(sc)) {
17050        if (CHIP_IS_E1x(sc)) {
17051            bxe_common_init_phy(sc);
17052        }
17053    }
17054
17055    return (0);
17056}
17057
17058/**
17059 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17060 *
17061 * @sc:     driver handle
17062 */
17063static int
17064bxe_init_hw_common_chip(struct bxe_softc *sc)
17065{
17066    int rc = bxe_init_hw_common(sc);
17067
17068    if (rc) {
17069        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17070        return (rc);
17071    }
17072
17073    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17074    if (!BXE_NOMCP(sc)) {
17075        bxe_common_init_phy(sc);
17076    }
17077
17078    return (0);
17079}
17080
17081static int
17082bxe_init_hw_port(struct bxe_softc *sc)
17083{
17084    int port = SC_PORT(sc);
17085    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17086    uint32_t low, high;
17087    uint32_t val;
17088
17089    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17090
17091    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17092
17093    ecore_init_block(sc, BLOCK_MISC, init_phase);
17094    ecore_init_block(sc, BLOCK_PXP, init_phase);
17095    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17096
17097    /*
17098     * Timers bug workaround: disables the pf_master bit in pglue at
17099     * common phase, we need to enable it here before any dmae access are
17100     * attempted. Therefore we manually added the enable-master to the
17101     * port phase (it also happens in the function phase)
17102     */
17103    if (!CHIP_IS_E1x(sc)) {
17104        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17105    }
17106
17107    ecore_init_block(sc, BLOCK_ATC, init_phase);
17108    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17109    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17110    ecore_init_block(sc, BLOCK_QM, init_phase);
17111
17112    ecore_init_block(sc, BLOCK_TCM, init_phase);
17113    ecore_init_block(sc, BLOCK_UCM, init_phase);
17114    ecore_init_block(sc, BLOCK_CCM, init_phase);
17115    ecore_init_block(sc, BLOCK_XCM, init_phase);
17116
17117    /* QM cid (connection) count */
17118    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17119
17120    if (CNIC_SUPPORT(sc)) {
17121        ecore_init_block(sc, BLOCK_TM, init_phase);
17122        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17123        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17124    }
17125
17126    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17127
17128    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17129
17130    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17131        if (IS_MF(sc)) {
17132            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17133        } else if (sc->mtu > 4096) {
17134            if (BXE_ONE_PORT(sc)) {
17135                low = 160;
17136            } else {
17137                val = sc->mtu;
17138                /* (24*1024 + val*4)/256 */
17139                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17140            }
17141        } else {
17142            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17143        }
17144        high = (low + 56); /* 14*1024/256 */
17145        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17146        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17147    }
17148
17149    if (CHIP_IS_MODE_4_PORT(sc)) {
17150        REG_WR(sc, SC_PORT(sc) ?
17151               BRB1_REG_MAC_GUARANTIED_1 :
17152               BRB1_REG_MAC_GUARANTIED_0, 40);
17153    }
17154
17155    ecore_init_block(sc, BLOCK_PRS, init_phase);
17156    if (CHIP_IS_E3B0(sc)) {
17157        if (IS_MF_AFEX(sc)) {
17158            /* configure headers for AFEX mode */
17159            REG_WR(sc, SC_PORT(sc) ?
17160                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17161                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17162            REG_WR(sc, SC_PORT(sc) ?
17163                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17164                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17165            REG_WR(sc, SC_PORT(sc) ?
17166                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17167                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17168        } else {
17169            /* Ovlan exists only if we are in multi-function +
17170             * switch-dependent mode, in switch-independent there
17171             * is no ovlan headers
17172             */
17173            REG_WR(sc, SC_PORT(sc) ?
17174                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17175                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17176                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17177        }
17178    }
17179
17180    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17181    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17182    ecore_init_block(sc, BLOCK_USDM, init_phase);
17183    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17184
17185    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17186    ecore_init_block(sc, BLOCK_USEM, init_phase);
17187    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17188    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17189
17190    ecore_init_block(sc, BLOCK_UPB, init_phase);
17191    ecore_init_block(sc, BLOCK_XPB, init_phase);
17192
17193    ecore_init_block(sc, BLOCK_PBF, init_phase);
17194
17195    if (CHIP_IS_E1x(sc)) {
17196        /* configure PBF to work without PAUSE mtu 9000 */
17197        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17198
17199        /* update threshold */
17200        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17201        /* update init credit */
17202        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17203
17204        /* probe changes */
17205        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17206        DELAY(50);
17207        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17208    }
17209
17210    if (CNIC_SUPPORT(sc)) {
17211        ecore_init_block(sc, BLOCK_SRC, init_phase);
17212    }
17213
17214    ecore_init_block(sc, BLOCK_CDU, init_phase);
17215    ecore_init_block(sc, BLOCK_CFC, init_phase);
17216
17217    if (CHIP_IS_E1(sc)) {
17218        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17219        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17220    }
17221    ecore_init_block(sc, BLOCK_HC, init_phase);
17222
17223    ecore_init_block(sc, BLOCK_IGU, init_phase);
17224
17225    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17226    /* init aeu_mask_attn_func_0/1:
17227     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17228     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17229     *             bits 4-7 are used for "per vn group attention" */
17230    val = IS_MF(sc) ? 0xF7 : 0x7;
17231    /* Enable DCBX attention for all but E1 */
17232    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17233    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17234
17235    ecore_init_block(sc, BLOCK_NIG, init_phase);
17236
17237    if (!CHIP_IS_E1x(sc)) {
17238        /* Bit-map indicating which L2 hdrs may appear after the
17239         * basic Ethernet header
17240         */
17241        if (IS_MF_AFEX(sc)) {
17242            REG_WR(sc, SC_PORT(sc) ?
17243                   NIG_REG_P1_HDRS_AFTER_BASIC :
17244                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17245        } else {
17246            REG_WR(sc, SC_PORT(sc) ?
17247                   NIG_REG_P1_HDRS_AFTER_BASIC :
17248                   NIG_REG_P0_HDRS_AFTER_BASIC,
17249                   IS_MF_SD(sc) ? 7 : 6);
17250        }
17251
17252        if (CHIP_IS_E3(sc)) {
17253            REG_WR(sc, SC_PORT(sc) ?
17254                   NIG_REG_LLH1_MF_MODE :
17255                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17256        }
17257    }
17258    if (!CHIP_IS_E3(sc)) {
17259        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17260    }
17261
17262    if (!CHIP_IS_E1(sc)) {
17263        /* 0x2 disable mf_ov, 0x1 enable */
17264        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17265               (IS_MF_SD(sc) ? 0x1 : 0x2));
17266
17267        if (!CHIP_IS_E1x(sc)) {
17268            val = 0;
17269            switch (sc->devinfo.mf_info.mf_mode) {
17270            case MULTI_FUNCTION_SD:
17271                val = 1;
17272                break;
17273            case MULTI_FUNCTION_SI:
17274            case MULTI_FUNCTION_AFEX:
17275                val = 2;
17276                break;
17277            }
17278
17279            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17280                        NIG_REG_LLH0_CLS_TYPE), val);
17281        }
17282        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17283        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17284        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17285    }
17286
17287    /* If SPIO5 is set to generate interrupts, enable it for this port */
17288    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17289    if (val & MISC_SPIO_SPIO5) {
17290        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17291                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17292        val = REG_RD(sc, reg_addr);
17293        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17294        REG_WR(sc, reg_addr, val);
17295    }
17296
17297    return (0);
17298}
17299
17300static uint32_t
17301bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17302                       uint32_t         reg,
17303                       uint32_t         expected,
17304                       uint32_t         poll_count)
17305{
17306    uint32_t cur_cnt = poll_count;
17307    uint32_t val;
17308
17309    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17310        DELAY(FLR_WAIT_INTERVAL);
17311    }
17312
17313    return (val);
17314}
17315
17316static int
17317bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17318                              uint32_t         reg,
17319                              char             *msg,
17320                              uint32_t         poll_cnt)
17321{
17322    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17323
17324    if (val != 0) {
17325        BLOGE(sc, "%s usage count=%d\n", msg, val);
17326        return (1);
17327    }
17328
17329    return (0);
17330}
17331
17332/* Common routines with VF FLR cleanup */
17333static uint32_t
17334bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17335{
17336    /* adjust polling timeout */
17337    if (CHIP_REV_IS_EMUL(sc)) {
17338        return (FLR_POLL_CNT * 2000);
17339    }
17340
17341    if (CHIP_REV_IS_FPGA(sc)) {
17342        return (FLR_POLL_CNT * 120);
17343    }
17344
17345    return (FLR_POLL_CNT);
17346}
17347
17348static int
17349bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17350                           uint32_t         poll_cnt)
17351{
17352    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17353    if (bxe_flr_clnup_poll_hw_counter(sc,
17354                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17355                                      "CFC PF usage counter timed out",
17356                                      poll_cnt)) {
17357        return (1);
17358    }
17359
17360    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17361    if (bxe_flr_clnup_poll_hw_counter(sc,
17362                                      DORQ_REG_PF_USAGE_CNT,
17363                                      "DQ PF usage counter timed out",
17364                                      poll_cnt)) {
17365        return (1);
17366    }
17367
17368    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17369    if (bxe_flr_clnup_poll_hw_counter(sc,
17370                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17371                                      "QM PF usage counter timed out",
17372                                      poll_cnt)) {
17373        return (1);
17374    }
17375
17376    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17377    if (bxe_flr_clnup_poll_hw_counter(sc,
17378                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17379                                      "Timers VNIC usage counter timed out",
17380                                      poll_cnt)) {
17381        return (1);
17382    }
17383
17384    if (bxe_flr_clnup_poll_hw_counter(sc,
17385                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17386                                      "Timers NUM_SCANS usage counter timed out",
17387                                      poll_cnt)) {
17388        return (1);
17389    }
17390
17391    /* Wait DMAE PF usage counter to zero */
17392    if (bxe_flr_clnup_poll_hw_counter(sc,
17393                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17394                                      "DMAE dommand register timed out",
17395                                      poll_cnt)) {
17396        return (1);
17397    }
17398
17399    return (0);
17400}
17401
17402#define OP_GEN_PARAM(param)                                            \
17403    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17404#define OP_GEN_TYPE(type)                                           \
17405    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17406#define OP_GEN_AGG_VECT(index)                                             \
17407    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17408
17409static int
17410bxe_send_final_clnup(struct bxe_softc *sc,
17411                     uint8_t          clnup_func,
17412                     uint32_t         poll_cnt)
17413{
17414    uint32_t op_gen_command = 0;
17415    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17416                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17417    int ret = 0;
17418
17419    if (REG_RD(sc, comp_addr)) {
17420        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17421        return (1);
17422    }
17423
17424    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17425    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17426    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17427    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17428
17429    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17430    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17431
17432    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17433        BLOGE(sc, "FW final cleanup did not succeed\n");
17434        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17435              (REG_RD(sc, comp_addr)));
17436        bxe_panic(sc, ("FLR cleanup failed\n"));
17437        return (1);
17438    }
17439
17440    /* Zero completion for nxt FLR */
17441    REG_WR(sc, comp_addr, 0);
17442
17443    return (ret);
17444}
17445
17446static void
17447bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17448                       struct pbf_pN_buf_regs *regs,
17449                       uint32_t               poll_count)
17450{
17451    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17452    uint32_t cur_cnt = poll_count;
17453
17454    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17455    crd = crd_start = REG_RD(sc, regs->crd);
17456    init_crd = REG_RD(sc, regs->init_crd);
17457
17458    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17459    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17460    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17461
17462    while ((crd != init_crd) &&
17463           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17464            (init_crd - crd_start))) {
17465        if (cur_cnt--) {
17466            DELAY(FLR_WAIT_INTERVAL);
17467            crd = REG_RD(sc, regs->crd);
17468            crd_freed = REG_RD(sc, regs->crd_freed);
17469        } else {
17470            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17471            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17472            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17473            break;
17474        }
17475    }
17476
17477    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17478          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17479}
17480
17481static void
17482bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17483                       struct pbf_pN_cmd_regs *regs,
17484                       uint32_t               poll_count)
17485{
17486    uint32_t occup, to_free, freed, freed_start;
17487    uint32_t cur_cnt = poll_count;
17488
17489    occup = to_free = REG_RD(sc, regs->lines_occup);
17490    freed = freed_start = REG_RD(sc, regs->lines_freed);
17491
17492    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17493    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17494
17495    while (occup &&
17496           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17497        if (cur_cnt--) {
17498            DELAY(FLR_WAIT_INTERVAL);
17499            occup = REG_RD(sc, regs->lines_occup);
17500            freed = REG_RD(sc, regs->lines_freed);
17501        } else {
17502            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17503            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17504            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17505            break;
17506        }
17507    }
17508
17509    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17510          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17511}
17512
17513static void
17514bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17515{
17516    struct pbf_pN_cmd_regs cmd_regs[] = {
17517        {0, (CHIP_IS_E3B0(sc)) ?
17518            PBF_REG_TQ_OCCUPANCY_Q0 :
17519            PBF_REG_P0_TQ_OCCUPANCY,
17520            (CHIP_IS_E3B0(sc)) ?
17521            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17522            PBF_REG_P0_TQ_LINES_FREED_CNT},
17523        {1, (CHIP_IS_E3B0(sc)) ?
17524            PBF_REG_TQ_OCCUPANCY_Q1 :
17525            PBF_REG_P1_TQ_OCCUPANCY,
17526            (CHIP_IS_E3B0(sc)) ?
17527            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17528            PBF_REG_P1_TQ_LINES_FREED_CNT},
17529        {4, (CHIP_IS_E3B0(sc)) ?
17530            PBF_REG_TQ_OCCUPANCY_LB_Q :
17531            PBF_REG_P4_TQ_OCCUPANCY,
17532            (CHIP_IS_E3B0(sc)) ?
17533            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17534            PBF_REG_P4_TQ_LINES_FREED_CNT}
17535    };
17536
17537    struct pbf_pN_buf_regs buf_regs[] = {
17538        {0, (CHIP_IS_E3B0(sc)) ?
17539            PBF_REG_INIT_CRD_Q0 :
17540            PBF_REG_P0_INIT_CRD ,
17541            (CHIP_IS_E3B0(sc)) ?
17542            PBF_REG_CREDIT_Q0 :
17543            PBF_REG_P0_CREDIT,
17544            (CHIP_IS_E3B0(sc)) ?
17545            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17546            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17547        {1, (CHIP_IS_E3B0(sc)) ?
17548            PBF_REG_INIT_CRD_Q1 :
17549            PBF_REG_P1_INIT_CRD,
17550            (CHIP_IS_E3B0(sc)) ?
17551            PBF_REG_CREDIT_Q1 :
17552            PBF_REG_P1_CREDIT,
17553            (CHIP_IS_E3B0(sc)) ?
17554            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17555            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17556        {4, (CHIP_IS_E3B0(sc)) ?
17557            PBF_REG_INIT_CRD_LB_Q :
17558            PBF_REG_P4_INIT_CRD,
17559            (CHIP_IS_E3B0(sc)) ?
17560            PBF_REG_CREDIT_LB_Q :
17561            PBF_REG_P4_CREDIT,
17562            (CHIP_IS_E3B0(sc)) ?
17563            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17564            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17565    };
17566
17567    int i;
17568
17569    /* Verify the command queues are flushed P0, P1, P4 */
17570    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17571        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17572    }
17573
17574    /* Verify the transmission buffers are flushed P0, P1, P4 */
17575    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17576        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17577    }
17578}
17579
17580static void
17581bxe_hw_enable_status(struct bxe_softc *sc)
17582{
17583    uint32_t val;
17584
17585    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17586    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17587
17588    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17589    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17590
17591    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17592    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17593
17594    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17595    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17596
17597    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17598    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17599
17600    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17601    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17602
17603    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17604    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17605
17606    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17607    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17608}
17609
17610static int
17611bxe_pf_flr_clnup(struct bxe_softc *sc)
17612{
17613    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17614
17615    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17616
17617    /* Re-enable PF target read access */
17618    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17619
17620    /* Poll HW usage counters */
17621    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17622    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17623        return (-1);
17624    }
17625
17626    /* Zero the igu 'trailing edge' and 'leading edge' */
17627
17628    /* Send the FW cleanup command */
17629    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17630        return (-1);
17631    }
17632
17633    /* ATC cleanup */
17634
17635    /* Verify TX hw is flushed */
17636    bxe_tx_hw_flushed(sc, poll_cnt);
17637
17638    /* Wait 100ms (not adjusted according to platform) */
17639    DELAY(100000);
17640
17641    /* Verify no pending pci transactions */
17642    if (bxe_is_pcie_pending(sc)) {
17643        BLOGE(sc, "PCIE Transactions still pending\n");
17644    }
17645
17646    /* Debug */
17647    bxe_hw_enable_status(sc);
17648
17649    /*
17650     * Master enable - Due to WB DMAE writes performed before this
17651     * register is re-initialized as part of the regular function init
17652     */
17653    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17654
17655    return (0);
17656}
17657
17658static int
17659bxe_init_hw_func(struct bxe_softc *sc)
17660{
17661    int port = SC_PORT(sc);
17662    int func = SC_FUNC(sc);
17663    int init_phase = PHASE_PF0 + func;
17664    struct ecore_ilt *ilt = sc->ilt;
17665    uint16_t cdu_ilt_start;
17666    uint32_t addr, val;
17667    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17668    int i, main_mem_width, rc;
17669
17670    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17671
17672    /* FLR cleanup */
17673    if (!CHIP_IS_E1x(sc)) {
17674        rc = bxe_pf_flr_clnup(sc);
17675        if (rc) {
17676            BLOGE(sc, "FLR cleanup failed!\n");
17677            // XXX bxe_fw_dump(sc);
17678            // XXX bxe_idle_chk(sc);
17679            return (rc);
17680        }
17681    }
17682
17683    /* set MSI reconfigure capability */
17684    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17685        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17686        val = REG_RD(sc, addr);
17687        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17688        REG_WR(sc, addr, val);
17689    }
17690
17691    ecore_init_block(sc, BLOCK_PXP, init_phase);
17692    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17693
17694    ilt = sc->ilt;
17695    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17696
17697    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17698        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17699        ilt->lines[cdu_ilt_start + i].page_mapping =
17700            sc->context[i].vcxt_dma.paddr;
17701        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17702    }
17703    ecore_ilt_init_op(sc, INITOP_SET);
17704
17705    /* Set NIC mode */
17706    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17707    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17708
17709    if (!CHIP_IS_E1x(sc)) {
17710        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17711
17712        /* Turn on a single ISR mode in IGU if driver is going to use
17713         * INT#x or MSI
17714         */
17715        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17716            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17717        }
17718
17719        /*
17720         * Timers workaround bug: function init part.
17721         * Need to wait 20msec after initializing ILT,
17722         * needed to make sure there are no requests in
17723         * one of the PXP internal queues with "old" ILT addresses
17724         */
17725        DELAY(20000);
17726
17727        /*
17728         * Master enable - Due to WB DMAE writes performed before this
17729         * register is re-initialized as part of the regular function
17730         * init
17731         */
17732        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17733        /* Enable the function in IGU */
17734        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17735    }
17736
17737    sc->dmae_ready = 1;
17738
17739    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17740
17741    if (!CHIP_IS_E1x(sc))
17742        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17743
17744    ecore_init_block(sc, BLOCK_ATC, init_phase);
17745    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17746    ecore_init_block(sc, BLOCK_NIG, init_phase);
17747    ecore_init_block(sc, BLOCK_SRC, init_phase);
17748    ecore_init_block(sc, BLOCK_MISC, init_phase);
17749    ecore_init_block(sc, BLOCK_TCM, init_phase);
17750    ecore_init_block(sc, BLOCK_UCM, init_phase);
17751    ecore_init_block(sc, BLOCK_CCM, init_phase);
17752    ecore_init_block(sc, BLOCK_XCM, init_phase);
17753    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17754    ecore_init_block(sc, BLOCK_USEM, init_phase);
17755    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17756    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17757
17758    if (!CHIP_IS_E1x(sc))
17759        REG_WR(sc, QM_REG_PF_EN, 1);
17760
17761    if (!CHIP_IS_E1x(sc)) {
17762        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17763        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17764        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17765        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17766    }
17767    ecore_init_block(sc, BLOCK_QM, init_phase);
17768
17769    ecore_init_block(sc, BLOCK_TM, init_phase);
17770    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17771
17772    bxe_iov_init_dq(sc);
17773
17774    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17775    ecore_init_block(sc, BLOCK_PRS, init_phase);
17776    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17777    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17778    ecore_init_block(sc, BLOCK_USDM, init_phase);
17779    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17780    ecore_init_block(sc, BLOCK_UPB, init_phase);
17781    ecore_init_block(sc, BLOCK_XPB, init_phase);
17782    ecore_init_block(sc, BLOCK_PBF, init_phase);
17783    if (!CHIP_IS_E1x(sc))
17784        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17785
17786    ecore_init_block(sc, BLOCK_CDU, init_phase);
17787
17788    ecore_init_block(sc, BLOCK_CFC, init_phase);
17789
17790    if (!CHIP_IS_E1x(sc))
17791        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17792
17793    if (IS_MF(sc)) {
17794        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17795        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17796    }
17797
17798    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17799
17800    /* HC init per function */
17801    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17802        if (CHIP_IS_E1H(sc)) {
17803            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17804
17805            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17806            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17807        }
17808        ecore_init_block(sc, BLOCK_HC, init_phase);
17809
17810    } else {
17811        int num_segs, sb_idx, prod_offset;
17812
17813        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17814
17815        if (!CHIP_IS_E1x(sc)) {
17816            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17817            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17818        }
17819
17820        ecore_init_block(sc, BLOCK_IGU, init_phase);
17821
17822        if (!CHIP_IS_E1x(sc)) {
17823            int dsb_idx = 0;
17824            /**
17825             * Producer memory:
17826             * E2 mode: address 0-135 match to the mapping memory;
17827             * 136 - PF0 default prod; 137 - PF1 default prod;
17828             * 138 - PF2 default prod; 139 - PF3 default prod;
17829             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17830             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17831             * 144-147 reserved.
17832             *
17833             * E1.5 mode - In backward compatible mode;
17834             * for non default SB; each even line in the memory
17835             * holds the U producer and each odd line hold
17836             * the C producer. The first 128 producers are for
17837             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
17838             * producers are for the DSB for each PF.
17839             * Each PF has five segments: (the order inside each
17840             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
17841             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
17842             * 144-147 attn prods;
17843             */
17844            /* non-default-status-blocks */
17845            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17846                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
17847            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
17848                prod_offset = (sc->igu_base_sb + sb_idx) *
17849                    num_segs;
17850
17851                for (i = 0; i < num_segs; i++) {
17852                    addr = IGU_REG_PROD_CONS_MEMORY +
17853                            (prod_offset + i) * 4;
17854                    REG_WR(sc, addr, 0);
17855                }
17856                /* send consumer update with value 0 */
17857                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
17858                           USTORM_ID, 0, IGU_INT_NOP, 1);
17859                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
17860            }
17861
17862            /* default-status-blocks */
17863            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17864                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
17865
17866            if (CHIP_IS_MODE_4_PORT(sc))
17867                dsb_idx = SC_FUNC(sc);
17868            else
17869                dsb_idx = SC_VN(sc);
17870
17871            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
17872                       IGU_BC_BASE_DSB_PROD + dsb_idx :
17873                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
17874
17875            /*
17876             * igu prods come in chunks of E1HVN_MAX (4) -
17877             * does not matters what is the current chip mode
17878             */
17879            for (i = 0; i < (num_segs * E1HVN_MAX);
17880                 i += E1HVN_MAX) {
17881                addr = IGU_REG_PROD_CONS_MEMORY +
17882                            (prod_offset + i)*4;
17883                REG_WR(sc, addr, 0);
17884            }
17885            /* send consumer update with 0 */
17886            if (CHIP_INT_MODE_IS_BC(sc)) {
17887                bxe_ack_sb(sc, sc->igu_dsb_id,
17888                           USTORM_ID, 0, IGU_INT_NOP, 1);
17889                bxe_ack_sb(sc, sc->igu_dsb_id,
17890                           CSTORM_ID, 0, IGU_INT_NOP, 1);
17891                bxe_ack_sb(sc, sc->igu_dsb_id,
17892                           XSTORM_ID, 0, IGU_INT_NOP, 1);
17893                bxe_ack_sb(sc, sc->igu_dsb_id,
17894                           TSTORM_ID, 0, IGU_INT_NOP, 1);
17895                bxe_ack_sb(sc, sc->igu_dsb_id,
17896                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17897            } else {
17898                bxe_ack_sb(sc, sc->igu_dsb_id,
17899                           USTORM_ID, 0, IGU_INT_NOP, 1);
17900                bxe_ack_sb(sc, sc->igu_dsb_id,
17901                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17902            }
17903            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
17904
17905            /* !!! these should become driver const once
17906               rf-tool supports split-68 const */
17907            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
17908            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
17909            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
17910            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
17911            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
17912            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
17913        }
17914    }
17915
17916    /* Reset PCIE errors for debug */
17917    REG_WR(sc, 0x2114, 0xffffffff);
17918    REG_WR(sc, 0x2120, 0xffffffff);
17919
17920    if (CHIP_IS_E1x(sc)) {
17921        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
17922        main_mem_base = HC_REG_MAIN_MEMORY +
17923                SC_PORT(sc) * (main_mem_size * 4);
17924        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
17925        main_mem_width = 8;
17926
17927        val = REG_RD(sc, main_mem_prty_clr);
17928        if (val) {
17929            BLOGD(sc, DBG_LOAD,
17930                  "Parity errors in HC block during function init (0x%x)!\n",
17931                  val);
17932        }
17933
17934        /* Clear "false" parity errors in MSI-X table */
17935        for (i = main_mem_base;
17936             i < main_mem_base + main_mem_size * 4;
17937             i += main_mem_width) {
17938            bxe_read_dmae(sc, i, main_mem_width / 4);
17939            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
17940                           i, main_mem_width / 4);
17941        }
17942        /* Clear HC parity attention */
17943        REG_RD(sc, main_mem_prty_clr);
17944    }
17945
17946#if 1
17947    /* Enable STORMs SP logging */
17948    REG_WR8(sc, BAR_USTRORM_INTMEM +
17949           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17950    REG_WR8(sc, BAR_TSTRORM_INTMEM +
17951           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17952    REG_WR8(sc, BAR_CSTRORM_INTMEM +
17953           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17954    REG_WR8(sc, BAR_XSTRORM_INTMEM +
17955           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17956#endif
17957
17958    elink_phy_probe(&sc->link_params);
17959
17960    return (0);
17961}
17962
17963static void
17964bxe_link_reset(struct bxe_softc *sc)
17965{
17966    if (!BXE_NOMCP(sc)) {
17967	bxe_acquire_phy_lock(sc);
17968        elink_lfa_reset(&sc->link_params, &sc->link_vars);
17969	bxe_release_phy_lock(sc);
17970    } else {
17971        if (!CHIP_REV_IS_SLOW(sc)) {
17972            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
17973        }
17974    }
17975}
17976
17977static void
17978bxe_reset_port(struct bxe_softc *sc)
17979{
17980    int port = SC_PORT(sc);
17981    uint32_t val;
17982
17983    /* reset physical Link */
17984    bxe_link_reset(sc);
17985
17986    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17987
17988    /* Do not rcv packets to BRB */
17989    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
17990    /* Do not direct rcv packets that are not for MCP to the BRB */
17991    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
17992               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
17993
17994    /* Configure AEU */
17995    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
17996
17997    DELAY(100000);
17998
17999    /* Check for BRB port occupancy */
18000    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18001    if (val) {
18002        BLOGD(sc, DBG_LOAD,
18003              "BRB1 is not empty, %d blocks are occupied\n", val);
18004    }
18005
18006    /* TODO: Close Doorbell port? */
18007}
18008
18009static void
18010bxe_ilt_wr(struct bxe_softc *sc,
18011           uint32_t         index,
18012           bus_addr_t       addr)
18013{
18014    int reg;
18015    uint32_t wb_write[2];
18016
18017    if (CHIP_IS_E1(sc)) {
18018        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18019    } else {
18020        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18021    }
18022
18023    wb_write[0] = ONCHIP_ADDR1(addr);
18024    wb_write[1] = ONCHIP_ADDR2(addr);
18025    REG_WR_DMAE(sc, reg, wb_write, 2);
18026}
18027
18028static void
18029bxe_clear_func_ilt(struct bxe_softc *sc,
18030                   uint32_t         func)
18031{
18032    uint32_t i, base = FUNC_ILT_BASE(func);
18033    for (i = base; i < base + ILT_PER_FUNC; i++) {
18034        bxe_ilt_wr(sc, i, 0);
18035    }
18036}
18037
18038static void
18039bxe_reset_func(struct bxe_softc *sc)
18040{
18041    struct bxe_fastpath *fp;
18042    int port = SC_PORT(sc);
18043    int func = SC_FUNC(sc);
18044    int i;
18045
18046    /* Disable the function in the FW */
18047    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18048    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18049    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18050    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18051
18052    /* FP SBs */
18053    FOR_EACH_ETH_QUEUE(sc, i) {
18054        fp = &sc->fp[i];
18055        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18056                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18057                SB_DISABLED);
18058    }
18059
18060    /* SP SB */
18061    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18062            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18063            SB_DISABLED);
18064
18065    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18066        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18067    }
18068
18069    /* Configure IGU */
18070    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18071        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18072        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18073    } else {
18074        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18075        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18076    }
18077
18078    if (CNIC_LOADED(sc)) {
18079        /* Disable Timer scan */
18080        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18081        /*
18082         * Wait for at least 10ms and up to 2 second for the timers
18083         * scan to complete
18084         */
18085        for (i = 0; i < 200; i++) {
18086            DELAY(10000);
18087            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18088                break;
18089        }
18090    }
18091
18092    /* Clear ILT */
18093    bxe_clear_func_ilt(sc, func);
18094
18095    /*
18096     * Timers workaround bug for E2: if this is vnic-3,
18097     * we need to set the entire ilt range for this timers.
18098     */
18099    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18100        struct ilt_client_info ilt_cli;
18101        /* use dummy TM client */
18102        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18103        ilt_cli.start = 0;
18104        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18105        ilt_cli.client_num = ILT_CLIENT_TM;
18106
18107        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18108    }
18109
18110    /* this assumes that reset_port() called before reset_func()*/
18111    if (!CHIP_IS_E1x(sc)) {
18112        bxe_pf_disable(sc);
18113    }
18114
18115    sc->dmae_ready = 0;
18116}
18117
18118static int
18119bxe_gunzip_init(struct bxe_softc *sc)
18120{
18121    return (0);
18122}
18123
18124static void
18125bxe_gunzip_end(struct bxe_softc *sc)
18126{
18127    return;
18128}
18129
18130static int
18131bxe_init_firmware(struct bxe_softc *sc)
18132{
18133    if (CHIP_IS_E1(sc)) {
18134        ecore_init_e1_firmware(sc);
18135        sc->iro_array = e1_iro_arr;
18136    } else if (CHIP_IS_E1H(sc)) {
18137        ecore_init_e1h_firmware(sc);
18138        sc->iro_array = e1h_iro_arr;
18139    } else if (!CHIP_IS_E1x(sc)) {
18140        ecore_init_e2_firmware(sc);
18141        sc->iro_array = e2_iro_arr;
18142    } else {
18143        BLOGE(sc, "Unsupported chip revision\n");
18144        return (-1);
18145    }
18146
18147    return (0);
18148}
18149
18150static void
18151bxe_release_firmware(struct bxe_softc *sc)
18152{
18153    /* Do nothing */
18154    return;
18155}
18156
18157static int
18158ecore_gunzip(struct bxe_softc *sc,
18159             const uint8_t    *zbuf,
18160             int              len)
18161{
18162    /* XXX : Implement... */
18163    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18164    return (FALSE);
18165}
18166
18167static void
18168ecore_reg_wr_ind(struct bxe_softc *sc,
18169                 uint32_t         addr,
18170                 uint32_t         val)
18171{
18172    bxe_reg_wr_ind(sc, addr, val);
18173}
18174
18175static void
18176ecore_write_dmae_phys_len(struct bxe_softc *sc,
18177                          bus_addr_t       phys_addr,
18178                          uint32_t         addr,
18179                          uint32_t         len)
18180{
18181    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18182}
18183
18184void
18185ecore_storm_memset_struct(struct bxe_softc *sc,
18186                          uint32_t         addr,
18187                          size_t           size,
18188                          uint32_t         *data)
18189{
18190    uint8_t i;
18191    for (i = 0; i < size/4; i++) {
18192        REG_WR(sc, addr + (i * 4), data[i]);
18193    }
18194}
18195
18196
18197/*
18198 * character device - ioctl interface definitions
18199 */
18200
18201
18202#include "bxe_dump.h"
18203#include "bxe_ioctl.h"
18204#include <sys/conf.h>
18205
18206static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18207                struct thread *td);
18208
18209static struct cdevsw bxe_cdevsw = {
18210    .d_version = D_VERSION,
18211    .d_ioctl = bxe_eioctl,
18212    .d_name = "bxecnic",
18213};
18214
18215#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18216
18217
18218#define DUMP_ALL_PRESETS        0x1FFF
18219#define DUMP_MAX_PRESETS        13
18220#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18221#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18222#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18223#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18224#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18225
18226#define IS_REG_IN_PRESET(presets, idx)  \
18227                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18228
18229
18230static int
18231bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18232{
18233    if (CHIP_IS_E1(sc))
18234        return dump_num_registers[0][preset-1];
18235    else if (CHIP_IS_E1H(sc))
18236        return dump_num_registers[1][preset-1];
18237    else if (CHIP_IS_E2(sc))
18238        return dump_num_registers[2][preset-1];
18239    else if (CHIP_IS_E3A0(sc))
18240        return dump_num_registers[3][preset-1];
18241    else if (CHIP_IS_E3B0(sc))
18242        return dump_num_registers[4][preset-1];
18243    else
18244        return 0;
18245}
18246
18247static int
18248bxe_get_total_regs_len32(struct bxe_softc *sc)
18249{
18250    uint32_t preset_idx;
18251    int regdump_len32 = 0;
18252
18253
18254    /* Calculate the total preset regs length */
18255    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18256        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18257    }
18258
18259    return regdump_len32;
18260}
18261
18262static const uint32_t *
18263__bxe_get_page_addr_ar(struct bxe_softc *sc)
18264{
18265    if (CHIP_IS_E2(sc))
18266        return page_vals_e2;
18267    else if (CHIP_IS_E3(sc))
18268        return page_vals_e3;
18269    else
18270        return NULL;
18271}
18272
18273static uint32_t
18274__bxe_get_page_reg_num(struct bxe_softc *sc)
18275{
18276    if (CHIP_IS_E2(sc))
18277        return PAGE_MODE_VALUES_E2;
18278    else if (CHIP_IS_E3(sc))
18279        return PAGE_MODE_VALUES_E3;
18280    else
18281        return 0;
18282}
18283
18284static const uint32_t *
18285__bxe_get_page_write_ar(struct bxe_softc *sc)
18286{
18287    if (CHIP_IS_E2(sc))
18288        return page_write_regs_e2;
18289    else if (CHIP_IS_E3(sc))
18290        return page_write_regs_e3;
18291    else
18292        return NULL;
18293}
18294
18295static uint32_t
18296__bxe_get_page_write_num(struct bxe_softc *sc)
18297{
18298    if (CHIP_IS_E2(sc))
18299        return PAGE_WRITE_REGS_E2;
18300    else if (CHIP_IS_E3(sc))
18301        return PAGE_WRITE_REGS_E3;
18302    else
18303        return 0;
18304}
18305
18306static const struct reg_addr *
18307__bxe_get_page_read_ar(struct bxe_softc *sc)
18308{
18309    if (CHIP_IS_E2(sc))
18310        return page_read_regs_e2;
18311    else if (CHIP_IS_E3(sc))
18312        return page_read_regs_e3;
18313    else
18314        return NULL;
18315}
18316
18317static uint32_t
18318__bxe_get_page_read_num(struct bxe_softc *sc)
18319{
18320    if (CHIP_IS_E2(sc))
18321        return PAGE_READ_REGS_E2;
18322    else if (CHIP_IS_E3(sc))
18323        return PAGE_READ_REGS_E3;
18324    else
18325        return 0;
18326}
18327
18328static bool
18329bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18330{
18331    if (CHIP_IS_E1(sc))
18332        return IS_E1_REG(reg_info->chips);
18333    else if (CHIP_IS_E1H(sc))
18334        return IS_E1H_REG(reg_info->chips);
18335    else if (CHIP_IS_E2(sc))
18336        return IS_E2_REG(reg_info->chips);
18337    else if (CHIP_IS_E3A0(sc))
18338        return IS_E3A0_REG(reg_info->chips);
18339    else if (CHIP_IS_E3B0(sc))
18340        return IS_E3B0_REG(reg_info->chips);
18341    else
18342        return 0;
18343}
18344
18345static bool
18346bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18347{
18348    if (CHIP_IS_E1(sc))
18349        return IS_E1_REG(wreg_info->chips);
18350    else if (CHIP_IS_E1H(sc))
18351        return IS_E1H_REG(wreg_info->chips);
18352    else if (CHIP_IS_E2(sc))
18353        return IS_E2_REG(wreg_info->chips);
18354    else if (CHIP_IS_E3A0(sc))
18355        return IS_E3A0_REG(wreg_info->chips);
18356    else if (CHIP_IS_E3B0(sc))
18357        return IS_E3B0_REG(wreg_info->chips);
18358    else
18359        return 0;
18360}
18361
18362/**
18363 * bxe_read_pages_regs - read "paged" registers
18364 *
18365 * @bp          device handle
18366 * @p           output buffer
18367 *
18368 * Reads "paged" memories: memories that may only be read by first writing to a
18369 * specific address ("write address") and then reading from a specific address
18370 * ("read address"). There may be more than one write address per "page" and
18371 * more than one read address per write address.
18372 */
18373static void
18374bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18375{
18376    uint32_t i, j, k, n;
18377
18378    /* addresses of the paged registers */
18379    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18380    /* number of paged registers */
18381    int num_pages = __bxe_get_page_reg_num(sc);
18382    /* write addresses */
18383    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18384    /* number of write addresses */
18385    int write_num = __bxe_get_page_write_num(sc);
18386    /* read addresses info */
18387    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18388    /* number of read addresses */
18389    int read_num = __bxe_get_page_read_num(sc);
18390    uint32_t addr, size;
18391
18392    for (i = 0; i < num_pages; i++) {
18393        for (j = 0; j < write_num; j++) {
18394            REG_WR(sc, write_addr[j], page_addr[i]);
18395
18396            for (k = 0; k < read_num; k++) {
18397                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18398                    size = read_addr[k].size;
18399                    for (n = 0; n < size; n++) {
18400                        addr = read_addr[k].addr + n*4;
18401                        *p++ = REG_RD(sc, addr);
18402                    }
18403                }
18404            }
18405        }
18406    }
18407    return;
18408}
18409
18410
18411static int
18412bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18413{
18414    uint32_t i, j, addr;
18415    const struct wreg_addr *wreg_addr_p = NULL;
18416
18417    if (CHIP_IS_E1(sc))
18418        wreg_addr_p = &wreg_addr_e1;
18419    else if (CHIP_IS_E1H(sc))
18420        wreg_addr_p = &wreg_addr_e1h;
18421    else if (CHIP_IS_E2(sc))
18422        wreg_addr_p = &wreg_addr_e2;
18423    else if (CHIP_IS_E3A0(sc))
18424        wreg_addr_p = &wreg_addr_e3;
18425    else if (CHIP_IS_E3B0(sc))
18426        wreg_addr_p = &wreg_addr_e3b0;
18427    else
18428        return (-1);
18429
18430    /* Read the idle_chk registers */
18431    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18432        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18433            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18434            for (j = 0; j < idle_reg_addrs[i].size; j++)
18435                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18436        }
18437    }
18438
18439    /* Read the regular registers */
18440    for (i = 0; i < REGS_COUNT; i++) {
18441        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18442            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18443            for (j = 0; j < reg_addrs[i].size; j++)
18444                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18445        }
18446    }
18447
18448    /* Read the CAM registers */
18449    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18450        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18451        for (i = 0; i < wreg_addr_p->size; i++) {
18452            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18453
18454            /* In case of wreg_addr register, read additional
18455               registers from read_regs array
18456             */
18457            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18458                addr = *(wreg_addr_p->read_regs);
18459                *p++ = REG_RD(sc, addr + j*4);
18460            }
18461        }
18462    }
18463
18464    /* Paged registers are supported in E2 & E3 only */
18465    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18466        /* Read "paged" registers */
18467        bxe_read_pages_regs(sc, p, preset);
18468    }
18469
18470    return 0;
18471}
18472
18473static int
18474bxe_grc_dump(struct bxe_softc *sc)
18475{
18476    int rval = 0;
18477    uint32_t preset_idx;
18478    uint8_t *buf;
18479    uint32_t size;
18480    struct  dump_header *d_hdr;
18481
18482    if (sc->grcdump_done)
18483	return (rval);
18484
18485    ecore_disable_blocks_parity(sc);
18486
18487    buf = sc->grc_dump;
18488    d_hdr = sc->grc_dump;
18489
18490    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18491    d_hdr->version = BNX2X_DUMP_VERSION;
18492    d_hdr->preset = DUMP_ALL_PRESETS;
18493
18494    if (CHIP_IS_E1(sc)) {
18495        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18496    } else if (CHIP_IS_E1H(sc)) {
18497        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18498    } else if (CHIP_IS_E2(sc)) {
18499        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18500                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18501    } else if (CHIP_IS_E3A0(sc)) {
18502        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18503                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18504    } else if (CHIP_IS_E3B0(sc)) {
18505        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18506                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18507    }
18508
18509    buf += sizeof(struct  dump_header);
18510
18511    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18512
18513        /* Skip presets with IOR */
18514        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18515            (preset_idx == 11))
18516            continue;
18517
18518        rval = bxe_get_preset_regs(sc, sc->grc_dump, preset_idx);
18519
18520	if (rval)
18521            break;
18522
18523        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18524
18525        buf += size;
18526    }
18527
18528    ecore_clear_blocks_parity(sc);
18529    ecore_enable_blocks_parity(sc);
18530
18531    sc->grcdump_done = 1;
18532    return(rval);
18533}
18534
18535static int
18536bxe_add_cdev(struct bxe_softc *sc)
18537{
18538    int grc_dump_size;
18539
18540    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18541				sizeof(struct  dump_header);
18542
18543    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18544
18545    if (sc->grc_dump == NULL)
18546        return (-1);
18547
18548    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18549                            sc->ifp->if_dunit,
18550                            UID_ROOT,
18551                            GID_WHEEL,
18552                            0600,
18553                            "%s",
18554                            if_name(sc->ifp));
18555
18556    if (sc->ioctl_dev == NULL) {
18557
18558        free(sc->grc_dump, M_DEVBUF);
18559
18560        return (-1);
18561    }
18562
18563    sc->ioctl_dev->si_drv1 = sc;
18564
18565    return (0);
18566}
18567
18568static void
18569bxe_del_cdev(struct bxe_softc *sc)
18570{
18571    if (sc->ioctl_dev != NULL)
18572        destroy_dev(sc->ioctl_dev);
18573
18574    if (sc->grc_dump == NULL)
18575        free(sc->grc_dump, M_DEVBUF);
18576
18577    return;
18578}
18579
18580static int
18581bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18582        struct thread *td)
18583{
18584    struct bxe_softc    *sc;
18585    int                 rval = 0;
18586    device_t            pci_dev;
18587    bxe_grcdump_t       *dump = NULL;
18588    int grc_dump_size;
18589
18590    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
18591        return ENXIO;
18592
18593    pci_dev= sc->dev;
18594
18595    dump = (bxe_grcdump_t *)data;
18596
18597    switch(cmd) {
18598
18599        case BXE_GRC_DUMP_SIZE:
18600            dump->pci_func = sc->pcie_func;
18601            dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18602					sizeof(struct  dump_header);
18603            break;
18604
18605        case BXE_GRC_DUMP:
18606
18607            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18608				sizeof(struct  dump_header);
18609
18610            if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
18611                (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) {
18612                rval = EINVAL;
18613                break;
18614            }
18615	    dump->grcdump_dwords = grc_dump_size >> 2;
18616            rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
18617            sc->grcdump_done = 0;
18618
18619            break;
18620
18621        default:
18622            break;
18623    }
18624
18625    return (rval);
18626}
18627