bxe.c revision 315881
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 315881 2017-03-24 02:58:20Z davidcs $");
29
30#define BXE_DRIVER_VERSION "1.78.90"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero (0) for older kernels that don't include this changeset,
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        BRCM_VENDORID,
171        CHIP_NUM_57840_2_20,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 2x20GbE"
174    },
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57840_MF,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57840 MF 10GbE"
180    },
181    {
182        0, 0, 0, 0, NULL
183    }
184};
185
186MALLOC_DECLARE(M_BXE_ILT);
187MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188
189/*
190 * FreeBSD device entry points.
191 */
192static int bxe_probe(device_t);
193static int bxe_attach(device_t);
194static int bxe_detach(device_t);
195static int bxe_shutdown(device_t);
196
197/*
198 * FreeBSD KLD module/device interface event handler methods.
199 */
200static device_method_t bxe_methods[] = {
201    /* Device interface (device_if.h) */
202    DEVMETHOD(device_probe,     bxe_probe),
203    DEVMETHOD(device_attach,    bxe_attach),
204    DEVMETHOD(device_detach,    bxe_detach),
205    DEVMETHOD(device_shutdown,  bxe_shutdown),
206    /* Bus interface (bus_if.h) */
207    DEVMETHOD(bus_print_child,  bus_generic_print_child),
208    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
209    KOBJMETHOD_END
210};
211
212/*
213 * FreeBSD KLD Module data declaration
214 */
215static driver_t bxe_driver = {
216    "bxe",                   /* module name */
217    bxe_methods,             /* event handler */
218    sizeof(struct bxe_softc) /* extra data */
219};
220
221/*
222 * FreeBSD dev class is needed to manage dev instances and
223 * to associate with a bus type
224 */
225static devclass_t bxe_devclass;
226
227MODULE_DEPEND(bxe, pci, 1, 1, 1);
228MODULE_DEPEND(bxe, ether, 1, 1, 1);
229DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
230
231/* resources needed for unloading a previously loaded device */
232
233#define BXE_PREV_WAIT_NEEDED 1
234struct mtx bxe_prev_mtx;
235MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
236struct bxe_prev_list_node {
237    LIST_ENTRY(bxe_prev_list_node) node;
238    uint8_t bus;
239    uint8_t slot;
240    uint8_t path;
241    uint8_t aer; /* XXX automatic error recovery */
242    uint8_t undi;
243};
244static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
245
246static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
247
248/* Tunable device values... */
249
250SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
251
252/* Debug */
253unsigned long bxe_debug = 0;
254SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
255             &bxe_debug, 0, "Debug logging mode");
256
257/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
258static int bxe_interrupt_mode = INTR_MODE_MSIX;
259SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
260           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
261
262/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
263static int bxe_queue_count = 4;
264SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
265           &bxe_queue_count, 0, "Multi-Queue queue count");
266
267/* max number of buffers per queue (default RX_BD_USABLE) */
268static int bxe_max_rx_bufs = 0;
269SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
270           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
271
272/* Host interrupt coalescing RX tick timer (usecs) */
273static int bxe_hc_rx_ticks = 25;
274SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
275           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
276
277/* Host interrupt coalescing TX tick timer (usecs) */
278static int bxe_hc_tx_ticks = 50;
279SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
280           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
281
282/* Maximum number of Rx packets to process at a time */
283static int bxe_rx_budget = 0xffffffff;
284SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
285           &bxe_rx_budget, 0, "Rx processing budget");
286
287/* Maximum LRO aggregation size */
288static int bxe_max_aggregation_size = 0;
289SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
290           &bxe_max_aggregation_size, 0, "max aggregation size");
291
292/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
293static int bxe_mrrs = -1;
294SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
295           &bxe_mrrs, 0, "PCIe maximum read request size");
296
297/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
298static int bxe_autogreeen = 0;
299SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
300           &bxe_autogreeen, 0, "AutoGrEEEn support");
301
302/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
303static int bxe_udp_rss = 0;
304SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
305           &bxe_udp_rss, 0, "UDP RSS support");
306
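/*
 * Illustrative example only (not part of the driver): because the tunables
 * above are registered under the hw.bxe sysctl node with CTLFLAG_RDTUN or
 * CTLFLAG_TUN, they can be seeded at boot from loader.conf, e.g.:
 *
 *   hw.bxe.debug=0
 *   hw.bxe.interrupt_mode=2
 *   hw.bxe.queue_count=4
 *   hw.bxe.hc_rx_ticks=25
 *
 * The values shown are simply the defaults declared above.
 */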
307
308#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
309
310#define STATS_OFFSET32(stat_name)                   \
311    (offsetof(struct bxe_eth_stats, stat_name) / 4)
312
313#define Q_STATS_OFFSET32(stat_name)                   \
314    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
315
316static const struct {
317    uint32_t offset;
318    uint32_t size;
319    uint32_t flags;
320#define STATS_FLAGS_PORT  1
321#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
322#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
323    char string[STAT_NAME_LEN];
324} bxe_eth_stats_arr[] = {
325    { STATS_OFFSET32(total_bytes_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_bytes" },
327    { STATS_OFFSET32(error_bytes_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
329    { STATS_OFFSET32(total_unicast_packets_received_hi),
330                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
331    { STATS_OFFSET32(total_multicast_packets_received_hi),
332                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
333    { STATS_OFFSET32(total_broadcast_packets_received_hi),
334                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
335    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
336                8, STATS_FLAGS_PORT, "rx_crc_errors" },
337    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
338                8, STATS_FLAGS_PORT, "rx_align_errors" },
339    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
340                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
341    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
342                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
343    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
344                8, STATS_FLAGS_PORT, "rx_fragments" },
345    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
346                8, STATS_FLAGS_PORT, "rx_jabbers" },
347    { STATS_OFFSET32(no_buff_discard_hi),
348                8, STATS_FLAGS_BOTH, "rx_discards" },
349    { STATS_OFFSET32(mac_filter_discard),
350                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
351    { STATS_OFFSET32(mf_tag_discard),
352                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
353    { STATS_OFFSET32(pfc_frames_received_hi),
354                8, STATS_FLAGS_PORT, "pfc_frames_received" },
355    { STATS_OFFSET32(pfc_frames_sent_hi),
356                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
357    { STATS_OFFSET32(brb_drop_hi),
358                8, STATS_FLAGS_PORT, "rx_brb_discard" },
359    { STATS_OFFSET32(brb_truncate_hi),
360                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
361    { STATS_OFFSET32(pause_frames_received_hi),
362                8, STATS_FLAGS_PORT, "rx_pause_frames" },
363    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
364                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
365    { STATS_OFFSET32(nig_timer_max),
366                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
367    { STATS_OFFSET32(total_bytes_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_bytes" },
369    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
370                8, STATS_FLAGS_PORT, "tx_error_bytes" },
371    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
372                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
373    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
374                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
375    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
376                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
377    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
378                8, STATS_FLAGS_PORT, "tx_mac_errors" },
379    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
380                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
381    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
382                8, STATS_FLAGS_PORT, "tx_single_collisions" },
383    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
384                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
385    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
386                8, STATS_FLAGS_PORT, "tx_deferred" },
387    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
388                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
389    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
390                8, STATS_FLAGS_PORT, "tx_late_collisions" },
391    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
392                8, STATS_FLAGS_PORT, "tx_total_collisions" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
394                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
396                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
397    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
398                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
399    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
400                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
401    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
402                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
403    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
404                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
405    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
406                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
407    { STATS_OFFSET32(pause_frames_sent_hi),
408                8, STATS_FLAGS_PORT, "tx_pause_frames" },
409    { STATS_OFFSET32(total_tpa_aggregations_hi),
410                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
411    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
412                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
413    { STATS_OFFSET32(total_tpa_bytes_hi),
414                8, STATS_FLAGS_FUNC, "tpa_bytes"},
415    { STATS_OFFSET32(eee_tx_lpi),
416                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
417    { STATS_OFFSET32(rx_calls),
418                4, STATS_FLAGS_FUNC, "rx_calls"},
419    { STATS_OFFSET32(rx_pkts),
420                4, STATS_FLAGS_FUNC, "rx_pkts"},
421    { STATS_OFFSET32(rx_tpa_pkts),
422                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
423    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
424                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
425    { STATS_OFFSET32(rx_bxe_service_rxsgl),
426                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
427    { STATS_OFFSET32(rx_jumbo_sge_pkts),
428                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
429    { STATS_OFFSET32(rx_soft_errors),
430                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
431    { STATS_OFFSET32(rx_hw_csum_errors),
432                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
433    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
434                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
435    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
436                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
437    { STATS_OFFSET32(rx_budget_reached),
438                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
439    { STATS_OFFSET32(tx_pkts),
440                4, STATS_FLAGS_FUNC, "tx_pkts"},
441    { STATS_OFFSET32(tx_soft_errors),
442                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
443    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
445    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
447    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
448                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
449    { STATS_OFFSET32(tx_ofld_frames_lso),
450                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
451    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
452                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
453    { STATS_OFFSET32(tx_encap_failures),
454                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
455    { STATS_OFFSET32(tx_hw_queue_full),
456                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
457    { STATS_OFFSET32(tx_hw_max_queue_depth),
458                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
459    { STATS_OFFSET32(tx_dma_mapping_failure),
460                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
461    { STATS_OFFSET32(tx_max_drbr_queue_depth),
462                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
463    { STATS_OFFSET32(tx_window_violation_std),
464                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
465    { STATS_OFFSET32(tx_window_violation_tso),
466                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
467    { STATS_OFFSET32(tx_chain_lost_mbuf),
468                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
469    { STATS_OFFSET32(tx_frames_deferred),
470                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
471    { STATS_OFFSET32(tx_queue_xoff),
472                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
473    { STATS_OFFSET32(mbuf_defrag_attempts),
474                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
475    { STATS_OFFSET32(mbuf_defrag_failures),
476                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
477    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
479    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
481    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
483    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
485    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
486                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
487    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
488                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
489    { STATS_OFFSET32(mbuf_alloc_tx),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
491    { STATS_OFFSET32(mbuf_alloc_rx),
492                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
493    { STATS_OFFSET32(mbuf_alloc_sge),
494                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
495    { STATS_OFFSET32(mbuf_alloc_tpa),
496                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
497    { STATS_OFFSET32(tx_queue_full_return),
498                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
499    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
500                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
501    { STATS_OFFSET32(tx_request_link_down_failures),
502                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
503    { STATS_OFFSET32(bd_avail_too_less_failures),
504                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
505    { STATS_OFFSET32(tx_mq_not_empty),
506                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
507    { STATS_OFFSET32(nsegs_path1_errors),
508                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
509    { STATS_OFFSET32(nsegs_path2_errors),
510                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
511
512
513};
514
515static const struct {
516    uint32_t offset;
517    uint32_t size;
518    char string[STAT_NAME_LEN];
519} bxe_eth_q_stats_arr[] = {
520    { Q_STATS_OFFSET32(total_bytes_received_hi),
521                8, "rx_bytes" },
522    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
523                8, "rx_ucast_packets" },
524    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
525                8, "rx_mcast_packets" },
526    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
527                8, "rx_bcast_packets" },
528    { Q_STATS_OFFSET32(no_buff_discard_hi),
529                8, "rx_discards" },
530    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
531                8, "tx_bytes" },
532    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
533                8, "tx_ucast_packets" },
534    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
535                8, "tx_mcast_packets" },
536    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
537                8, "tx_bcast_packets" },
538    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
539                8, "tpa_aggregations" },
540    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
541                8, "tpa_aggregated_frames"},
542    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
543                8, "tpa_bytes"},
544    { Q_STATS_OFFSET32(rx_calls),
545                4, "rx_calls"},
546    { Q_STATS_OFFSET32(rx_pkts),
547                4, "rx_pkts"},
548    { Q_STATS_OFFSET32(rx_tpa_pkts),
549                4, "rx_tpa_pkts"},
550    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
551                4, "rx_erroneous_jumbo_sge_pkts"},
552    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
553                4, "rx_bxe_service_rxsgl"},
554    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
555                4, "rx_jumbo_sge_pkts"},
556    { Q_STATS_OFFSET32(rx_soft_errors),
557                4, "rx_soft_errors"},
558    { Q_STATS_OFFSET32(rx_hw_csum_errors),
559                4, "rx_hw_csum_errors"},
560    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
561                4, "rx_ofld_frames_csum_ip"},
562    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
563                4, "rx_ofld_frames_csum_tcp_udp"},
564    { Q_STATS_OFFSET32(rx_budget_reached),
565                4, "rx_budget_reached"},
566    { Q_STATS_OFFSET32(tx_pkts),
567                4, "tx_pkts"},
568    { Q_STATS_OFFSET32(tx_soft_errors),
569                4, "tx_soft_errors"},
570    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
571                4, "tx_ofld_frames_csum_ip"},
572    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
573                4, "tx_ofld_frames_csum_tcp"},
574    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
575                4, "tx_ofld_frames_csum_udp"},
576    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
577                4, "tx_ofld_frames_lso"},
578    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
579                4, "tx_ofld_frames_lso_hdr_splits"},
580    { Q_STATS_OFFSET32(tx_encap_failures),
581                4, "tx_encap_failures"},
582    { Q_STATS_OFFSET32(tx_hw_queue_full),
583                4, "tx_hw_queue_full"},
584    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
585                4, "tx_hw_max_queue_depth"},
586    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
587                4, "tx_dma_mapping_failure"},
588    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
589                4, "tx_max_drbr_queue_depth"},
590    { Q_STATS_OFFSET32(tx_window_violation_std),
591                4, "tx_window_violation_std"},
592    { Q_STATS_OFFSET32(tx_window_violation_tso),
593                4, "tx_window_violation_tso"},
594    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
595                4, "tx_chain_lost_mbuf"},
596    { Q_STATS_OFFSET32(tx_frames_deferred),
597                4, "tx_frames_deferred"},
598    { Q_STATS_OFFSET32(tx_queue_xoff),
599                4, "tx_queue_xoff"},
600    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
601                4, "mbuf_defrag_attempts"},
602    { Q_STATS_OFFSET32(mbuf_defrag_failures),
603                4, "mbuf_defrag_failures"},
604    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
605                4, "mbuf_rx_bd_alloc_failed"},
606    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
607                4, "mbuf_rx_bd_mapping_failed"},
608    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
609                4, "mbuf_rx_tpa_alloc_failed"},
610    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
611                4, "mbuf_rx_tpa_mapping_failed"},
612    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
613                4, "mbuf_rx_sge_alloc_failed"},
614    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
615                4, "mbuf_rx_sge_mapping_failed"},
616    { Q_STATS_OFFSET32(mbuf_alloc_tx),
617                4, "mbuf_alloc_tx"},
618    { Q_STATS_OFFSET32(mbuf_alloc_rx),
619                4, "mbuf_alloc_rx"},
620    { Q_STATS_OFFSET32(mbuf_alloc_sge),
621                4, "mbuf_alloc_sge"},
622    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
623                4, "mbuf_alloc_tpa"},
624    { Q_STATS_OFFSET32(tx_queue_full_return),
625                4, "tx_queue_full_return"},
626    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
627                4, "bxe_tx_mq_sc_state_failures"},
628    { Q_STATS_OFFSET32(tx_request_link_down_failures),
629                4, "tx_request_link_down_failures"},
630    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
631                4, "bd_avail_too_less_failures"},
632    { Q_STATS_OFFSET32(tx_mq_not_empty),
633                4, "tx_mq_not_empty"},
634    { Q_STATS_OFFSET32(nsegs_path1_errors),
635                4, "nsegs_path1_errors"},
636    { Q_STATS_OFFSET32(nsegs_path2_errors),
637                4, "nsegs_path2_errors"}
638
639
640};
641
642#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
643#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
644
645
646static void    bxe_cmng_fns_init(struct bxe_softc *sc,
647                                 uint8_t          read_cfg,
648                                 uint8_t          cmng_type);
649static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
650static void    storm_memset_cmng(struct bxe_softc *sc,
651                                 struct cmng_init *cmng,
652                                 uint8_t          port);
653static void    bxe_set_reset_global(struct bxe_softc *sc);
654static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
655static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
656                                 int              engine);
657static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
658static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
659                                   uint8_t          *global,
660                                   uint8_t          print);
661static void    bxe_int_disable(struct bxe_softc *sc);
662static int     bxe_release_leader_lock(struct bxe_softc *sc);
663static void    bxe_pf_disable(struct bxe_softc *sc);
664static void    bxe_free_fp_buffers(struct bxe_softc *sc);
665static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
666                                      struct bxe_fastpath *fp,
667                                      uint16_t            rx_bd_prod,
668                                      uint16_t            rx_cq_prod,
669                                      uint16_t            rx_sge_prod);
670static void    bxe_link_report_locked(struct bxe_softc *sc);
671static void    bxe_link_report(struct bxe_softc *sc);
672static void    bxe_link_status_update(struct bxe_softc *sc);
673static void    bxe_periodic_callout_func(void *xsc);
674static void    bxe_periodic_start(struct bxe_softc *sc);
675static void    bxe_periodic_stop(struct bxe_softc *sc);
676static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
677                                    uint16_t prev_index,
678                                    uint16_t index);
679static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
680                                     int                 queue);
681static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
682                                     uint16_t            index);
683static uint8_t bxe_txeof(struct bxe_softc *sc,
684                         struct bxe_fastpath *fp);
685static void    bxe_task_fp(struct bxe_fastpath *fp);
686static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
687                                     struct mbuf      *m,
688                                     uint8_t          contents);
689static int     bxe_alloc_mem(struct bxe_softc *sc);
690static void    bxe_free_mem(struct bxe_softc *sc);
691static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
692static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
693static int     bxe_interrupt_attach(struct bxe_softc *sc);
694static void    bxe_interrupt_detach(struct bxe_softc *sc);
695static void    bxe_set_rx_mode(struct bxe_softc *sc);
696static int     bxe_init_locked(struct bxe_softc *sc);
697static int     bxe_stop_locked(struct bxe_softc *sc);
698static __noinline int bxe_nic_load(struct bxe_softc *sc,
699                                   int              load_mode);
700static __noinline int bxe_nic_unload(struct bxe_softc *sc,
701                                     uint32_t         unload_mode,
702                                     uint8_t          keep_link);
703
704static void bxe_handle_sp_tq(void *context, int pending);
705static void bxe_handle_fp_tq(void *context, int pending);
706
707static int bxe_add_cdev(struct bxe_softc *sc);
708static void bxe_del_cdev(struct bxe_softc *sc);
709int bxe_grc_dump(struct bxe_softc *sc);
710static int bxe_alloc_buf_rings(struct bxe_softc *sc);
711static void bxe_free_buf_rings(struct bxe_softc *sc);
712
713/* calculate crc32 on a buffer (NOTE: crc32_length MUST be a multiple of 8) */
714uint32_t
715calc_crc32(uint8_t  *crc32_packet,
716           uint32_t crc32_length,
717           uint32_t crc32_seed,
718           uint8_t  complement)
719{
720   uint32_t byte         = 0;
721   uint32_t bit          = 0;
722   uint8_t  msb          = 0;
723   uint32_t temp         = 0;
724   uint32_t shft         = 0;
725   uint8_t  current_byte = 0;
726   uint32_t crc32_result = crc32_seed;
727   const uint32_t CRC32_POLY = 0x1edc6f41;
728
729   if ((crc32_packet == NULL) ||
730       (crc32_length == 0) ||
731       ((crc32_length % 8) != 0))
732    {
733        return (crc32_result);
734    }
735
736    for (byte = 0; byte < crc32_length; byte = byte + 1)
737    {
738        current_byte = crc32_packet[byte];
739        for (bit = 0; bit < 8; bit = bit + 1)
740        {
741            /* msb = crc32_result[31]; */
742            msb = (uint8_t)(crc32_result >> 31);
743
744            crc32_result = crc32_result << 1;
745
746            /* if (msb != current_byte[bit]) */
747            if (msb != (0x1 & (current_byte >> bit)))
748            {
749                crc32_result = crc32_result ^ CRC32_POLY;
750                /* crc32_result[0] = 1 */
751                crc32_result |= 1;
752            }
753        }
754    }
755
756    /* Last step is to:
757     * 1. "mirror" every bit
758     * 2. swap the 4 bytes
759     * 3. complement each bit
760     */
761
762    /* Mirror */
763    temp = crc32_result;
764    shft = sizeof(crc32_result) * 8 - 1;
765
766    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
767    {
768        temp <<= 1;
769        temp |= crc32_result & 1;
770        shft--;
771    }
772
773    /* temp[31-bit] = crc32_result[bit] */
774    temp <<= shft;
775
776    /* Swap */
777    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
778    {
779        uint32_t t0, t1, t2, t3;
780        t0 = (0x000000ff & (temp >> 24));
781        t1 = (0x0000ff00 & (temp >> 8));
782        t2 = (0x00ff0000 & (temp << 8));
783        t3 = (0xff000000 & (temp << 24));
784        crc32_result = t0 | t1 | t2 | t3;
785    }
786
787    /* Complement */
788    if (complement)
789    {
790        crc32_result = ~crc32_result;
791    }
792
793    return (crc32_result);
794}
795
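/*
 * Illustrative usage of calc_crc32() above (assumed example, not an actual
 * call site). The polynomial 0x1edc6f41 is the CRC32-C (Castagnoli)
 * polynomial; 0xffffffff is shown here only as a typical seed value:
 *
 *   uint8_t  buf[8];          // length must be a multiple of 8
 *   uint32_t crc;
 *
 *   memset(buf, 0, sizeof(buf));
 *   crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);  // mirrored, swapped,
 *                                                       // complemented result
 */
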
796int
797bxe_test_bit(int                    nr,
798             volatile unsigned long *addr)
799{
800    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
801}
802
803void
804bxe_set_bit(unsigned int           nr,
805            volatile unsigned long *addr)
806{
807    atomic_set_acq_long(addr, (1 << nr));
808}
809
810void
811bxe_clear_bit(int                    nr,
812              volatile unsigned long *addr)
813{
814    atomic_clear_acq_long(addr, (1 << nr));
815}
816
817int
818bxe_test_and_set_bit(int                    nr,
819                       volatile unsigned long *addr)
820{
821    unsigned long x;
822    nr = (1 << nr);
823    do {
824        x = *addr;
825    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
826    // if (x & nr) bit_was_set; else bit_was_not_set;
827    return (x & nr);
828}
829
830int
831bxe_test_and_clear_bit(int                    nr,
832                       volatile unsigned long *addr)
833{
834    unsigned long x;
835    nr = (1 << nr);
836    do {
837        x = *addr;
838    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
839    // if (x & nr) bit_was_set; else bit_was_not_set;
840    return (x & nr);
841}
842
843int
844bxe_cmpxchg(volatile int *addr,
845            int          old,
846            int          new)
847{
848    int x;
849    do {
850        x = *addr;
851    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
852    return (x);
853}
854
855/*
856 * Get DMA memory from the OS.
857 *
858 * Validates that the OS has provided DMA buffers in response to a
859 * bus_dmamap_load call and saves the physical address of those buffers.
860 * When the callback is used the OS will return 0 for the mapping function
861 * (bus_dmamap_load), so the callback records the address and segment count
862 * in the bxe_dma structure (zeroed on error) to report failures to the caller.
863 *
864 * Returns:
865 *   Nothing.
866 */
867static void
868bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
869{
870    struct bxe_dma *dma = arg;
871
872    if (error) {
873        dma->paddr = 0;
874        dma->nseg  = 0;
875        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
876    } else {
877        dma->paddr = segs->ds_addr;
878        dma->nseg  = nseg;
879    }
880}
881
882/*
883 * Allocate a block of memory and map it for DMA. No partial completions are
884 * allowed; if all of the resources cannot be acquired, any that were acquired
885 * are released.
886 *
887 * Returns:
888 *   0 = Success, !0 = Failure
889 */
890int
891bxe_dma_alloc(struct bxe_softc *sc,
892              bus_size_t       size,
893              struct bxe_dma   *dma,
894              const char       *msg)
895{
896    int rc;
897
898    if (dma->size > 0) {
899        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
900              (unsigned long)dma->size);
901        return (1);
902    }
903
904    memset(dma, 0, sizeof(*dma)); /* sanity */
905    dma->sc   = sc;
906    dma->size = size;
907    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
908
909    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
910                            BCM_PAGE_SIZE,      /* alignment */
911                            0,                  /* boundary limit */
912                            BUS_SPACE_MAXADDR,  /* restricted low */
913                            BUS_SPACE_MAXADDR,  /* restricted hi */
914                            NULL,               /* addr filter() */
915                            NULL,               /* addr filter() arg */
916                            size,               /* max map size */
917                            1,                  /* num discontinuous */
918                            size,               /* max seg size */
919                            BUS_DMA_ALLOCNOW,   /* flags */
920                            NULL,               /* lock() */
921                            NULL,               /* lock() arg */
922                            &dma->tag);         /* returned dma tag */
923    if (rc != 0) {
924        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
925        memset(dma, 0, sizeof(*dma));
926        return (1);
927    }
928
929    rc = bus_dmamem_alloc(dma->tag,
930                          (void **)&dma->vaddr,
931                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
932                          &dma->map);
933    if (rc != 0) {
934        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
935        bus_dma_tag_destroy(dma->tag);
936        memset(dma, 0, sizeof(*dma));
937        return (1);
938    }
939
940    rc = bus_dmamap_load(dma->tag,
941                         dma->map,
942                         dma->vaddr,
943                         size,
944                         bxe_dma_map_addr, /* BLOGD in here */
945                         dma,
946                         BUS_DMA_NOWAIT);
947    if (rc != 0) {
948        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
949        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
950        bus_dma_tag_destroy(dma->tag);
951        memset(dma, 0, sizeof(*dma));
952        return (1);
953    }
954
955    return (0);
956}
957
958void
959bxe_dma_free(struct bxe_softc *sc,
960             struct bxe_dma   *dma)
961{
962    if (dma->size > 0) {
963        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
964
965        bus_dmamap_sync(dma->tag, dma->map,
966                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
967        bus_dmamap_unload(dma->tag, dma->map);
968        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
969        bus_dma_tag_destroy(dma->tag);
970    }
971
972    memset(dma, 0, sizeof(*dma));
973}
974
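/*
 * Illustrative usage sketch of the two helpers above (assumed example, not a
 * call site taken from the driver):
 *
 *   struct bxe_dma dma;
 *
 *   memset(&dma, 0, sizeof(dma));    // size must be 0 before bxe_dma_alloc()
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0)
 *       return (ENOMEM);             // tag/map/memory already cleaned up
 *   // ... DMA into dma.vaddr, hand dma.paddr to the hardware ...
 *   bxe_dma_free(sc, &dma);          // safe even if dma.size is 0
 */
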
975/*
976 * These indirect read and write routines are only used during init.
977 * The locking is handled by the MCP.
978 */
979
980void
981bxe_reg_wr_ind(struct bxe_softc *sc,
982               uint32_t         addr,
983               uint32_t         val)
984{
985    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
986    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
987    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
988}
989
990uint32_t
991bxe_reg_rd_ind(struct bxe_softc *sc,
992               uint32_t         addr)
993{
994    uint32_t val;
995
996    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
997    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
998    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
999
1000    return (val);
1001}
1002
1003static int
1004bxe_acquire_hw_lock(struct bxe_softc *sc,
1005                    uint32_t         resource)
1006{
1007    uint32_t lock_status;
1008    uint32_t resource_bit = (1 << resource);
1009    int func = SC_FUNC(sc);
1010    uint32_t hw_lock_control_reg;
1011    int cnt;
1012
1013    /* validate the resource is within range */
1014    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1015        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1016            " resource_bit 0x%x\n", resource, resource_bit);
1017        return (-1);
1018    }
1019
1020    if (func <= 5) {
1021        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1022    } else {
1023        hw_lock_control_reg =
1024                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1025    }
1026
1027    /* validate the resource is not already taken */
1028    lock_status = REG_RD(sc, hw_lock_control_reg);
1029    if (lock_status & resource_bit) {
1030        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1031              resource, lock_status, resource_bit);
1032        return (-1);
1033    }
1034
1035    /* try every 5ms for 5 seconds */
1036    for (cnt = 0; cnt < 1000; cnt++) {
1037        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1038        lock_status = REG_RD(sc, hw_lock_control_reg);
1039        if (lock_status & resource_bit) {
1040            return (0);
1041        }
1042        DELAY(5000);
1043    }
1044
1045    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1046        resource, resource_bit);
1047    return (-1);
1048}
1049
1050static int
1051bxe_release_hw_lock(struct bxe_softc *sc,
1052                    uint32_t         resource)
1053{
1054    uint32_t lock_status;
1055    uint32_t resource_bit = (1 << resource);
1056    int func = SC_FUNC(sc);
1057    uint32_t hw_lock_control_reg;
1058
1059    /* validate the resource is within range */
1060    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1061        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1062            " resource_bit 0x%x\n", resource, resource_bit);
1063        return (-1);
1064    }
1065
1066    if (func <= 5) {
1067        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1068    } else {
1069        hw_lock_control_reg =
1070                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1071    }
1072
1073    /* validate the resource is currently taken */
1074    lock_status = REG_RD(sc, hw_lock_control_reg);
1075    if (!(lock_status & resource_bit)) {
1076        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1077              resource, lock_status, resource_bit);
1078        return (-1);
1079    }
1080
1081    REG_WR(sc, hw_lock_control_reg, resource_bit);
1082    return (0);
1083}
1084static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1085{
1086	BXE_PHY_LOCK(sc);
1087	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1088}
1089
1090static void bxe_release_phy_lock(struct bxe_softc *sc)
1091{
1092	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1093	BXE_PHY_UNLOCK(sc);
1094}
1095/*
1096 * The per-pf misc lock must be acquired before the per-port mcp lock.
1097 * Otherwise, had we done things the other way around, two pfs from the same
1098 * port attempting to access nvram at the same time could run into a
1099 * scenario such as:
1100 * pf A takes the port lock.
1101 * pf B succeeds in taking the same lock since they are from the same port.
1102 * pf A takes the per-pf misc lock and performs the eeprom access.
1103 * pf A finishes and unlocks the per-pf misc lock.
1104 * pf B takes the lock and proceeds to perform its own access.
1105 * pf A unlocks the per-port lock while pf B is still working (!).
1106 * mcp takes the per-port lock and corrupts pf B's access (and/or has its own
1107 * access corrupted by pf B).
1108 */
1109static int
1110bxe_acquire_nvram_lock(struct bxe_softc *sc)
1111{
1112    int port = SC_PORT(sc);
1113    int count, i;
1114    uint32_t val = 0;
1115
1116    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1117    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1118
1119    /* adjust timeout for emulation/FPGA */
1120    count = NVRAM_TIMEOUT_COUNT;
1121    if (CHIP_REV_IS_SLOW(sc)) {
1122        count *= 100;
1123    }
1124
1125    /* request access to nvram interface */
1126    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1127           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1128
1129    for (i = 0; i < count*10; i++) {
1130        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1131        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1132            break;
1133        }
1134
1135        DELAY(5);
1136    }
1137
1138    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1139        BLOGE(sc, "Cannot get access to nvram interface "
1140            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1141            port, val);
1142        return (-1);
1143    }
1144
1145    return (0);
1146}
1147
1148static int
1149bxe_release_nvram_lock(struct bxe_softc *sc)
1150{
1151    int port = SC_PORT(sc);
1152    int count, i;
1153    uint32_t val = 0;
1154
1155    /* adjust timeout for emulation/FPGA */
1156    count = NVRAM_TIMEOUT_COUNT;
1157    if (CHIP_REV_IS_SLOW(sc)) {
1158        count *= 100;
1159    }
1160
1161    /* relinquish nvram interface */
1162    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1163           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1164
1165    for (i = 0; i < count*10; i++) {
1166        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1167        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1168            break;
1169        }
1170
1171        DELAY(5);
1172    }
1173
1174    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1175        BLOGE(sc, "Cannot free access to nvram interface "
1176            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1177            port, val);
1178        return (-1);
1179    }
1180
1181    /* release HW lock: protect against other PFs in PF Direct Assignment */
1182    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1183
1184    return (0);
1185}
1186
1187static void
1188bxe_enable_nvram_access(struct bxe_softc *sc)
1189{
1190    uint32_t val;
1191
1192    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1193
1194    /* enable both bits, even on read */
1195    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1196           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1197}
1198
1199static void
1200bxe_disable_nvram_access(struct bxe_softc *sc)
1201{
1202    uint32_t val;
1203
1204    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1205
1206    /* disable both bits, even after read */
1207    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1208           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1209                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1210}
1211
1212static int
1213bxe_nvram_read_dword(struct bxe_softc *sc,
1214                     uint32_t         offset,
1215                     uint32_t         *ret_val,
1216                     uint32_t         cmd_flags)
1217{
1218    int count, i, rc;
1219    uint32_t val;
1220
1221    /* build the command word */
1222    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1223
1224    /* need to clear DONE bit separately */
1225    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1226
1227    /* address of the NVRAM to read from */
1228    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1229           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1230
1231    /* issue a read command */
1232    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1233
1234    /* adjust timeout for emulation/FPGA */
1235    count = NVRAM_TIMEOUT_COUNT;
1236    if (CHIP_REV_IS_SLOW(sc)) {
1237        count *= 100;
1238    }
1239
1240    /* wait for completion */
1241    *ret_val = 0;
1242    rc = -1;
1243    for (i = 0; i < count; i++) {
1244        DELAY(5);
1245        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1246
1247        if (val & MCPR_NVM_COMMAND_DONE) {
1248            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1249            /* we read nvram data in cpu order,
1250             * but the caller sees it as an array of bytes;
1251             * converting to big-endian does the work
1252             */
1253            *ret_val = htobe32(val);
1254            rc = 0;
1255            break;
1256        }
1257    }
1258
1259    if (rc == -1) {
1260        BLOGE(sc, "nvram read timeout expired "
1261            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1262            offset, cmd_flags, val);
1263    }
1264
1265    return (rc);
1266}
1267
1268static int
1269bxe_nvram_read(struct bxe_softc *sc,
1270               uint32_t         offset,
1271               uint8_t          *ret_buf,
1272               int              buf_size)
1273{
1274    uint32_t cmd_flags;
1275    uint32_t val;
1276    int rc;
1277
1278    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1279        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1280              offset, buf_size);
1281        return (-1);
1282    }
1283
1284    if ((offset + buf_size) > sc->devinfo.flash_size) {
1285        BLOGE(sc, "Invalid parameter, "
1286                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1287              offset, buf_size, sc->devinfo.flash_size);
1288        return (-1);
1289    }
1290
1291    /* request access to nvram interface */
1292    rc = bxe_acquire_nvram_lock(sc);
1293    if (rc) {
1294        return (rc);
1295    }
1296
1297    /* enable access to nvram interface */
1298    bxe_enable_nvram_access(sc);
1299
1300    /* read the first word(s) */
1301    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1302    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1303        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1304        memcpy(ret_buf, &val, 4);
1305
1306        /* advance to the next dword */
1307        offset += sizeof(uint32_t);
1308        ret_buf += sizeof(uint32_t);
1309        buf_size -= sizeof(uint32_t);
1310        cmd_flags = 0;
1311    }
1312
1313    if (rc == 0) {
1314        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1315        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1316        memcpy(ret_buf, &val, 4);
1317    }
1318
1319    /* disable access to nvram interface */
1320    bxe_disable_nvram_access(sc);
1321    bxe_release_nvram_lock(sc);
1322
1323    return (rc);
1324}
1325
1326static int
1327bxe_nvram_write_dword(struct bxe_softc *sc,
1328                      uint32_t         offset,
1329                      uint32_t         val,
1330                      uint32_t         cmd_flags)
1331{
1332    int count, i, rc;
1333
1334    /* build the command word */
1335    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1336
1337    /* need to clear DONE bit separately */
1338    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1339
1340    /* write the data */
1341    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1342
1343    /* address of the NVRAM to write to */
1344    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1345           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1346
1347    /* issue the write command */
1348    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1349
1350    /* adjust timeout for emulation/FPGA */
1351    count = NVRAM_TIMEOUT_COUNT;
1352    if (CHIP_REV_IS_SLOW(sc)) {
1353        count *= 100;
1354    }
1355
1356    /* wait for completion */
1357    rc = -1;
1358    for (i = 0; i < count; i++) {
1359        DELAY(5);
1360        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1361        if (val & MCPR_NVM_COMMAND_DONE) {
1362            rc = 0;
1363            break;
1364        }
1365    }
1366
1367    if (rc == -1) {
1368        BLOGE(sc, "nvram write timeout expired "
1369            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1370            offset, cmd_flags, val);
1371    }
1372
1373    return (rc);
1374}
1375
1376#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1377
1378static int
1379bxe_nvram_write1(struct bxe_softc *sc,
1380                 uint32_t         offset,
1381                 uint8_t          *data_buf,
1382                 int              buf_size)
1383{
1384    uint32_t cmd_flags;
1385    uint32_t align_offset;
1386    uint32_t val;
1387    int rc;
1388
1389    if ((offset + buf_size) > sc->devinfo.flash_size) {
1390        BLOGE(sc, "Invalid parameter, "
1391                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1392              offset, buf_size, sc->devinfo.flash_size);
1393        return (-1);
1394    }
1395
1396    /* request access to nvram interface */
1397    rc = bxe_acquire_nvram_lock(sc);
1398    if (rc) {
1399        return (rc);
1400    }
1401
1402    /* enable access to nvram interface */
1403    bxe_enable_nvram_access(sc);
1404
1405    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1406    align_offset = (offset & ~0x03);
1407    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1408
1409    if (rc == 0) {
1410        val &= ~(0xff << BYTE_OFFSET(offset));
1411        val |= (*data_buf << BYTE_OFFSET(offset));
1412
1413        /* nvram data is returned as an array of bytes;
1414         * convert it back to cpu order
1415         */
1416        val = be32toh(val);
1417
1418        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1419    }
1420
1421    /* disable access to nvram interface */
1422    bxe_disable_nvram_access(sc);
1423    bxe_release_nvram_lock(sc);
1424
1425    return (rc);
1426}
1427
1428static int
1429bxe_nvram_write(struct bxe_softc *sc,
1430                uint32_t         offset,
1431                uint8_t          *data_buf,
1432                int              buf_size)
1433{
1434    uint32_t cmd_flags;
1435    uint32_t val;
1436    uint32_t written_so_far;
1437    int rc;
1438
1439    if (buf_size == 1) {
1440        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1441    }
1442
1443    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1444        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1445              offset, buf_size);
1446        return (-1);
1447    }
1448
1449    if (buf_size == 0) {
1450        return (0); /* nothing to do */
1451    }
1452
1453    if ((offset + buf_size) > sc->devinfo.flash_size) {
1454        BLOGE(sc, "Invalid parameter, "
1455                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1456              offset, buf_size, sc->devinfo.flash_size);
1457        return (-1);
1458    }
1459
1460    /* request access to nvram interface */
1461    rc = bxe_acquire_nvram_lock(sc);
1462    if (rc) {
1463        return (rc);
1464    }
1465
1466    /* enable access to nvram interface */
1467    bxe_enable_nvram_access(sc);
1468
1469    written_so_far = 0;
1470    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1471    while ((written_so_far < buf_size) && (rc == 0)) {
1472        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1473            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1474        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1475            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1476        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1477            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1478        }
1479
1480        memcpy(&val, data_buf, 4);
1481
1482        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1483
1484        /* advance to the next dword */
1485        offset += sizeof(uint32_t);
1486        data_buf += sizeof(uint32_t);
1487        written_so_far += sizeof(uint32_t);
1488        cmd_flags = 0;
1489    }
1490
1491    /* disable access to nvram interface */
1492    bxe_disable_nvram_access(sc);
1493    bxe_release_nvram_lock(sc);
1494
1495    return (rc);
1496}
1497
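/*
 * Illustrative sketch of how the NVRAM accessors above are meant to be used
 * (assumed example, not an actual call site):
 *
 *   uint32_t word;
 *
 *   // offset and length must be 4-byte aligned (except the single-byte
 *   // write path handled by bxe_nvram_write1())
 *   if (bxe_nvram_read(sc, 0x0, (uint8_t *)&word, sizeof(word)) == 0) {
 *       // 'word' now holds the first dword of the flash, stored in
 *       // big-endian byte order by bxe_nvram_read_dword()
 *   }
 */
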
1498/* copy command into DMAE command memory and set DMAE command Go */
1499void
1500bxe_post_dmae(struct bxe_softc    *sc,
1501              struct dmae_cmd *dmae,
1502              int                 idx)
1503{
1504    uint32_t cmd_offset;
1505    int i;
1506
1507    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1508    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1509        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1510    }
1511
1512    REG_WR(sc, dmae_reg_go_c[idx], 1);
1513}
1514
1515uint32_t
1516bxe_dmae_opcode_add_comp(uint32_t opcode,
1517                         uint8_t  comp_type)
1518{
1519    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1520                      DMAE_CMD_C_TYPE_ENABLE));
1521}
1522
1523uint32_t
1524bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1525{
1526    return (opcode & ~DMAE_CMD_SRC_RESET);
1527}
1528
1529uint32_t
1530bxe_dmae_opcode(struct bxe_softc *sc,
1531                uint8_t          src_type,
1532                uint8_t          dst_type,
1533                uint8_t          with_comp,
1534                uint8_t          comp_type)
1535{
1536    uint32_t opcode = 0;
1537
1538    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1539               (dst_type << DMAE_CMD_DST_SHIFT));
1540
1541    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1542
1543    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1544
1545    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1546               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1547
1548    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1549
1550#ifdef __BIG_ENDIAN
1551    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1552#else
1553    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1554#endif
1555
1556    if (with_comp) {
1557        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1558    }
1559
1560    return (opcode);
1561}
1562
1563static void
1564bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1565                        struct dmae_cmd *dmae,
1566                        uint8_t             src_type,
1567                        uint8_t             dst_type)
1568{
1569    memset(dmae, 0, sizeof(struct dmae_cmd));
1570
1571    /* set the opcode */
1572    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1573                                   TRUE, DMAE_COMP_PCI);
1574
1575    /* fill in the completion parameters */
1576    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1577    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1578    dmae->comp_val     = DMAE_COMP_VAL;
1579}
1580
1581/* issue a DMAE command over the init channel and wait for completion */
1582static int
1583bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1584                         struct dmae_cmd *dmae)
1585{
1586    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1587    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1588
1589    BXE_DMAE_LOCK(sc);
1590
1591    /* reset completion */
1592    *wb_comp = 0;
1593
1594    /* post the command on the channel used for initializations */
1595    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1596
1597    /* wait for completion */
1598    DELAY(5);
1599
1600    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1601        if (!timeout ||
1602            (sc->recovery_state != BXE_RECOVERY_DONE &&
1603             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1604            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1605                *wb_comp, sc->recovery_state);
1606            BXE_DMAE_UNLOCK(sc);
1607            return (DMAE_TIMEOUT);
1608        }
1609
1610        timeout--;
1611        DELAY(50);
1612    }
1613
1614    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1615        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1616                *wb_comp, sc->recovery_state);
1617        BXE_DMAE_UNLOCK(sc);
1618        return (DMAE_PCI_ERROR);
1619    }
1620
1621    BXE_DMAE_UNLOCK(sc);
1622    return (0);
1623}
1624
1625void
1626bxe_read_dmae(struct bxe_softc *sc,
1627              uint32_t         src_addr,
1628              uint32_t         len32)
1629{
1630    struct dmae_cmd dmae;
1631    uint32_t *data;
1632    int i, rc;
1633
1634    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1635
1636    if (!sc->dmae_ready) {
1637        data = BXE_SP(sc, wb_data[0]);
1638
1639        for (i = 0; i < len32; i++) {
1640            data[i] = (CHIP_IS_E1(sc)) ?
1641                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1642                          REG_RD(sc, (src_addr + (i * 4)));
1643        }
1644
1645        return;
1646    }
1647
1648    /* set opcode and fixed command fields */
1649    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1650
1651    /* fill in addresses and len */
1652    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1653    dmae.src_addr_hi = 0;
1654    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1655    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1656    dmae.len         = len32;
1657
1658    /* issue the command and wait for completion */
1659    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1660        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1661    }
1662}
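
/*
 * Illustrative sketch only (not part of the driver): bxe_read_dmae() DMAs
 * up to four dwords from a GRC address into the wb_data scratch area of
 * the slowpath block, so a caller picks the result up from
 * BXE_SP(sc, wb_data[]) right after the call. "grc_addr" below is a
 * placeholder for a real register offset.
 *
 *     uint32_t lo, hi;
 *
 *     bxe_read_dmae(sc, grc_addr, 2);
 *     lo = *BXE_SP(sc, wb_data[0]);
 *     hi = *BXE_SP(sc, wb_data[1]);
 */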
1663
1664void
1665bxe_write_dmae(struct bxe_softc *sc,
1666               bus_addr_t       dma_addr,
1667               uint32_t         dst_addr,
1668               uint32_t         len32)
1669{
1670    struct dmae_cmd dmae;
1671    int rc;
1672
1673    if (!sc->dmae_ready) {
1674        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1675
1676        if (CHIP_IS_E1(sc)) {
1677            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1678        } else {
1679            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1680        }
1681
1682        return;
1683    }
1684
1685    /* set opcode and fixed command fields */
1686    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1687
1688    /* fill in addresses and len */
1689    dmae.src_addr_lo = U64_LO(dma_addr);
1690    dmae.src_addr_hi = U64_HI(dma_addr);
1691    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1692    dmae.dst_addr_hi = 0;
1693    dmae.len         = len32;
1694
1695    /* issue the command and wait for completion */
1696    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1697        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1698    }
1699}
1700
1701void
1702bxe_write_dmae_phys_len(struct bxe_softc *sc,
1703                        bus_addr_t       phys_addr,
1704                        uint32_t         addr,
1705                        uint32_t         len)
1706{
1707    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1708    int offset = 0;
1709
1710    while (len > dmae_wr_max) {
1711        bxe_write_dmae(sc,
1712                       (phys_addr + offset), /* src DMA address */
1713                       (addr + offset),      /* dst GRC address */
1714                       dmae_wr_max);
1715        offset += (dmae_wr_max * 4);
1716        len -= dmae_wr_max;
1717    }
1718
1719    bxe_write_dmae(sc,
1720                   (phys_addr + offset), /* src DMA address */
1721                   (addr + offset),      /* dst GRC address */
1722                   len);
1723}
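
/*
 * Worked example of the chunking above (the limit is assumed only for
 * illustration): if DMAE_LEN32_WR_MAX(sc) were 0x400 dwords and len were
 * 0x900 dwords, the loop would issue two full 0x400-dword writes, bumping
 * the byte offset by 0x1000 each time (offset is in bytes, len in dwords),
 * and the final call would write the remaining 0x100 dwords.
 */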
1724
1725void
1726bxe_set_ctx_validation(struct bxe_softc   *sc,
1727                       struct eth_context *cxt,
1728                       uint32_t           cid)
1729{
1730    /* ustorm cxt validation */
1731    cxt->ustorm_ag_context.cdu_usage =
1732        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1733            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1734    /* xcontext validation */
1735    cxt->xstorm_ag_context.cdu_reserved =
1736        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1737            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1738}
1739
1740static void
1741bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1742                            uint8_t          port,
1743                            uint8_t          fw_sb_id,
1744                            uint8_t          sb_index,
1745                            uint8_t          ticks)
1746{
1747    uint32_t addr =
1748        (BAR_CSTRORM_INTMEM +
1749         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1750
1751    REG_WR8(sc, addr, ticks);
1752
1753    BLOGD(sc, DBG_LOAD,
1754          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1755          port, fw_sb_id, sb_index, ticks);
1756}
1757
1758static void
1759bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1760                            uint8_t          port,
1761                            uint16_t         fw_sb_id,
1762                            uint8_t          sb_index,
1763                            uint8_t          disable)
1764{
1765    uint32_t enable_flag =
1766        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1767    uint32_t addr =
1768        (BAR_CSTRORM_INTMEM +
1769         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1770    uint8_t flags;
1771
1772    /* clear and set */
1773    flags = REG_RD8(sc, addr);
1774    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1775    flags |= enable_flag;
1776    REG_WR8(sc, addr, flags);
1777
1778    BLOGD(sc, DBG_LOAD,
1779          "port %d fw_sb_id %d sb_index %d disable %d\n",
1780          port, fw_sb_id, sb_index, disable);
1781}
1782
1783void
1784bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1785                             uint8_t          fw_sb_id,
1786                             uint8_t          sb_index,
1787                             uint8_t          disable,
1788                             uint16_t         usec)
1789{
1790    int port = SC_PORT(sc);
1791    uint8_t ticks = (usec / 4); /* XXX ??? */
1792
1793    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1794
1795    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1796    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1797}
1798
1799void
1800elink_cb_udelay(struct bxe_softc *sc,
1801                uint32_t         usecs)
1802{
1803    DELAY(usecs);
1804}
1805
1806uint32_t
1807elink_cb_reg_read(struct bxe_softc *sc,
1808                  uint32_t         reg_addr)
1809{
1810    return (REG_RD(sc, reg_addr));
1811}
1812
1813void
1814elink_cb_reg_write(struct bxe_softc *sc,
1815                   uint32_t         reg_addr,
1816                   uint32_t         val)
1817{
1818    REG_WR(sc, reg_addr, val);
1819}
1820
1821void
1822elink_cb_reg_wb_write(struct bxe_softc *sc,
1823                      uint32_t         offset,
1824                      uint32_t         *wb_write,
1825                      uint16_t         len)
1826{
1827    REG_WR_DMAE(sc, offset, wb_write, len);
1828}
1829
1830void
1831elink_cb_reg_wb_read(struct bxe_softc *sc,
1832                     uint32_t         offset,
1833                     uint32_t         *wb_write,
1834                     uint16_t         len)
1835{
1836    REG_RD_DMAE(sc, offset, wb_write, len);
1837}
1838
1839uint8_t
1840elink_cb_path_id(struct bxe_softc *sc)
1841{
1842    return (SC_PATH(sc));
1843}
1844
1845void
1846elink_cb_event_log(struct bxe_softc     *sc,
1847                   const elink_log_id_t elink_log_id,
1848                   ...)
1849{
1850    /* XXX */
1851    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1852}
1853
1854static int
1855bxe_set_spio(struct bxe_softc *sc,
1856             int              spio,
1857             uint32_t         mode)
1858{
1859    uint32_t spio_reg;
1860
1861    /* Only 2 SPIOs are configurable */
1862    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1863        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1864        return (-1);
1865    }
1866
1867    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1868
1869    /* read SPIO and mask except the float bits */
1870    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1871
1872    switch (mode) {
1873    case MISC_SPIO_OUTPUT_LOW:
1874        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1875        /* clear FLOAT and set CLR */
1876        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1877        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1878        break;
1879
1880    case MISC_SPIO_OUTPUT_HIGH:
1881        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1882        /* clear FLOAT and set SET */
1883        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1884        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1885        break;
1886
1887    case MISC_SPIO_INPUT_HI_Z:
1888        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1889        /* set FLOAT */
1890        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1891        break;
1892
1893    default:
1894        break;
1895    }
1896
1897    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1898    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1899
1900    return (0);
1901}
1902
1903static int
1904bxe_gpio_read(struct bxe_softc *sc,
1905              int              gpio_num,
1906              uint8_t          port)
1907{
1908    /* The GPIO should be swapped if swap register is set and active */
1909    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1910                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1911    int gpio_shift = (gpio_num +
1912                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1913    uint32_t gpio_mask = (1 << gpio_shift);
1914    uint32_t gpio_reg;
1915
1916    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1918            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1919            gpio_mask);
1920        return (-1);
1921    }
1922
1923    /* read GPIO value */
1924    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1925
1926    /* get the requested pin value */
1927    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1928}
1929
1930static int
1931bxe_gpio_write(struct bxe_softc *sc,
1932               int              gpio_num,
1933               uint32_t         mode,
1934               uint8_t          port)
1935{
1936    /* The GPIO should be swapped if swap register is set and active */
1937    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1938                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1939    int gpio_shift = (gpio_num +
1940                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1941    uint32_t gpio_mask = (1 << gpio_shift);
1942    uint32_t gpio_reg;
1943
1944    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1945        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1946            " gpio_shift %d gpio_mask 0x%x\n",
1947            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1948        return (-1);
1949    }
1950
1951    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1952
1953    /* read GPIO and mask except the float bits */
1954    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1955
1956    switch (mode) {
1957    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1958        BLOGD(sc, DBG_PHY,
1959              "Set GPIO %d (shift %d) -> output low\n",
1960              gpio_num, gpio_shift);
1961        /* clear FLOAT and set CLR */
1962        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1963        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1964        break;
1965
1966    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1967        BLOGD(sc, DBG_PHY,
1968              "Set GPIO %d (shift %d) -> output high\n",
1969              gpio_num, gpio_shift);
1970        /* clear FLOAT and set SET */
1971        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1973        break;
1974
1975    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1976        BLOGD(sc, DBG_PHY,
1977              "Set GPIO %d (shift %d) -> input\n",
1978              gpio_num, gpio_shift);
1979        /* set FLOAT */
1980        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1981        break;
1982
1983    default:
1984        break;
1985    }
1986
1987    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1988    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1989
1990    return (0);
1991}
1992
1993static int
1994bxe_gpio_mult_write(struct bxe_softc *sc,
1995                    uint8_t          pins,
1996                    uint32_t         mode)
1997{
1998    uint32_t gpio_reg;
1999
2000    /* any port swapping should be handled by caller */
2001
2002    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2003
2004    /* read GPIO and mask except the float bits */
2005    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2006    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2007    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2008    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2009
2010    switch (mode) {
2011    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2012        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2013        /* set CLR */
2014        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2015        break;
2016
2017    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2018        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2019        /* set SET */
2020        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2021        break;
2022
2023    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2024        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2025        /* set FLOAT */
2026        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2027        break;
2028
2029    default:
2030        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2031            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2032        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2033        return (-1);
2034    }
2035
2036    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2037    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2038
2039    return (0);
2040}
2041
2042static int
2043bxe_gpio_int_write(struct bxe_softc *sc,
2044                   int              gpio_num,
2045                   uint32_t         mode,
2046                   uint8_t          port)
2047{
2048    /* The GPIO should be swapped if swap register is set and active */
2049    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2050                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2051    int gpio_shift = (gpio_num +
2052                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2053    uint32_t gpio_mask = (1 << gpio_shift);
2054    uint32_t gpio_reg;
2055
2056    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2057        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2058            " gpio_shift %d gpio_mask 0x%x\n",
2059            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2060        return (-1);
2061    }
2062
2063    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2064
2065    /* read GPIO int */
2066    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2067
2068    switch (mode) {
2069    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2070        BLOGD(sc, DBG_PHY,
2071              "Clear GPIO INT %d (shift %d) -> output low\n",
2072              gpio_num, gpio_shift);
2073        /* clear SET and set CLR */
2074        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2076        break;
2077
2078    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2079        BLOGD(sc, DBG_PHY,
2080              "Set GPIO INT %d (shift %d) -> output high\n",
2081              gpio_num, gpio_shift);
2082        /* clear CLR and set SET */
2083        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2084        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2085        break;
2086
2087    default:
2088        break;
2089    }
2090
2091    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2092    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2093
2094    return (0);
2095}
2096
2097uint32_t
2098elink_cb_gpio_read(struct bxe_softc *sc,
2099                   uint16_t         gpio_num,
2100                   uint8_t          port)
2101{
2102    return (bxe_gpio_read(sc, gpio_num, port));
2103}
2104
2105uint8_t
2106elink_cb_gpio_write(struct bxe_softc *sc,
2107                    uint16_t         gpio_num,
2108                    uint8_t          mode, /* 0=low 1=high */
2109                    uint8_t          port)
2110{
2111    return (bxe_gpio_write(sc, gpio_num, mode, port));
2112}
2113
2114uint8_t
2115elink_cb_gpio_mult_write(struct bxe_softc *sc,
2116                         uint8_t          pins,
2117                         uint8_t          mode) /* 0=low 1=high */
2118{
2119    return (bxe_gpio_mult_write(sc, pins, mode));
2120}
2121
2122uint8_t
2123elink_cb_gpio_int_write(struct bxe_softc *sc,
2124                        uint16_t         gpio_num,
2125                        uint8_t          mode, /* 0=low 1=high */
2126                        uint8_t          port)
2127{
2128    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2129}
2130
2131void
2132elink_cb_notify_link_changed(struct bxe_softc *sc)
2133{
2134    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2135                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2136}
2137
2138/* send the MCP a request, block until there is a reply */
2139uint32_t
2140elink_cb_fw_command(struct bxe_softc *sc,
2141                    uint32_t         command,
2142                    uint32_t         param)
2143{
2144    int mb_idx = SC_FW_MB_IDX(sc);
2145    uint32_t seq;
2146    uint32_t rc = 0;
2147    uint32_t cnt = 1;
2148    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2149
2150    BXE_FWMB_LOCK(sc);
2151
2152    seq = ++sc->fw_seq;
2153    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2154    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2155
2156    BLOGD(sc, DBG_PHY,
2157          "wrote command 0x%08x to FW MB param 0x%08x\n",
2158          (command | seq), param);
2159
2160    /* Let the FW do its magic. Give it up to 5 seconds... */
2161    do {
2162        DELAY(delay * 1000);
2163        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2164    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2165
2166    BLOGD(sc, DBG_PHY,
2167          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2168          cnt*delay, rc, seq);
2169
2170    /* is this a reply to our command? */
2171    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2172        rc &= FW_MSG_CODE_MASK;
2173    } else {
2174        /* Ruh-roh! */
2175        BLOGE(sc, "FW failed to respond!\n");
2176        // XXX bxe_fw_dump(sc);
2177        rc = 0;
2178    }
2179
2180    BXE_FWMB_UNLOCK(sc);
2181    return (rc);
2182}
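
/*
 * Polling budget of the loop above: delay is 10 ms per iteration (100 ms
 * on slow emulation chips) and cnt is capped at 500, so the MCP gets
 * roughly 5 seconds (50 seconds under emulation) to echo the sequence
 * number back before it is declared unresponsive and 0 is returned to the
 * caller.
 */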
2183
2184static uint32_t
2185bxe_fw_command(struct bxe_softc *sc,
2186               uint32_t         command,
2187               uint32_t         param)
2188{
2189    return (elink_cb_fw_command(sc, command, param));
2190}
2191
2192static void
2193__storm_memset_dma_mapping(struct bxe_softc *sc,
2194                           uint32_t         addr,
2195                           bus_addr_t       mapping)
2196{
2197    REG_WR(sc, addr, U64_LO(mapping));
2198    REG_WR(sc, (addr + 4), U64_HI(mapping));
2199}
2200
2201static void
2202storm_memset_spq_addr(struct bxe_softc *sc,
2203                      bus_addr_t       mapping,
2204                      uint16_t         abs_fid)
2205{
2206    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2207                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2208    __storm_memset_dma_mapping(sc, addr, mapping);
2209}
2210
2211static void
2212storm_memset_vf_to_pf(struct bxe_softc *sc,
2213                      uint16_t         abs_fid,
2214                      uint16_t         pf_id)
2215{
2216    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2217    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2220}
2221
2222static void
2223storm_memset_func_en(struct bxe_softc *sc,
2224                     uint16_t         abs_fid,
2225                     uint8_t          enable)
2226{
2227    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2228    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2231}
2232
2233static void
2234storm_memset_eq_data(struct bxe_softc       *sc,
2235                     struct event_ring_data *eq_data,
2236                     uint16_t               pfid)
2237{
2238    uint32_t addr;
2239    size_t size;
2240
2241    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2242    size = sizeof(struct event_ring_data);
2243    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2244}
2245
2246static void
2247storm_memset_eq_prod(struct bxe_softc *sc,
2248                     uint16_t         eq_prod,
2249                     uint16_t         pfid)
2250{
2251    uint32_t addr = (BAR_CSTRORM_INTMEM +
2252                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2253    REG_WR16(sc, addr, eq_prod);
2254}
2255
2256/*
2257 * Post a slowpath command.
2258 *
2259 * A slowpath command is used to propagate a configuration change through
2260 * the controller in a controlled manner, allowing each STORM processor and
2261 * other H/W blocks to phase in the change.  The commands sent on the
2262 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2263 * completion of the ramrod will occur in different ways.  Here's a
2264 * breakdown of ramrods and how they complete:
2265 *
2266 * RAMROD_CMD_ID_ETH_PORT_SETUP
2267 *   Used to set up the leading connection on a port.  Completes on the
2268 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2269 *
2270 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2271 *   Used to set up an additional connection on a port.  Completes on the
2272 *   RCQ of the multi-queue/RSS connection being initialized.
2273 *
2274 * RAMROD_CMD_ID_ETH_STAT_QUERY
2275 *   Used to force the storm processors to update the statistics database
2276 *   in host memory.  This ramrod is sent on the leading connection CID and
2277 *   completes as an index increment of the CSTORM on the default status
2278 *   block.
2279 *
2280 * RAMROD_CMD_ID_ETH_UPDATE
2281 *   Used to update the state of the leading connection, usually to update
2282 *   the RSS indirection table.  Completes on the RCQ of the leading
2283 *   connection. (Not currently used under FreeBSD until OS support becomes
2284 *   available.)
2285 *
2286 * RAMROD_CMD_ID_ETH_HALT
2287 *   Used when tearing down a connection prior to driver unload.  Completes
2288 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2289 *   use this on the leading connection.
2290 *
2291 * RAMROD_CMD_ID_ETH_SET_MAC
2292 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2293 *   the RCQ of the leading connection.
2294 *
2295 * RAMROD_CMD_ID_ETH_CFC_DEL
2296 *   Used when tearing down a connection prior to driver unload.  Completes
2297 *   on the RCQ of the leading connection (since the current connection
2298 *   has been completely removed from controller memory).
2299 *
2300 * RAMROD_CMD_ID_ETH_PORT_DEL
2301 *   Used to tear down the leading connection prior to driver unload,
2302 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2303 *   default status block.
2304 *
2305 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2306 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2307 *   RSS connection that is being offloaded.  (Not currently used under
2308 *   FreeBSD.)
2309 *
2310 * There can only be one command pending per function.
2311 *
2312 * Returns:
2313 *   0 = Success, !0 = Failure.
2314 */
2315
2316/* must be called under the spq lock */
2317static inline
2318struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2319{
2320    struct eth_spe *next_spe = sc->spq_prod_bd;
2321
2322    if (sc->spq_prod_bd == sc->spq_last_bd) {
2323        /* wrap back to the first eth_spq */
2324        sc->spq_prod_bd = sc->spq;
2325        sc->spq_prod_idx = 0;
2326    } else {
2327        sc->spq_prod_bd++;
2328        sc->spq_prod_idx++;
2329    }
2330
2331    return (next_spe);
2332}
2333
2334/* must be called under the spq lock */
2335static inline
2336void bxe_sp_prod_update(struct bxe_softc *sc)
2337{
2338    int func = SC_FUNC(sc);
2339
2340    /*
2341     * Make sure that BD data is updated before writing the producer.
2342     * BD data is written to the memory, the producer is read from the
2343     * memory, thus we need a full memory barrier to ensure the ordering.
2344     */
2345    mb();
2346
2347    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2348             sc->spq_prod_idx);
2349
2350    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2351                      BUS_SPACE_BARRIER_WRITE);
2352}
2353
2354/**
2355 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2356 *
2357 * @cmd:      command to check
2358 * @cmd_type: command type
2359 */
2360static inline
2361int bxe_is_contextless_ramrod(int cmd,
2362                              int cmd_type)
2363{
2364    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2365        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2366        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2367        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2368        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2369        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2370        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2371        return (TRUE);
2372    } else {
2373        return (FALSE);
2374    }
2375}
2376
2377/**
2378 * bxe_sp_post - place a single command on an SP ring
2379 *
2380 * @sc:         driver handle
2381 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2382 * @cid:        SW CID the command is related to
2383 * @data_hi:    command private data address (high 32 bits)
2384 * @data_lo:    command private data address (low 32 bits)
2385 * @cmd_type:   command type (e.g. NONE, ETH)
2386 *
2387 * SP data is handled as if it's always an address pair, thus data fields are
2388 * not swapped to little endian in upper functions. Instead this function swaps
2389 * data as if it's two uint32 fields.
2390 */
2391int
2392bxe_sp_post(struct bxe_softc *sc,
2393            int              command,
2394            int              cid,
2395            uint32_t         data_hi,
2396            uint32_t         data_lo,
2397            int              cmd_type)
2398{
2399    struct eth_spe *spe;
2400    uint16_t type;
2401    int common;
2402
2403    common = bxe_is_contextless_ramrod(command, cmd_type);
2404
2405    BXE_SP_LOCK(sc);
2406
2407    if (common) {
2408        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2409            BLOGE(sc, "EQ ring is full!\n");
2410            BXE_SP_UNLOCK(sc);
2411            return (-1);
2412        }
2413    } else {
2414        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2415            BLOGE(sc, "SPQ ring is full!\n");
2416            BXE_SP_UNLOCK(sc);
2417            return (-1);
2418        }
2419    }
2420
2421    spe = bxe_sp_get_next(sc);
2422
2423    /* CID needs the port number to be encoded in it */
2424    spe->hdr.conn_and_cmd_data =
2425        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2426
2427    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2428
2429    /* TBD: Check if it works for VFs */
2430    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2431             SPE_HDR_T_FUNCTION_ID);
2432
2433    spe->hdr.type = htole16(type);
2434
2435    spe->data.update_data_addr.hi = htole32(data_hi);
2436    spe->data.update_data_addr.lo = htole32(data_lo);
2437
2438    /*
2439     * It's ok if the actual decrement is issued towards the memory
2440     * somewhere between the lock and unlock. Thus no more explicit
2441     * memory barrier is needed.
2442     */
2443    if (common) {
2444        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2445    } else {
2446        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2447    }
2448
2449    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2450    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2451          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2452    BLOGD(sc, DBG_SP,
2453          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2454          sc->spq_prod_idx,
2455          (uint32_t)U64_HI(sc->spq_dma.paddr),
2456          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2457          command,
2458          common,
2459          HW_CID(sc, cid),
2460          data_hi,
2461          data_lo,
2462          type,
2463          atomic_load_acq_long(&sc->cq_spq_left),
2464          atomic_load_acq_long(&sc->eq_spq_left));
2465
2466    bxe_sp_prod_update(sc);
2467
2468    BXE_SP_UNLOCK(sc);
2469    return (0);
2470}
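
/*
 * Illustrative sketch only (not part of the driver): posting a contextless
 * ramrod whose parameters sit in a DMA-able buffer. The command, CID and
 * mapping below are placeholders; real callers pass the mapping of the
 * relevant slowpath ramrod data structure.
 *
 *     if (bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *                     U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                     ETH_CONNECTION_TYPE) != 0) {
 *         BLOGE(sc, "failed to post ramrod\n");
 *     }
 */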
2471
2472/**
2473 * bxe_debug_print_ind_table - prints the indirection table configuration.
2474 *
2475 * @sc: driver handle
2476 * @p:  pointer to rss configuration
2477 */
2478
2479/*
2480 * FreeBSD Device probe function.
2481 *
2482 * Compares the device found to the driver's list of supported devices and
2483 * reports back to the bsd loader whether this is the right driver for the device.
2484 * This is the driver entry function called from the "kldload" command.
2485 *
2486 * Returns:
2487 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2488 */
2489static int
2490bxe_probe(device_t dev)
2491{
2492    struct bxe_device_type *t;
2493    char *descbuf;
2494    uint16_t did, sdid, svid, vid;
2495
2496    /* Find our device structure */
2497    t = bxe_devs;
2498
2499    /* Get the data for the device to be probed. */
2500    vid  = pci_get_vendor(dev);
2501    did  = pci_get_device(dev);
2502    svid = pci_get_subvendor(dev);
2503    sdid = pci_get_subdevice(dev);
2504
2505    /* Look through the list of known devices for a match. */
2506    while (t->bxe_name != NULL) {
2507        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2508            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2509            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2510            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2511            if (descbuf == NULL)
2512                return (ENOMEM);
2513
2514            /* Print out the device identity. */
2515            snprintf(descbuf, BXE_DEVDESC_MAX,
2516                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2517                     (((pci_read_config(dev, PCIR_REVID, 4) &
2518                        0xf0) >> 4) + 'A'),
2519                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2520                     BXE_DRIVER_VERSION);
2521
2522            device_set_desc_copy(dev, descbuf);
2523            free(descbuf, M_TEMP);
2524            return (BUS_PROBE_DEFAULT);
2525        }
2526        t++;
2527    }
2528
2529    return (ENXIO);
2530}
2531
2532static void
2533bxe_init_mutexes(struct bxe_softc *sc)
2534{
2535#ifdef BXE_CORE_LOCK_SX
2536    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2537             "bxe%d_core_lock", sc->unit);
2538    sx_init(&sc->core_sx, sc->core_sx_name);
2539#else
2540    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2541             "bxe%d_core_lock", sc->unit);
2542    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2543#endif
2544
2545    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2546             "bxe%d_sp_lock", sc->unit);
2547    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2548
2549    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2550             "bxe%d_dmae_lock", sc->unit);
2551    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2552
2553    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2554             "bxe%d_phy_lock", sc->unit);
2555    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2556
2557    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2558             "bxe%d_fwmb_lock", sc->unit);
2559    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2560
2561    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2562             "bxe%d_print_lock", sc->unit);
2563    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2564
2565    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2566             "bxe%d_stats_lock", sc->unit);
2567    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2568
2569    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2570             "bxe%d_mcast_lock", sc->unit);
2571    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2572}
2573
2574static void
2575bxe_release_mutexes(struct bxe_softc *sc)
2576{
2577#ifdef BXE_CORE_LOCK_SX
2578    sx_destroy(&sc->core_sx);
2579#else
2580    if (mtx_initialized(&sc->core_mtx)) {
2581        mtx_destroy(&sc->core_mtx);
2582    }
2583#endif
2584
2585    if (mtx_initialized(&sc->sp_mtx)) {
2586        mtx_destroy(&sc->sp_mtx);
2587    }
2588
2589    if (mtx_initialized(&sc->dmae_mtx)) {
2590        mtx_destroy(&sc->dmae_mtx);
2591    }
2592
2593    if (mtx_initialized(&sc->port.phy_mtx)) {
2594        mtx_destroy(&sc->port.phy_mtx);
2595    }
2596
2597    if (mtx_initialized(&sc->fwmb_mtx)) {
2598        mtx_destroy(&sc->fwmb_mtx);
2599    }
2600
2601    if (mtx_initialized(&sc->print_mtx)) {
2602        mtx_destroy(&sc->print_mtx);
2603    }
2604
2605    if (mtx_initialized(&sc->stats_mtx)) {
2606        mtx_destroy(&sc->stats_mtx);
2607    }
2608
2609    if (mtx_initialized(&sc->mcast_mtx)) {
2610        mtx_destroy(&sc->mcast_mtx);
2611    }
2612}
2613
2614static void
2615bxe_tx_disable(struct bxe_softc* sc)
2616{
2617    if_t ifp = sc->ifp;
2618
2619    /* tell the stack the driver is stopped and TX queue is full */
2620    if (ifp !=  NULL) {
2621        if_setdrvflags(ifp, 0);
2622    }
2623}
2624
2625static void
2626bxe_drv_pulse(struct bxe_softc *sc)
2627{
2628    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2629             sc->fw_drv_pulse_wr_seq);
2630}
2631
2632static inline uint16_t
2633bxe_tx_avail(struct bxe_softc *sc,
2634             struct bxe_fastpath *fp)
2635{
2636    int16_t  used;
2637    uint16_t prod;
2638    uint16_t cons;
2639
2640    prod = fp->tx_bd_prod;
2641    cons = fp->tx_bd_cons;
2642
2643    used = SUB_S16(prod, cons);
2644
2645    return (int16_t)(sc->tx_ring_size) - used;
2646}
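
/*
 * Worked example of the wrap-safe math above (ring size assumed only for
 * illustration): with a 16-bit producer of 0x0005 that has wrapped past a
 * consumer of 0xfffb, SUB_S16(0x0005, 0xfffb) yields 10 BDs in flight, so
 * a hypothetical tx_ring_size of 4096 would report 4096 - 10 = 4086 free
 * descriptors.
 */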
2647
2648static inline int
2649bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2650{
2651    uint16_t hw_cons;
2652
2653    mb(); /* status block fields can change */
2654    hw_cons = le16toh(*fp->tx_cons_sb);
2655    return (hw_cons != fp->tx_pkt_cons);
2656}
2657
2658static inline uint8_t
2659bxe_has_tx_work(struct bxe_fastpath *fp)
2660{
2661    /* expand this for multi-cos if ever supported */
2662    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2663}
2664
2665static inline int
2666bxe_has_rx_work(struct bxe_fastpath *fp)
2667{
2668    uint16_t rx_cq_cons_sb;
2669
2670    mb(); /* status block fields can change */
2671    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2672    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2673        rx_cq_cons_sb++;
2674    return (fp->rx_cq_cons != rx_cq_cons_sb);
2675}
2676
2677static void
2678bxe_sp_event(struct bxe_softc    *sc,
2679             struct bxe_fastpath *fp,
2680             union eth_rx_cqe    *rr_cqe)
2681{
2682    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2683    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2684    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2685    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2686
2687    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2688          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2689
2690    switch (command) {
2691    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2692        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2693        drv_cmd = ECORE_Q_CMD_UPDATE;
2694        break;
2695
2696    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2697        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2698        drv_cmd = ECORE_Q_CMD_SETUP;
2699        break;
2700
2701    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2702        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2703        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2704        break;
2705
2706    case (RAMROD_CMD_ID_ETH_HALT):
2707        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2708        drv_cmd = ECORE_Q_CMD_HALT;
2709        break;
2710
2711    case (RAMROD_CMD_ID_ETH_TERMINATE):
2712        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2713        drv_cmd = ECORE_Q_CMD_TERMINATE;
2714        break;
2715
2716    case (RAMROD_CMD_ID_ETH_EMPTY):
2717        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2718        drv_cmd = ECORE_Q_CMD_EMPTY;
2719        break;
2720
2721    default:
2722        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2723              command, fp->index);
2724        return;
2725    }
2726
2727    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2728        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2729        /*
2730         * q_obj->complete_cmd() failure means that this was
2731         * an unexpected completion.
2732         *
2733         * In this case we don't want to increase the sc->spq_left
2734         * because apparently we haven't sent this command the first
2735         * place.
2736         */
2737        // bxe_panic(sc, ("Unexpected SP completion\n"));
2738        return;
2739    }
2740
2741    atomic_add_acq_long(&sc->cq_spq_left, 1);
2742
2743    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2744          atomic_load_acq_long(&sc->cq_spq_left));
2745}
2746
2747/*
2748 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2749 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2750 * the current aggregation queue as in-progress.
2751 */
2752static void
2753bxe_tpa_start(struct bxe_softc            *sc,
2754              struct bxe_fastpath         *fp,
2755              uint16_t                    queue,
2756              uint16_t                    cons,
2757              uint16_t                    prod,
2758              struct eth_fast_path_rx_cqe *cqe)
2759{
2760    struct bxe_sw_rx_bd tmp_bd;
2761    struct bxe_sw_rx_bd *rx_buf;
2762    struct eth_rx_bd *rx_bd;
2763    int max_agg_queues;
2764    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2765    uint16_t index;
2766
2767    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2768                       "cons=%d prod=%d\n",
2769          fp->index, queue, cons, prod);
2770
2771    max_agg_queues = MAX_AGG_QS(sc);
2772
2773    KASSERT((queue < max_agg_queues),
2774            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2775             fp->index, queue, max_agg_queues));
2776
2777    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2778            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2779             fp->index, queue));
2780
2781    /* copy the existing mbuf and mapping from the TPA pool */
2782    tmp_bd = tpa_info->bd;
2783
2784    if (tmp_bd.m == NULL) {
2785        uint32_t *tmp;
2786
2787        tmp = (uint32_t *)cqe;
2788
2789        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2790              fp->index, queue, cons, prod);
2791        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2792            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2793
2794        /* XXX Error handling? */
2795        return;
2796    }
2797
2798    /* change the TPA queue to the start state */
2799    tpa_info->state            = BXE_TPA_STATE_START;
2800    tpa_info->placement_offset = cqe->placement_offset;
2801    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2802    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2803    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2804
2805    fp->rx_tpa_queue_used |= (1 << queue);
2806
2807    /*
2808     * If all the buffer descriptors are filled with mbufs then fill in
2809     * the current consumer index with a new BD. Else if a maximum Rx
2810     * buffer limit is imposed then fill in the next producer index.
2811     */
2812    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2813                prod : cons;
2814
2815    /* move the received mbuf and mapping to TPA pool */
2816    tpa_info->bd = fp->rx_mbuf_chain[cons];
2817
2818    /* release any existing RX BD mbuf mappings */
2819    if (cons != index) {
2820        rx_buf = &fp->rx_mbuf_chain[cons];
2821
2822        if (rx_buf->m_map != NULL) {
2823            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2824                            BUS_DMASYNC_POSTREAD);
2825            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2826        }
2827
2828        /*
2829         * We get here when the maximum number of rx buffers is less than
2830         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2831         * it out here without concern of a memory leak.
2832         */
2833        fp->rx_mbuf_chain[cons].m = NULL;
2834    }
2835
2836    /* update the Rx SW BD with the mbuf info from the TPA pool */
2837    fp->rx_mbuf_chain[index] = tmp_bd;
2838
2839    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2840    rx_bd = &fp->rx_chain[index];
2841    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2842    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2843}
2844
2845/*
2846 * When a TPA aggregation is completed, loop through the individual mbufs
2847 * of the aggregation, combining them into a single mbuf which will be sent
2848 * up the stack. Refill all freed SGEs with mbufs as we go along.
2849 */
2850static int
2851bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2852                   struct bxe_fastpath       *fp,
2853                   struct bxe_sw_tpa_info    *tpa_info,
2854                   uint16_t                  queue,
2855                   uint16_t                  pages,
2856                   struct mbuf               *m,
2857                   struct eth_end_agg_rx_cqe *cqe,
2858                   uint16_t                  cqe_idx)
2859{
2860    struct mbuf *m_frag;
2861    uint32_t frag_len, frag_size, i;
2862    uint16_t sge_idx;
2863    int rc = 0;
2864    int j;
2865
2866    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2867
2868    BLOGD(sc, DBG_LRO,
2869          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2870          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2871
2872    /* make sure the aggregated frame is not too big to handle */
2873    if (pages > 8 * PAGES_PER_SGE) {
2874
2875        uint32_t *tmp = (uint32_t *)cqe;
2876
2877        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2878                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2879              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2880              tpa_info->len_on_bd, frag_size);
2881
2882        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2883            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2884
2885        bxe_panic(sc, ("sge page count error\n"));
2886        return (EINVAL);
2887    }
2888
2889    /*
2890     * Scan through the scatter gather list pulling individual mbufs into a
2891     * single mbuf for the host stack.
2892     */
2893    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2894        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2895
2896        /*
2897         * Firmware gives the indices of the SGE as if the ring is an array
2898         * (meaning that the "next" element will consume 2 indices).
2899         */
2900        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2901
2902        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2903                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2904              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2905
2906        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2907
2908        /* allocate a new mbuf for the SGE */
2909        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2910        if (rc) {
2911            /* Leave all remaining SGEs in the ring! */
2912            return (rc);
2913        }
2914
2915        /* update the fragment length */
2916        m_frag->m_len = frag_len;
2917
2918        /* concatenate the fragment to the head mbuf */
2919        m_cat(m, m_frag);
2920        fp->eth_q_stats.mbuf_alloc_sge--;
2921
2922        /* update the TPA mbuf size and remaining fragment size */
2923        m->m_pkthdr.len += frag_len;
2924        frag_size -= frag_len;
2925    }
2926
2927    BLOGD(sc, DBG_LRO,
2928          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2929          fp->index, queue, frag_size);
2930
2931    return (rc);
2932}
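
/*
 * Worked example of the SGE page math used above and by the TPA stop path
 * (page geometry assumed only for illustration): with 4 KB SGE pages and
 * PAGES_PER_SGE of 1, an aggregated frame of 9216 bytes with 1460 bytes
 * on the BD leaves frag_size = 7756, SGE_PAGE_ALIGN(frag_size) >>
 * SGE_PAGE_SHIFT gives 2 pages, and the loop pulls two SGE mbufs of 4096
 * and 3660 bytes into the head mbuf.
 */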
2933
2934static inline void
2935bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2936{
2937    int i, j;
2938
2939    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2940        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2941
2942        for (j = 0; j < 2; j++) {
2943            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2944            idx--;
2945        }
2946    }
2947}
2948
2949static inline void
2950bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2951{
2952    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2953    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2954
2955    /*
2956     * Clear the last two indices in each page. These are the indices that
2957     * correspond to the "next" element, hence will never be indicated and
2958     * should be removed from the calculations.
2959     */
2960    bxe_clear_sge_mask_next_elems(fp);
2961}
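
/*
 * Worked example (per-page SGE count assumed only for illustration): if
 * RX_SGE_TOTAL_PER_PAGE were 512, bxe_clear_sge_mask_next_elems() clears
 * mask bits 510/511, 1022/1023, and so on for every ring page. Those
 * slots hold the "next page" pointer BDs, are never reported by the
 * firmware, and must not count toward the producer advance in
 * bxe_update_sge_prod().
 */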
2962
2963static inline void
2964bxe_update_last_max_sge(struct bxe_fastpath *fp,
2965                        uint16_t            idx)
2966{
2967    uint16_t last_max = fp->last_max_sge;
2968
2969    if (SUB_S16(idx, last_max) > 0) {
2970        fp->last_max_sge = idx;
2971    }
2972}
2973
2974static inline void
2975bxe_update_sge_prod(struct bxe_softc          *sc,
2976                    struct bxe_fastpath       *fp,
2977                    uint16_t                  sge_len,
2978                    union eth_sgl_or_raw_data *cqe)
2979{
2980    uint16_t last_max, last_elem, first_elem;
2981    uint16_t delta = 0;
2982    uint16_t i;
2983
2984    if (!sge_len) {
2985        return;
2986    }
2987
2988    /* first mark all used pages */
2989    for (i = 0; i < sge_len; i++) {
2990        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2991                            RX_SGE(le16toh(cqe->sgl[i])));
2992    }
2993
2994    BLOGD(sc, DBG_LRO,
2995          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2996          fp->index, sge_len - 1,
2997          le16toh(cqe->sgl[sge_len - 1]));
2998
2999    /* assume that the last SGE index is the biggest */
3000    bxe_update_last_max_sge(fp,
3001                            le16toh(cqe->sgl[sge_len - 1]));
3002
3003    last_max = RX_SGE(fp->last_max_sge);
3004    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3005    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3006
3007    /* if ring is not full */
3008    if (last_elem + 1 != first_elem) {
3009        last_elem++;
3010    }
3011
3012    /* now update the prod */
3013    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3014        if (__predict_true(fp->sge_mask[i])) {
3015            break;
3016        }
3017
3018        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3019        delta += BIT_VEC64_ELEM_SZ;
3020    }
3021
3022    if (delta > 0) {
3023        fp->rx_sge_prod += delta;
3024        /* clear page-end entries */
3025        bxe_clear_sge_mask_next_elems(fp);
3026    }
3027
3028    BLOGD(sc, DBG_LRO,
3029          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3030          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3031}
3032
3033/*
3034 * The aggregation on the current TPA queue has completed. Pull the individual
3035 * mbuf fragments together into a single mbuf, perform all necessary checksum
3036 * calculations, and send the resulting mbuf to the stack.
3037 */
3038static void
3039bxe_tpa_stop(struct bxe_softc          *sc,
3040             struct bxe_fastpath       *fp,
3041             struct bxe_sw_tpa_info    *tpa_info,
3042             uint16_t                  queue,
3043             uint16_t                  pages,
3044             struct eth_end_agg_rx_cqe *cqe,
3045             uint16_t                  cqe_idx)
3046{
3047    if_t ifp = sc->ifp;
3048    struct mbuf *m;
3049    int rc = 0;
3050
3051    BLOGD(sc, DBG_LRO,
3052          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3053          fp->index, queue, tpa_info->placement_offset,
3054          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3055
3056    m = tpa_info->bd.m;
3057
3058    /* allocate a replacement before modifying existing mbuf */
3059    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3060    if (rc) {
3061        /* drop the frame and log an error */
3062        fp->eth_q_stats.rx_soft_errors++;
3063        goto bxe_tpa_stop_exit;
3064    }
3065
3066    /* we have a replacement, fixup the current mbuf */
3067    m_adj(m, tpa_info->placement_offset);
3068    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3069
3070    /* mark the checksums valid (taken care of by the firmware) */
3071    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3072    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3073    m->m_pkthdr.csum_data = 0xffff;
3074    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3075                               CSUM_IP_VALID   |
3076                               CSUM_DATA_VALID |
3077                               CSUM_PSEUDO_HDR);
3078
3079    /* aggregate all of the SGEs into a single mbuf */
3080    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3081    if (rc) {
3082        /* drop the packet and log an error */
3083        fp->eth_q_stats.rx_soft_errors++;
3084        m_freem(m);
3085    } else {
3086        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3087            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3088            m->m_flags |= M_VLANTAG;
3089        }
3090
3091        /* assign the packet to this interface */
3092        if_setrcvif(m, ifp);
3093
3094#if __FreeBSD_version >= 800000
3095        /* specify what RSS queue was used for this flow */
3096        m->m_pkthdr.flowid = fp->index;
3097        BXE_SET_FLOWID(m);
3098#endif
3099
3100        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3101        fp->eth_q_stats.rx_tpa_pkts++;
3102
3103        /* pass the frame to the stack */
3104        if_input(ifp, m);
3105    }
3106
3107    /* we passed an mbuf up the stack or dropped the frame */
3108    fp->eth_q_stats.mbuf_alloc_tpa--;
3109
3110bxe_tpa_stop_exit:
3111
3112    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3113    fp->rx_tpa_queue_used &= ~(1 << queue);
3114}
3115
3116static uint8_t
3117bxe_service_rxsgl(
3118                 struct bxe_fastpath *fp,
3119                 uint16_t len,
3120                 uint16_t lenonbd,
3121                 struct mbuf *m,
3122                 struct eth_fast_path_rx_cqe *cqe_fp)
3123{
3124    struct mbuf *m_frag;
3125    uint16_t frags, frag_len;
3126    uint16_t sge_idx = 0;
3127    uint16_t j;
3128    uint8_t i, rc = 0;
3129    uint32_t frag_size;
3130
3131    /* adjust the mbuf */
3132    m->m_len = lenonbd;
3133
3134    frag_size =  len - lenonbd;
3135    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3136
3137    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3138        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3139
3140        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3141        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3142        m_frag->m_len = frag_len;
3143
3144       /* allocate a new mbuf for the SGE */
3145        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3146        if (rc) {
3147            /* Leave all remaining SGEs in the ring! */
3148            return (rc);
3149        }
3150        fp->eth_q_stats.mbuf_alloc_sge--;
3151
3152        /* concatenate the fragment to the head mbuf */
3153        m_cat(m, m_frag);
3154
3155        frag_size -= frag_len;
3156    }
3157
3158    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3159
3160    return rc;
3161}
3162
3163static uint8_t
3164bxe_rxeof(struct bxe_softc    *sc,
3165          struct bxe_fastpath *fp)
3166{
3167    if_t ifp = sc->ifp;
3168    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3169    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3170    int rx_pkts = 0;
3171    int rc = 0;
3172
3173    BXE_FP_RX_LOCK(fp);
3174
3175    /* CQ "next element" is of the size of the regular element */
3176    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3177    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3178        hw_cq_cons++;
3179    }
3180
3181    bd_cons = fp->rx_bd_cons;
3182    bd_prod = fp->rx_bd_prod;
3183    bd_prod_fw = bd_prod;
3184    sw_cq_cons = fp->rx_cq_cons;
3185    sw_cq_prod = fp->rx_cq_prod;
3186
3187    /*
3188     * Memory barrier necessary as speculative reads of the rx
3189     * buffer can be ahead of the index in the status block
3190     */
3191    rmb();
3192
3193    BLOGD(sc, DBG_RX,
3194          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3195          fp->index, hw_cq_cons, sw_cq_cons);
3196
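    /*
     * Each CQE is either a slow path event, a TPA start/stop aggregation
     * event, or a regular fast path receive completion.
     */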
3197    while (sw_cq_cons != hw_cq_cons) {
3198        struct bxe_sw_rx_bd *rx_buf = NULL;
3199        union eth_rx_cqe *cqe;
3200        struct eth_fast_path_rx_cqe *cqe_fp;
3201        uint8_t cqe_fp_flags;
3202        enum eth_rx_cqe_type cqe_fp_type;
3203        uint16_t len, lenonbd, pad;
3204        struct mbuf *m = NULL;
3205
3206        comp_ring_cons = RCQ(sw_cq_cons);
3207        bd_prod = RX_BD(bd_prod);
3208        bd_cons = RX_BD(bd_cons);
3209
3210        cqe          = &fp->rcq_chain[comp_ring_cons];
3211        cqe_fp       = &cqe->fast_path_cqe;
3212        cqe_fp_flags = cqe_fp->type_error_flags;
3213        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3214
3215        BLOGD(sc, DBG_RX,
3216              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3217              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3218              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3219              fp->index,
3220              hw_cq_cons,
3221              sw_cq_cons,
3222              bd_prod,
3223              bd_cons,
3224              CQE_TYPE(cqe_fp_flags),
3225              cqe_fp_flags,
3226              cqe_fp->status_flags,
3227              le32toh(cqe_fp->rss_hash_result),
3228              le16toh(cqe_fp->vlan_tag),
3229              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3230              le16toh(cqe_fp->len_on_bd));
3231
3232        /* is this a slowpath msg? */
3233        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3234            bxe_sp_event(sc, fp, cqe);
3235            goto next_cqe;
3236        }
3237
3238        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3239
3240        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3241            struct bxe_sw_tpa_info *tpa_info;
3242            uint16_t frag_size, pages;
3243            uint8_t queue;
3244
3245            if (CQE_TYPE_START(cqe_fp_type)) {
3246                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3247                              bd_cons, bd_prod, cqe_fp);
3248                m = NULL; /* packet not ready yet */
3249                goto next_rx;
3250            }
3251
3252            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3253                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3254
3255            queue = cqe->end_agg_cqe.queue_index;
3256            tpa_info = &fp->rx_tpa_info[queue];
3257
3258            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3259                  fp->index, queue);
3260
3261            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3262                         tpa_info->len_on_bd);
3263            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3264
3265            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3266                         &cqe->end_agg_cqe, comp_ring_cons);
3267
3268            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3269
3270            goto next_cqe;
3271        }
3272
3273        /* non TPA */
3274
3275        /* is this an error packet? */
3276        if (__predict_false(cqe_fp_flags &
3277                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3278            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3279            fp->eth_q_stats.rx_soft_errors++;
3280            goto next_rx;
3281        }
3282
3283        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3284        lenonbd = le16toh(cqe_fp->len_on_bd);
3285        pad = cqe_fp->placement_offset;
3286
3287        m = rx_buf->m;
3288
3289        if (__predict_false(m == NULL)) {
3290            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3291                  bd_cons, fp->index);
3292            goto next_rx;
3293        }
3294
3295        /* XXX double copy if packet length under a threshold */
3296
3297        /*
3298         * If all the buffer descriptors are filled with mbufs then fill in
3299         * the current consumer index with a new BD. Else if a maximum Rx
3300         * buffer limit is imposed then fill in the next producer index.
3301         */
3302        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3303                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3304                                      bd_prod : bd_cons);
3305        if (rc != 0) {
3306
3307            /* we simply reuse the received mbuf and don't post it to the stack */
3308            m = NULL;
3309
3310            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3311                  fp->index, rc);
3312            fp->eth_q_stats.rx_soft_errors++;
3313
3314            if (sc->max_rx_bufs != RX_BD_USABLE) {
3315                /* copy this consumer index to the producer index */
3316                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3317                       sizeof(struct bxe_sw_rx_bd));
3318                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3319            }
3320
3321            goto next_rx;
3322        }
3323
3324        /* current mbuf was detached from the bd */
3325        fp->eth_q_stats.mbuf_alloc_rx--;
3326
3327        /* we allocated a replacement mbuf, fixup the current one */
3328        m_adj(m, pad);
3329        m->m_pkthdr.len = m->m_len = len;
3330
3331        if ((len > 60) && (len > lenonbd)) {
3332            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3333            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3334            if (rc)
3335                break;
3336            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3337        } else if (lenonbd < len) {
3338            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3339        }
3340
3341        /* assign the packet to this interface */
3342        if_setrcvif(m, ifp);
3343
3344        /* assume no hardware checksum was completed */
3345        m->m_pkthdr.csum_flags = 0;
3346
3347        /* validate checksum if offload enabled */
3348        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3349            /* check for a valid IP frame */
3350            if (!(cqe->fast_path_cqe.status_flags &
3351                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3352                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3353                if (__predict_false(cqe_fp_flags &
3354                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3355                    fp->eth_q_stats.rx_hw_csum_errors++;
3356                } else {
3357                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3358                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3359                }
3360            }
3361
3362            /* check for a valid TCP/UDP frame */
3363            if (!(cqe->fast_path_cqe.status_flags &
3364                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3365                if (__predict_false(cqe_fp_flags &
3366                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3367                    fp->eth_q_stats.rx_hw_csum_errors++;
3368                } else {
3369                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3370                    m->m_pkthdr.csum_data = 0xFFFF;
3371                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3372                                               CSUM_PSEUDO_HDR);
3373                }
3374            }
3375        }
3376
3377        /* if there is a VLAN tag then flag that info */
3378        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3379            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3380            m->m_flags |= M_VLANTAG;
3381        }
3382
3383#if __FreeBSD_version >= 800000
3384        /* specify what RSS queue was used for this flow */
3385        m->m_pkthdr.flowid = fp->index;
3386        BXE_SET_FLOWID(m);
3387#endif
3388
3389next_rx:
3390
3391        bd_cons    = RX_BD_NEXT(bd_cons);
3392        bd_prod    = RX_BD_NEXT(bd_prod);
3393        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3394
3395        /* pass the frame to the stack */
3396        if (__predict_true(m != NULL)) {
3397            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3398            rx_pkts++;
3399            if_input(ifp, m);
3400        }
3401
3402next_cqe:
3403
3404        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3405        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3406
3407        /* limit spinning on the queue */
3408        if (rc != 0)
3409            break;
3410
3411        if (rx_pkts == sc->rx_budget) {
3412            fp->eth_q_stats.rx_budget_reached++;
3413            break;
3414        }
3415    } /* while work to do */
3416
3417    fp->rx_bd_cons = bd_cons;
3418    fp->rx_bd_prod = bd_prod_fw;
3419    fp->rx_cq_cons = sw_cq_cons;
3420    fp->rx_cq_prod = sw_cq_prod;
3421
3422    /* Update producers */
3423    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3424
3425    fp->eth_q_stats.rx_pkts += rx_pkts;
3426    fp->eth_q_stats.rx_calls++;
3427
3428    BXE_FP_RX_UNLOCK(fp);
3429
3430    return (sw_cq_cons != hw_cq_cons);
3431}
3432
3433static uint16_t
3434bxe_free_tx_pkt(struct bxe_softc    *sc,
3435                struct bxe_fastpath *fp,
3436                uint16_t            idx)
3437{
3438    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3439    struct eth_tx_start_bd *tx_start_bd;
3440    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3441    uint16_t new_cons;
3442    int nbd;
3443
3444    /* unmap the mbuf from non-paged memory */
3445    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3446
3447    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3448    nbd = le16toh(tx_start_bd->nbd) - 1;
3449
3450    new_cons = (tx_buf->first_bd + nbd);
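    /* the new consumer index points just past the BDs consumed by this packet */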
3451
3452    /* free the mbuf */
3453    if (__predict_true(tx_buf->m != NULL)) {
3454        m_freem(tx_buf->m);
3455        fp->eth_q_stats.mbuf_alloc_tx--;
3456    } else {
3457        fp->eth_q_stats.tx_chain_lost_mbuf++;
3458    }
3459
3460    tx_buf->m = NULL;
3461    tx_buf->first_bd = 0;
3462
3463    return (new_cons);
3464}
3465
3466/* transmit timeout watchdog */
3467static int
3468bxe_watchdog(struct bxe_softc    *sc,
3469             struct bxe_fastpath *fp)
3470{
3471    BXE_FP_TX_LOCK(fp);
3472
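    /*
     * A timer value of zero means the watchdog is not armed. Otherwise
     * decrement it and only fall through to the timeout handling below
     * when the countdown reaches zero.
     */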
3473    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3474        BXE_FP_TX_UNLOCK(fp);
3475        return (0);
3476    }
3477
3478    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3479    if (sc->trigger_grcdump) {
3480         /* taking grcdump */
3481         bxe_grc_dump(sc);
3482    }
3483
3484    BXE_FP_TX_UNLOCK(fp);
3485
3486    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3487    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3488
3489    return (-1);
3490}
3491
3492/* processes transmit completions */
3493static uint8_t
3494bxe_txeof(struct bxe_softc    *sc,
3495          struct bxe_fastpath *fp)
3496{
3497    if_t ifp = sc->ifp;
3498    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3499    uint16_t tx_bd_avail;
3500
3501    BXE_FP_TX_LOCK_ASSERT(fp);
3502
3503    bd_cons = fp->tx_bd_cons;
3504    hw_cons = le16toh(*fp->tx_cons_sb);
3505    sw_cons = fp->tx_pkt_cons;
3506
3507    while (sw_cons != hw_cons) {
3508        pkt_cons = TX_BD(sw_cons);
3509
3510        BLOGD(sc, DBG_TX,
3511              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3512              fp->index, hw_cons, sw_cons, pkt_cons);
3513
3514        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3515
3516        sw_cons++;
3517    }
3518
3519    fp->tx_pkt_cons = sw_cons;
3520    fp->tx_bd_cons  = bd_cons;
3521
3522    BLOGD(sc, DBG_TX,
3523          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3524          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3525
3526    mb();
3527
3528    tx_bd_avail = bxe_tx_avail(sc, fp);
3529
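    /*
     * If the number of free TX BDs has dropped below the cleanup threshold,
     * set IFF_DRV_OACTIVE so the stack stops handing us frames until more
     * completions free up descriptors; otherwise clear it.
     */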
3530    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3531        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3532    } else {
3533        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3534    }
3535
3536    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3537        /* reset the watchdog timer if there are pending transmits */
3538        fp->watchdog_timer = BXE_TX_TIMEOUT;
3539        return (TRUE);
3540    } else {
3541        /* clear watchdog when there are no pending transmits */
3542        fp->watchdog_timer = 0;
3543        return (FALSE);
3544    }
3545}
3546
3547static void
3548bxe_drain_tx_queues(struct bxe_softc *sc)
3549{
3550    struct bxe_fastpath *fp;
3551    int i, count;
3552
3553    /* wait until all TX fastpath tasks have completed */
3554    for (i = 0; i < sc->num_queues; i++) {
3555        fp = &sc->fp[i];
3556
3557        count = 1000;
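        /* poll for up to ~1 second (1000 iterations of the 1ms DELAY below) */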
3558
3559        while (bxe_has_tx_work(fp)) {
3560
3561            BXE_FP_TX_LOCK(fp);
3562            bxe_txeof(sc, fp);
3563            BXE_FP_TX_UNLOCK(fp);
3564
3565            if (count == 0) {
3566                BLOGE(sc, "Timeout waiting for fp[%d] "
3567                          "transmits to complete!\n", i);
3568                bxe_panic(sc, ("tx drain failure\n"));
3569                return;
3570            }
3571
3572            count--;
3573            DELAY(1000);
3574            rmb();
3575        }
3576    }
3577
3578    return;
3579}
3580
3581static int
3582bxe_del_all_macs(struct bxe_softc          *sc,
3583                 struct ecore_vlan_mac_obj *mac_obj,
3584                 int                       mac_type,
3585                 uint8_t                   wait_for_comp)
3586{
3587    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3588    int rc;
3589
3590    /* wait for completion of the requested command */
3591    if (wait_for_comp) {
3592        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3593    }
3594
3595    /* Set the mac type of addresses we want to clear */
3596    bxe_set_bit(mac_type, &vlan_mac_flags);
3597
3598    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3599    if (rc < 0) {
3600        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3601            rc, mac_type, wait_for_comp);
3602    }
3603
3604    return (rc);
3605}
3606
3607static int
3608bxe_fill_accept_flags(struct bxe_softc *sc,
3609                      uint32_t         rx_mode,
3610                      unsigned long    *rx_accept_flags,
3611                      unsigned long    *tx_accept_flags)
3612{
3613    /* Clear the flags first */
3614    *rx_accept_flags = 0;
3615    *tx_accept_flags = 0;
3616
3617    switch (rx_mode) {
3618    case BXE_RX_MODE_NONE:
3619        /*
3620         * 'drop all' supersedes any accept flags that may have been
3621         * passed to the function.
3622         */
3623        break;
3624
3625    case BXE_RX_MODE_NORMAL:
3626        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3627        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3628        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3629
3630        /* internal switching mode */
3631        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3632        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3633        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3634
3635        break;
3636
3637    case BXE_RX_MODE_ALLMULTI:
3638        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3640        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3641
3642        /* internal switching mode */
3643        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3644        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3646
3647        break;
3648
3649    case BXE_RX_MODE_PROMISC:
3650        /*
3651         * According to the definition of SI mode, an interface in
3652         * promiscuous mode should receive matched and unmatched (in
3653         * resolution of the port) unicast packets.
3654         */
3655        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3656        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3657        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3658        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3659
3660        /* internal switching mode */
3661        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3663
3664        if (IS_MF_SI(sc)) {
3665            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3666        } else {
3667            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3668        }
3669
3670        break;
3671
3672    default:
3673        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3674        return (-1);
3675    }
3676
3677    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3678    if (rx_mode != BXE_RX_MODE_NONE) {
3679        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3680        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3681    }
3682
3683    return (0);
3684}
3685
3686static int
3687bxe_set_q_rx_mode(struct bxe_softc *sc,
3688                  uint8_t          cl_id,
3689                  unsigned long    rx_mode_flags,
3690                  unsigned long    rx_accept_flags,
3691                  unsigned long    tx_accept_flags,
3692                  unsigned long    ramrod_flags)
3693{
3694    struct ecore_rx_mode_ramrod_params ramrod_param;
3695    int rc;
3696
3697    memset(&ramrod_param, 0, sizeof(ramrod_param));
3698
3699    /* Prepare ramrod parameters */
3700    ramrod_param.cid = 0;
3701    ramrod_param.cl_id = cl_id;
3702    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3703    ramrod_param.func_id = SC_FUNC(sc);
3704
3705    ramrod_param.pstate = &sc->sp_state;
3706    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3707
3708    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3709    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3710
3711    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3712
3713    ramrod_param.ramrod_flags = ramrod_flags;
3714    ramrod_param.rx_mode_flags = rx_mode_flags;
3715
3716    ramrod_param.rx_accept_flags = rx_accept_flags;
3717    ramrod_param.tx_accept_flags = tx_accept_flags;
3718
3719    rc = ecore_config_rx_mode(sc, &ramrod_param);
3720    if (rc < 0) {
3721        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3722            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3723            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3724            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3725            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3726        return (rc);
3727    }
3728
3729    return (0);
3730}
3731
3732static int
3733bxe_set_storm_rx_mode(struct bxe_softc *sc)
3734{
3735    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3736    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3737    int rc;
3738
3739    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3740                               &tx_accept_flags);
3741    if (rc) {
3742        return (rc);
3743    }
3744
3745    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3746    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3747
3748    /* XXX ensure all fastpaths have the same cl_id and/or move it to bxe_softc */
3749    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3750                              rx_accept_flags, tx_accept_flags,
3751                              ramrod_flags));
3752}
3753
3754/* returns the "mcp load_code" according to the global load_count array */
3755static int
3756bxe_nic_load_no_mcp(struct bxe_softc *sc)
3757{
3758    int path = SC_PATH(sc);
3759    int port = SC_PORT(sc);
3760
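    /*
     * load_count[path][0] counts every function loaded on this path, while
     * [1] and [2] count the functions loaded per port. The first function
     * on the path performs the COMMON init, the first on a port performs
     * the PORT init, and every other function does a FUNCTION-only init.
     */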
3761    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3762          path, load_count[path][0], load_count[path][1],
3763          load_count[path][2]);
3764    load_count[path][0]++;
3765    load_count[path][1 + port]++;
3766    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3767          path, load_count[path][0], load_count[path][1],
3768          load_count[path][2]);
3769    if (load_count[path][0] == 1) {
3770        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3771    } else if (load_count[path][1 + port] == 1) {
3772        return (FW_MSG_CODE_DRV_LOAD_PORT);
3773    } else {
3774        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3775    }
3776}
3777
3778/* returns the "mcp load_code" according to the global load_count array */
3779static int
3780bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3781{
3782    int port = SC_PORT(sc);
3783    int path = SC_PATH(sc);
3784
3785    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3786          path, load_count[path][0], load_count[path][1],
3787          load_count[path][2]);
3788    load_count[path][0]--;
3789    load_count[path][1 + port]--;
3790    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3791          path, load_count[path][0], load_count[path][1],
3792          load_count[path][2]);
3793    if (load_count[path][0] == 0) {
3794        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3795    } else if (load_count[path][1 + port] == 0) {
3796        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3797    } else {
3798        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3799    }
3800}
3801
3802/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3803static uint32_t
3804bxe_send_unload_req(struct bxe_softc *sc,
3805                    int              unload_mode)
3806{
3807    uint32_t reset_code = 0;
3808
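    /*
     * NOTE: both branches below currently request
     * DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, i.e. an unload with WOL disabled.
     */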
3809    /* Select the UNLOAD request mode */
3810    if (unload_mode == UNLOAD_NORMAL) {
3811        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3812    } else {
3813        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3814    }
3815
3816    /* Send the request to the MCP */
3817    if (!BXE_NOMCP(sc)) {
3818        reset_code = bxe_fw_command(sc, reset_code, 0);
3819    } else {
3820        reset_code = bxe_nic_unload_no_mcp(sc);
3821    }
3822
3823    return (reset_code);
3824}
3825
3826/* send UNLOAD_DONE command to the MCP */
3827static void
3828bxe_send_unload_done(struct bxe_softc *sc,
3829                     uint8_t          keep_link)
3830{
3831    uint32_t reset_param =
3832        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3833
3834    /* Report UNLOAD_DONE to MCP */
3835    if (!BXE_NOMCP(sc)) {
3836        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3837    }
3838}
3839
3840static int
3841bxe_func_wait_started(struct bxe_softc *sc)
3842{
3843    int tout = 50;
3844
3845    if (!sc->port.pmf) {
3846        return (0);
3847    }
3848
3849    /*
3850     * (assumption: No Attention from MCP at this stage)
3851     * The PMF is probably in the middle of a TX disable/enable transaction:
3852     * 1. Sync the ISR for the default SB
3853     * 2. Sync the SP queue - this guarantees that attention handling has started
3854     * 3. Wait until the TX disable/enable transaction completes
3855     *
3856     * Steps 1+2 guarantee that if a DCBX attention was scheduled it has
3857     * already changed the pending bit of the transaction from
3858     * STARTED-->TX_STOPPED; if we already received the completion for the
3859     * transaction the state is TX_STOPPED. The state will return to STARTED
3860     * after completion of the TX_STOPPED-->STARTED transaction.
3861     */
3862
3863    /* XXX make sure default SB ISR is done */
3864    /* need a way to synchronize an irq (intr_mtx?) */
3865
3866    /* XXX flush any work queues */
3867
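    /* poll for up to ~1 second (50 iterations of 20ms) for the STARTED state */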
3868    while (ecore_func_get_state(sc, &sc->func_obj) !=
3869           ECORE_F_STATE_STARTED && tout--) {
3870        DELAY(20000);
3871    }
3872
3873    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3874        /*
3875         * Failed to complete the transaction in a "good way".
3876         * Force both transactions with the CLR bit.
3877         */
3878        struct ecore_func_state_params func_params = { NULL };
3879
3880        BLOGE(sc, "Unexpected function state! "
3881                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3882
3883        func_params.f_obj = &sc->func_obj;
3884        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3885
3886        /* STARTED-->TX_STOPPED */
3887        func_params.cmd = ECORE_F_CMD_TX_STOP;
3888        ecore_func_state_change(sc, &func_params);
3889
3890        /* TX_STOPPED-->STARTED */
3891        func_params.cmd = ECORE_F_CMD_TX_START;
3892        return (ecore_func_state_change(sc, &func_params));
3893    }
3894
3895    return (0);
3896}
3897
3898static int
3899bxe_stop_queue(struct bxe_softc *sc,
3900               int              index)
3901{
3902    struct bxe_fastpath *fp = &sc->fp[index];
3903    struct ecore_queue_state_params q_params = { NULL };
3904    int rc;
3905
3906    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3907
3908    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3909    /* We want to wait for completion in this context */
3910    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3911
3912    /* Stop the primary connection: */
3913
3914    /* ...halt the connection */
3915    q_params.cmd = ECORE_Q_CMD_HALT;
3916    rc = ecore_queue_state_change(sc, &q_params);
3917    if (rc) {
3918        return (rc);
3919    }
3920
3921    /* ...terminate the connection */
3922    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3923    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3924    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3925    rc = ecore_queue_state_change(sc, &q_params);
3926    if (rc) {
3927        return (rc);
3928    }
3929
3930    /* ...delete cfc entry */
3931    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3932    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3933    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3934    return (ecore_queue_state_change(sc, &q_params));
3935}
3936
3937/* wait for the outstanding SP commands */
3938static inline uint8_t
3939bxe_wait_sp_comp(struct bxe_softc *sc,
3940                 unsigned long    mask)
3941{
3942    unsigned long tmp;
3943    int tout = 5000; /* wait for 5 secs tops */
3944
3945    while (tout--) {
3946        mb();
3947        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3948            return (TRUE);
3949        }
3950
3951        DELAY(1000);
3952    }
3953
3954    mb();
3955
3956    tmp = atomic_load_acq_long(&sc->sp_state);
3957    if (tmp & mask) {
3958        BLOGE(sc, "Filtering completion timed out: "
3959                  "sp_state 0x%lx, mask 0x%lx\n",
3960              tmp, mask);
3961        return (FALSE);
3962    }
3963
3964    return (FALSE);
3965}
3966
3967static int
3968bxe_func_stop(struct bxe_softc *sc)
3969{
3970    struct ecore_func_state_params func_params = { NULL };
3971    int rc;
3972
3973    /* prepare parameters for function state transitions */
3974    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3975    func_params.f_obj = &sc->func_obj;
3976    func_params.cmd = ECORE_F_CMD_STOP;
3977
3978    /*
3979     * Try to stop the function the 'good way'. If it fails (in case
3980     * of a parity error during bxe_chip_cleanup()) and we are not in
3981     * debug mode, perform a state transaction in order to enable a
3982     * further HW_RESET transaction.
3983     */
3984    rc = ecore_func_state_change(sc, &func_params);
3985    if (rc) {
3986        BLOGE(sc, "FUNC_STOP ramrod failed. "
3987                  "Running a dry transaction (%d)\n", rc);
3988        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3989        return (ecore_func_state_change(sc, &func_params));
3990    }
3991
3992    return (0);
3993}
3994
3995static int
3996bxe_reset_hw(struct bxe_softc *sc,
3997             uint32_t         load_code)
3998{
3999    struct ecore_func_state_params func_params = { NULL };
4000
4001    /* Prepare parameters for function state transitions */
4002    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4003
4004    func_params.f_obj = &sc->func_obj;
4005    func_params.cmd = ECORE_F_CMD_HW_RESET;
4006
4007    func_params.params.hw_init.load_phase = load_code;
4008
4009    return (ecore_func_state_change(sc, &func_params));
4010}
4011
4012static void
4013bxe_int_disable_sync(struct bxe_softc *sc,
4014                     int              disable_hw)
4015{
4016    if (disable_hw) {
4017        /* prevent the HW from sending interrupts */
4018        bxe_int_disable(sc);
4019    }
4020
4021    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4022    /* make sure all ISRs are done */
4023
4024    /* XXX make sure sp_task is not running */
4025    /* cancel and flush work queues */
4026}
4027
4028static void
4029bxe_chip_cleanup(struct bxe_softc *sc,
4030                 uint32_t         unload_mode,
4031                 uint8_t          keep_link)
4032{
4033    int port = SC_PORT(sc);
4034    struct ecore_mcast_ramrod_params rparam = { NULL };
4035    uint32_t reset_code;
4036    int i, rc = 0;
4037
4038    bxe_drain_tx_queues(sc);
4039
4040    /* give HW time to discard old tx messages */
4041    DELAY(1000);
4042
4043    /* Clean all ETH MACs */
4044    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4045    if (rc < 0) {
4046        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4047    }
4048
4049    /* Clean up UC list */
4050    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4051    if (rc < 0) {
4052        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4053    }
4054
4055    /* Disable LLH */
4056    if (!CHIP_IS_E1(sc)) {
4057        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4058    }
4059
4060    /* Set "drop all" to stop Rx */
4061
4062    /*
4063     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4064     * a race between the completion code and this code.
4065     */
4066    BXE_MCAST_LOCK(sc);
4067
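    /*
     * If an rx_mode ramrod is already pending just schedule another pass,
     * otherwise program the (now "drop all") rx mode right away.
     */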
4068    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4069        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4070    } else {
4071        bxe_set_storm_rx_mode(sc);
4072    }
4073
4074    /* Clean up multicast configuration */
4075    rparam.mcast_obj = &sc->mcast_obj;
4076    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4077    if (rc < 0) {
4078        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4079    }
4080
4081    BXE_MCAST_UNLOCK(sc);
4082
4083    // XXX bxe_iov_chip_cleanup(sc);
4084
4085    /*
4086     * Send the UNLOAD_REQUEST to the MCP. This will return whether
4087     * this function should perform a FUNCTION, PORT, or COMMON HW
4088     * reset.
4089     */
4090    reset_code = bxe_send_unload_req(sc, unload_mode);
4091
4092    /*
4093     * (assumption: No Attention from MCP at this stage)
4094     * PMF probably in the middle of TX disable/enable transaction
4095     */
4096    rc = bxe_func_wait_started(sc);
4097    if (rc) {
4098        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4099    }
4100
4101    /*
4102     * Close multi and leading connections
4103     * Completions for ramrods are collected in a synchronous way
4104     */
4105    for (i = 0; i < sc->num_queues; i++) {
4106        if (bxe_stop_queue(sc, i)) {
4107            goto unload_error;
4108        }
4109    }
4110
4111    /*
4112     * If the SP settings didn't get completed by now, something has
4113     * gone very wrong.
4114     */
4115    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4116        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4117    }
4118
4119unload_error:
4120
4121    rc = bxe_func_stop(sc);
4122    if (rc) {
4123        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4124    }
4125
4126    /* disable HW interrupts */
4127    bxe_int_disable_sync(sc, TRUE);
4128
4129    /* detach interrupts */
4130    bxe_interrupt_detach(sc);
4131
4132    /* Reset the chip */
4133    rc = bxe_reset_hw(sc, reset_code);
4134    if (rc) {
4135        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4136    }
4137
4138    /* Report UNLOAD_DONE to MCP */
4139    bxe_send_unload_done(sc, keep_link);
4140}
4141
4142static void
4143bxe_disable_close_the_gate(struct bxe_softc *sc)
4144{
4145    uint32_t val;
4146    int port = SC_PORT(sc);
4147
4148    BLOGD(sc, DBG_LOAD,
4149          "Disabling 'close the gates'\n");
4150
4151    if (CHIP_IS_E1(sc)) {
4152        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4153                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4154        val = REG_RD(sc, addr);
4155        val &= ~(0x300);
4156        REG_WR(sc, addr, val);
4157    } else {
4158        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4159        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4160                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4161        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4162    }
4163}
4164
4165/*
4166 * Cleans the objects that have internal lists without sending
4167 * ramrods. Should be run when interrupts are disabled.
4168 */
4169static void
4170bxe_squeeze_objects(struct bxe_softc *sc)
4171{
4172    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4173    struct ecore_mcast_ramrod_params rparam = { NULL };
4174    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4175    int rc;
4176
4177    /* Cleanup MACs' object first... */
4178
4179    /* Wait for completion of the requested command */
4180    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4181    /* Perform a dry cleanup */
4182    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4183
4184    /* Clean ETH primary MAC */
4185    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4186    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4187                             &ramrod_flags);
4188    if (rc != 0) {
4189        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4190    }
4191
4192    /* Cleanup UC list */
4193    vlan_mac_flags = 0;
4194    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4195    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4196                             &ramrod_flags);
4197    if (rc != 0) {
4198        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4199    }
4200
4201    /* Now clean mcast object... */
4202
4203    rparam.mcast_obj = &sc->mcast_obj;
4204    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4205
4206    /* Add a DEL command... */
4207    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4208    if (rc < 0) {
4209        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4210    }
4211
4212    /* now wait until all pending commands are cleared */
4213
4214    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4215    while (rc != 0) {
4216        if (rc < 0) {
4217            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4218            return;
4219        }
4220
4221        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4222    }
4223}
4224
4225/* stop the controller */
4226static __noinline int
4227bxe_nic_unload(struct bxe_softc *sc,
4228               uint32_t         unload_mode,
4229               uint8_t          keep_link)
4230{
4231    uint8_t global = FALSE;
4232    uint32_t val;
4233    int i;
4234
4235    BXE_CORE_LOCK_ASSERT(sc);
4236
4237    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4238
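    /*
     * Briefly acquire and release each fastpath TX lock so that any transmit
     * already in progress finishes before the unload proceeds.
     */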
4239    for (i = 0; i < sc->num_queues; i++) {
4240        struct bxe_fastpath *fp;
4241
4242        fp = &sc->fp[i];
4243        BXE_FP_TX_LOCK(fp);
4244        BXE_FP_TX_UNLOCK(fp);
4245    }
4246
4247    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4248
4249    /* mark driver as unloaded in shmem2 */
4250    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4251        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4252        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4253                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4254    }
4255
4256    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4257        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4258        /*
4259         * We can get here if the driver has been unloaded
4260         * during parity error recovery and is either waiting for a
4261         * leader to complete or for other functions to unload and
4262         * then ifconfig down has been issued. In this case we want to
4263         * unload and let the other functions complete the recovery
4264         * process.
4265         */
4266        sc->recovery_state = BXE_RECOVERY_DONE;
4267        sc->is_leader = 0;
4268        bxe_release_leader_lock(sc);
4269        mb();
4270
4271        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4272        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4273            " state = 0x%x\n", sc->recovery_state, sc->state);
4274        return (-1);
4275    }
4276
4277    /*
4278     * Nothing to do during unload if previous bxe_nic_load()
4279     * did not complete successfully - all resources are already released.
4280     */
4281    if ((sc->state == BXE_STATE_CLOSED) ||
4282        (sc->state == BXE_STATE_ERROR)) {
4283        return (0);
4284    }
4285
4286    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4287    mb();
4288
4289    /* stop tx */
4290    bxe_tx_disable(sc);
4291
4292    sc->rx_mode = BXE_RX_MODE_NONE;
4293    /* XXX set rx mode ??? */
4294
4295    if (IS_PF(sc) && !sc->grcdump_done) {
4296        /* set ALWAYS_ALIVE bit in shmem */
4297        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4298
4299        bxe_drv_pulse(sc);
4300
4301        bxe_stats_handle(sc, STATS_EVENT_STOP);
4302        bxe_save_statistics(sc);
4303    }
4304
4305    /* wait till consumers catch up with producers in all queues */
4306    bxe_drain_tx_queues(sc);
4307
4308    /* If VF, indicate to the PF that this function is going down (the PF
4309     * will delete the sp elements and clear the initializations).
4310     */
4311    if (IS_VF(sc)) {
4312        ; /* bxe_vfpf_close_vf(sc); */
4313    } else if (unload_mode != UNLOAD_RECOVERY) {
4314        /* if this is a normal/close unload need to clean up chip */
4315        if (!sc->grcdump_done)
4316            bxe_chip_cleanup(sc, unload_mode, keep_link);
4317    } else {
4318        /* Send the UNLOAD_REQUEST to the MCP */
4319        bxe_send_unload_req(sc, unload_mode);
4320
4321        /*
4322         * Prevent transactions to the host from the functions on the
4323         * engine that doesn't reset global blocks in case of global
4324         * attention once global blocks are reset and gates are opened
4325         * (the engine whose leader will perform the recovery
4326         * last).
4327         */
4328        if (!CHIP_IS_E1x(sc)) {
4329            bxe_pf_disable(sc);
4330        }
4331
4332        /* disable HW interrupts */
4333        bxe_int_disable_sync(sc, TRUE);
4334
4335        /* detach interrupts */
4336        bxe_interrupt_detach(sc);
4337
4338        /* Report UNLOAD_DONE to MCP */
4339        bxe_send_unload_done(sc, FALSE);
4340    }
4341
4342    /*
4343     * At this stage no more interrupts will arrive so we may safely clean
4344     * the queueable objects here in case they failed to get cleaned so far.
4345     */
4346    if (IS_PF(sc)) {
4347        bxe_squeeze_objects(sc);
4348    }
4349
4350    /* There should be no more pending SP commands at this stage */
4351    sc->sp_state = 0;
4352
4353    sc->port.pmf = 0;
4354
4355    bxe_free_fp_buffers(sc);
4356
4357    if (IS_PF(sc)) {
4358        bxe_free_mem(sc);
4359    }
4360
4361    bxe_free_fw_stats_mem(sc);
4362
4363    sc->state = BXE_STATE_CLOSED;
4364
4365    /*
4366     * Check if there are pending parity attentions. If there are - set
4367     * RECOVERY_IN_PROGRESS.
4368     */
4369    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4370        bxe_set_reset_in_progress(sc);
4371
4372        /* Set RESET_IS_GLOBAL if needed */
4373        if (global) {
4374            bxe_set_reset_global(sc);
4375        }
4376    }
4377
4378    /*
4379     * The last driver must disable a "close the gate" if there is no
4380     * parity attention or "process kill" pending.
4381     */
4382    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4383        bxe_reset_is_done(sc, SC_PATH(sc))) {
4384        bxe_disable_close_the_gate(sc);
4385    }
4386
4387    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4388
4389    return (0);
4390}
4391
4392/*
4393 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4394 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4395 */
4396static int
4397bxe_ifmedia_update(struct ifnet  *ifp)
4398{
4399    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4400    struct ifmedia *ifm;
4401
4402    ifm = &sc->ifmedia;
4403
4404    /* We only support Ethernet media type. */
4405    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4406        return (EINVAL);
4407    }
4408
4409    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4410    case IFM_AUTO:
4411         break;
4412    case IFM_10G_CX4:
4413    case IFM_10G_SR:
4414    case IFM_10G_T:
4415    case IFM_10G_TWINAX:
4416    default:
4417        /* We don't support changing the media type. */
4418        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4419              IFM_SUBTYPE(ifm->ifm_media));
4420        return (EINVAL);
4421    }
4422
4423    return (0);
4424}
4425
4426/*
4427 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4428 */
4429static void
4430bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4431{
4432    struct bxe_softc *sc = if_getsoftc(ifp);
4433
4434    /* Report link down if the driver isn't running. */
4435    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4436        ifmr->ifm_active |= IFM_NONE;
4437        return;
4438    }
4439
4440    /* Setup the default interface info. */
4441    ifmr->ifm_status = IFM_AVALID;
4442    ifmr->ifm_active = IFM_ETHER;
4443
4444    if (sc->link_vars.link_up) {
4445        ifmr->ifm_status |= IFM_ACTIVE;
4446    } else {
4447        ifmr->ifm_active |= IFM_NONE;
4448        return;
4449    }
4450
4451    ifmr->ifm_active |= sc->media;
4452
4453    if (sc->link_vars.duplex == DUPLEX_FULL) {
4454        ifmr->ifm_active |= IFM_FDX;
4455    } else {
4456        ifmr->ifm_active |= IFM_HDX;
4457    }
4458}
4459
4460static void
4461bxe_handle_chip_tq(void *context,
4462                   int  pending)
4463{
4464    struct bxe_softc *sc = (struct bxe_softc *)context;
4465    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4466
4467    switch (work)
4468    {
4469
4470    case CHIP_TQ_REINIT:
4471        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4472            /* restart the interface */
4473            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4474            bxe_periodic_stop(sc);
4475            BXE_CORE_LOCK(sc);
4476            bxe_stop_locked(sc);
4477            bxe_init_locked(sc);
4478            BXE_CORE_UNLOCK(sc);
4479        }
4480        break;
4481
4482    default:
4483        break;
4484    }
4485}
4486
4487/*
4488 * Handles any IOCTL calls from the operating system.
4489 *
4490 * Returns:
4491 *   0 = Success, >0 Failure
4492 */
4493static int
4494bxe_ioctl(if_t ifp,
4495          u_long       command,
4496          caddr_t      data)
4497{
4498    struct bxe_softc *sc = if_getsoftc(ifp);
4499    struct ifreq *ifr = (struct ifreq *)data;
4500    int mask = 0;
4501    int reinit = 0;
4502    int error = 0;
4503
4504    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4505    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4506
4507    switch (command)
4508    {
4509    case SIOCSIFMTU:
4510        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4511              ifr->ifr_mtu);
4512
4513        if (sc->mtu == ifr->ifr_mtu) {
4514            /* nothing to change */
4515            break;
4516        }
4517
4518        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4519            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4520                  ifr->ifr_mtu, mtu_min, mtu_max);
4521            error = EINVAL;
4522            break;
4523        }
4524
4525        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4526                             (unsigned long)ifr->ifr_mtu);
4527	/*
4528        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4529                              (unsigned long)ifr->ifr_mtu);
4530	XXX - Not sure why it needs to be atomic
4531	*/
4532	if_setmtu(ifp, ifr->ifr_mtu);
4533        reinit = 1;
4534        break;
4535
4536    case SIOCSIFFLAGS:
4537        /* toggle the interface state up or down */
4538        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4539
4540	BXE_CORE_LOCK(sc);
4541        /* check if the interface is up */
4542        if (if_getflags(ifp) & IFF_UP) {
4543            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4544                /* set the receive mode flags */
4545                bxe_set_rx_mode(sc);
4546            } else if (sc->state != BXE_STATE_DISABLED) {
4547		bxe_init_locked(sc);
4548            }
4549        } else {
4550            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551		bxe_periodic_stop(sc);
4552		bxe_stop_locked(sc);
4553            }
4554        }
4555	BXE_CORE_UNLOCK(sc);
4556
4557        break;
4558
4559    case SIOCADDMULTI:
4560    case SIOCDELMULTI:
4561        /* add/delete multicast addresses */
4562        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4563
4564        /* check if the interface is up */
4565        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4566            /* set the receive mode flags */
4567	    BXE_CORE_LOCK(sc);
4568            bxe_set_rx_mode(sc);
4569	    BXE_CORE_UNLOCK(sc);
4570        }
4571
4572        break;
4573
4574    case SIOCSIFCAP:
4575        /* find out which capabilities have changed */
4576        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4577
4578        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4579              mask);
4580
4581        /* toggle the LRO capabilities enable flag */
4582        if (mask & IFCAP_LRO) {
4583	    if_togglecapenable(ifp, IFCAP_LRO);
4584            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4585                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4586            reinit = 1;
4587        }
4588
4589        /* toggle the TXCSUM checksum capabilities enable flag */
4590        if (mask & IFCAP_TXCSUM) {
4591	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4592            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4593                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4594            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4595                if_sethwassistbits(ifp, (CSUM_IP      |
4596                                    CSUM_TCP      |
4597                                    CSUM_UDP      |
4598                                    CSUM_TSO      |
4599                                    CSUM_TCP_IPV6 |
4600                                    CSUM_UDP_IPV6), 0);
4601            } else {
4602		if_clearhwassist(ifp); /* XXX */
4603            }
4604        }
4605
4606        /* toggle the RXCSUM checksum capabilities enable flag */
4607        if (mask & IFCAP_RXCSUM) {
4608	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4609            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4610                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4611            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4612                if_sethwassistbits(ifp, (CSUM_IP      |
4613                                    CSUM_TCP      |
4614                                    CSUM_UDP      |
4615                                    CSUM_TSO      |
4616                                    CSUM_TCP_IPV6 |
4617                                    CSUM_UDP_IPV6), 0);
4618            } else {
4619		if_clearhwassist(ifp); /* XXX */
4620            }
4621        }
4622
4623        /* toggle TSO4 capabilities enabled flag */
4624        if (mask & IFCAP_TSO4) {
4625            if_togglecapenable(ifp, IFCAP_TSO4);
4626            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4627                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4628        }
4629
4630        /* toggle TSO6 capabilities enabled flag */
4631        if (mask & IFCAP_TSO6) {
4632	    if_togglecapenable(ifp, IFCAP_TSO6);
4633            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4634                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4635        }
4636
4637        /* toggle VLAN_HWTSO capabilities enabled flag */
4638        if (mask & IFCAP_VLAN_HWTSO) {
4639
4640	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4641            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4642                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4643        }
4644
4645        /* toggle VLAN_HWCSUM capabilities enabled flag */
4646        if (mask & IFCAP_VLAN_HWCSUM) {
4647            /* XXX investigate this... */
4648            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4649            error = EINVAL;
4650        }
4651
4652        /* toggle VLAN_MTU capabilities enable flag */
4653        if (mask & IFCAP_VLAN_MTU) {
4654            /* XXX investigate this... */
4655            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4656            error = EINVAL;
4657        }
4658
4659        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4660        if (mask & IFCAP_VLAN_HWTAGGING) {
4661            /* XXX investigate this... */
4662            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4663            error = EINVAL;
4664        }
4665
4666        /* toggle VLAN_HWFILTER capabilities enabled flag */
4667        if (mask & IFCAP_VLAN_HWFILTER) {
4668            /* XXX investigate this... */
4669            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4670            error = EINVAL;
4671        }
4672
4673        /* XXX not yet...
4674         * IFCAP_WOL_MAGIC
4675         */
4676
4677        break;
4678
4679    case SIOCSIFMEDIA:
4680    case SIOCGIFMEDIA:
4681        /* set/get interface media */
4682        BLOGD(sc, DBG_IOCTL,
4683              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4684              (command & 0xff));
4685        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4686        break;
4687
4688    default:
4689        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4690              (command & 0xff));
4691        error = ether_ioctl(ifp, command, data);
4692        break;
4693    }
4694
4695    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4696        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4697              "Re-initializing hardware from IOCTL change\n");
4698	bxe_periodic_stop(sc);
4699	BXE_CORE_LOCK(sc);
4700	bxe_stop_locked(sc);
4701	bxe_init_locked(sc);
4702	BXE_CORE_UNLOCK(sc);
4703    }
4704
4705    return (error);
4706}
4707
4708static __noinline void
4709bxe_dump_mbuf(struct bxe_softc *sc,
4710              struct mbuf      *m,
4711              uint8_t          contents)
4712{
4713    char * type;
4714    int i = 0;
4715
4716    if (!(sc->debug & DBG_MBUF)) {
4717        return;
4718    }
4719
4720    if (m == NULL) {
4721        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4722        return;
4723    }
4724
4725    while (m) {
4726
4727#if __FreeBSD_version >= 1000000
4728        BLOGD(sc, DBG_MBUF,
4729              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4730              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4731
4732        if (m->m_flags & M_PKTHDR) {
4733             BLOGD(sc, DBG_MBUF,
4734                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4735                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4736                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4737        }
4738#else
4739        BLOGD(sc, DBG_MBUF,
4740              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4741              i, m, m->m_len, m->m_flags,
4742              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4743
4744        if (m->m_flags & M_PKTHDR) {
4745             BLOGD(sc, DBG_MBUF,
4746                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4747                   i, m->m_pkthdr.len, m->m_flags,
4748                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4749                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4750                   "\22M_PROMISC\23M_NOFREE",
4751                   (int)m->m_pkthdr.csum_flags,
4752                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4753                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4754                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4755                   "\14CSUM_PSEUDO_HDR");
4756        }
4757#endif /* #if __FreeBSD_version >= 1000000 */
4758
4759        if (m->m_flags & M_EXT) {
4760            switch (m->m_ext.ext_type) {
4761            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4762            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4763            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4764            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4765            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4766            case EXT_PACKET:     type = "EXT_PACKET";     break;
4767            case EXT_MBUF:       type = "EXT_MBUF";       break;
4768            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4769            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4770            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4771            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4772            default:             type = "UNKNOWN";        break;
4773            }
4774
4775            BLOGD(sc, DBG_MBUF,
4776                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4777                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4778        }
4779
4780        if (contents) {
4781            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4782        }
4783
4784        m = m->m_next;
4785        i++;
4786    }
4787}
4788
4789/*
4790 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4791 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4792 * The 3 excluded BDs are: 1 for the headers BD plus 2 for the parse BD and the last BD.
4793 * The headers come in a separate BD in FreeBSD, so 13 - 3 = 10.
4794 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4795 */
4796static int
4797bxe_chktso_window(struct bxe_softc  *sc,
4798                  int               nsegs,
4799                  bus_dma_segment_t *segs,
4800                  struct mbuf       *m)
4801{
4802    uint32_t num_wnds, wnd_size, wnd_sum;
4803    int32_t frag_idx, wnd_idx;
4804    unsigned short lso_mss;
4805    int defrag;
4806
4807    defrag = 0;
4808    wnd_sum = 0;
4809    wnd_size = 10;
4810    num_wnds = nsegs - wnd_size;
4811    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4812
4813    /*
4814     * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf, so
4815     * calculate the first window's sum of data while skipping the first
4816     * segment, assuming it holds the headers.
4817     */
4818    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4819        wnd_sum += htole16(segs[frag_idx].ds_len);
4820    }
4821
4822    /* check the first 10 bd window size */
4823    if (wnd_sum < lso_mss) {
4824        return (1);
4825    }
4826
4827    /* run through the windows */
4828    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4829        /* subtract the first mbuf->m_len of the last window (minus the header) */
4830        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4831        /* add the next mbuf len to the len of our new window */
4832        wnd_sum += htole16(segs[frag_idx].ds_len);
4833        if (wnd_sum < lso_mss) {
4834            return (1);
4835        }
4836    }
4837
4838    return (0);
4839}
4840
4841static uint8_t
4842bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4843                    struct mbuf         *m,
4844                    uint32_t            *parsing_data)
4845{
4846    struct ether_vlan_header *eh = NULL;
4847    struct ip *ip4 = NULL;
4848    struct ip6_hdr *ip6 = NULL;
4849    caddr_t ip = NULL;
4850    struct tcphdr *th = NULL;
4851    int e_hlen, ip_hlen, l4_off;
4852    uint16_t proto;
4853
4854    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4855        /* no L4 checksum offload needed */
4856        return (0);
4857    }
4858
4859    /* get the Ethernet header */
4860    eh = mtod(m, struct ether_vlan_header *);
4861
4862    /* handle VLAN encapsulation if present */
4863    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4864        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4865        proto  = ntohs(eh->evl_proto);
4866    } else {
4867        e_hlen = ETHER_HDR_LEN;
4868        proto  = ntohs(eh->evl_encap_proto);
4869    }
4870
4871    switch (proto) {
4872    case ETHERTYPE_IP:
4873        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4874        ip4 = (m->m_len < sizeof(struct ip)) ?
4875                  (struct ip *)m->m_next->m_data :
4876                  (struct ip *)(m->m_data + e_hlen);
4877        /* ip_hl is number of 32-bit words */
4878        ip_hlen = (ip4->ip_hl << 2);
4879        ip = (caddr_t)ip4;
4880        break;
4881    case ETHERTYPE_IPV6:
4882        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4883        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4884                  (struct ip6_hdr *)m->m_next->m_data :
4885                  (struct ip6_hdr *)(m->m_data + e_hlen);
4886        /* XXX cannot support offload with IPv6 extensions */
4887        ip_hlen = sizeof(struct ip6_hdr);
4888        ip = (caddr_t)ip6;
4889        break;
4890    default:
4891        /* We can't offload in this case... */
4892        /* XXX error stat ??? */
4893        return (0);
4894    }
4895
4896    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4897    l4_off = (e_hlen + ip_hlen);
4898
4899    *parsing_data |=
4900        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4901         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4902
4903    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4904                                  CSUM_TSO |
4905                                  CSUM_TCP_IPV6)) {
4906        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4907        th = (struct tcphdr *)(ip + ip_hlen);
4908        /* th_off is number of 32-bit words */
4909        *parsing_data |= ((th->th_off <<
4910                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4911                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4912        return (l4_off + (th->th_off << 2)); /* entire header length */
4913    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4914                                         CSUM_UDP_IPV6)) {
4915        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4916        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4917    } else {
4918        /* XXX error stat ??? */
4919        return (0);
4920    }
4921}
4922
4923static uint8_t
4924bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4925                 struct mbuf                *m,
4926                 struct eth_tx_parse_bd_e1x *pbd)
4927{
4928    struct ether_vlan_header *eh = NULL;
4929    struct ip *ip4 = NULL;
4930    struct ip6_hdr *ip6 = NULL;
4931    caddr_t ip = NULL;
4932    struct tcphdr *th = NULL;
4933    struct udphdr *uh = NULL;
4934    int e_hlen, ip_hlen;
4935    uint16_t proto;
4936    uint8_t hlen;
4937    uint16_t tmp_csum;
4938    uint32_t *tmp_uh;
4939
4940    /* get the Ethernet header */
4941    eh = mtod(m, struct ether_vlan_header *);
4942
4943    /* handle VLAN encapsulation if present */
4944    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4945        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4946        proto  = ntohs(eh->evl_proto);
4947    } else {
4948        e_hlen = ETHER_HDR_LEN;
4949        proto  = ntohs(eh->evl_encap_proto);
4950    }
4951
4952    switch (proto) {
4953    case ETHERTYPE_IP:
4954        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4955        ip4 = (m->m_len < sizeof(struct ip)) ?
4956                  (struct ip *)m->m_next->m_data :
4957                  (struct ip *)(m->m_data + e_hlen);
4958        /* ip_hl is in 32-bit words; the shift converts to 16-bit words */
4959        ip_hlen = (ip4->ip_hl << 1);
4960        ip = (caddr_t)ip4;
4961        break;
4962    case ETHERTYPE_IPV6:
4963        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4964        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4965                  (struct ip6_hdr *)m->m_next->m_data :
4966                  (struct ip6_hdr *)(m->m_data + e_hlen);
4967        /* XXX cannot support offload with IPv6 extensions */
4968        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4969        ip = (caddr_t)ip6;
4970        break;
4971    default:
4972        /* We can't offload in this case... */
4973        /* XXX error stat ??? */
4974        return (0);
4975    }
4976
4977    hlen = (e_hlen >> 1);
4978
4979    /* note that rest of global_data is indirectly zeroed here */
4980    if (m->m_flags & M_VLANTAG) {
4981        pbd->global_data =
4982            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4983    } else {
4984        pbd->global_data = htole16(hlen);
4985    }
4986
4987    pbd->ip_hlen_w = ip_hlen;
4988
4989    hlen += pbd->ip_hlen_w;
4990
4991    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4992
4993    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4994                                  CSUM_TSO |
4995                                  CSUM_TCP_IPV6)) {
4996        th = (struct tcphdr *)(ip + (ip_hlen << 1));
4997        /* th_off is in 32-bit words; the shift converts to 16-bit words */
4998        hlen += (uint16_t)(th->th_off << 1);
4999    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5000                                         CSUM_UDP_IPV6)) {
5001        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5002        hlen += (sizeof(struct udphdr) / 2);
5003    } else {
5004        /* valid case as only CSUM_IP was set */
5005        return (0);
5006    }
5007
5008    pbd->total_hlen_w = htole16(hlen);
5009
5010    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5011                                  CSUM_TSO |
5012                                  CSUM_TCP_IPV6)) {
5013        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5014        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5015    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5016                                         CSUM_UDP_IPV6)) {
5017        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5018
5019        /*
5020         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5021         * checksums and does not know anything about the UDP header and where
5022         * the checksum field is located. It only knows about TCP. Therefore
5023         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5024         * offload. Since the checksum field offset for TCP is 16 bytes and
5025         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5026         * bytes less than the start of the UDP header. This allows the
5027         * hardware to write the checksum in the correct spot. But the
5028         * hardware will compute a checksum which includes the last 10 bytes
5029         * of the IP header. To correct this we tweak the stack computed
5030         * pseudo checksum by folding in the calculation of the inverse
5031         * checksum for those final 10 bytes of the IP header. This allows
5032         * the correct checksum to be computed by the hardware.
5033         */
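        /*
         * Worked example of the offsets involved (standard TCP/UDP header
         * layout): the TCP checksum field sits at offset 16 within the TCP
         * header while the UDP checksum field sits at offset 6 within the
         * UDP header. The hardware always writes the L4 checksum at the TCP
         * offset (16) relative to the L4 header pointer it is given, so
         * pointing it 10 bytes before the real UDP header makes 16 - 10 = 6
         * land exactly on uh_sum. The extra 10 bytes of IP header the
         * hardware now sums are cancelled below by folding the inverse of
         * their checksum (two 32-bit words + one 16-bit word = 10 bytes via
         * in_pseudo()) into the stack's pseudo checksum with in_addword().
         */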
5034
5035        /* set pointer 10 bytes before UDP header */
5036        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5037
5038        /* calculate a pseudo header checksum over the first 10 bytes */
5039        tmp_csum = in_pseudo(*tmp_uh,
5040                             *(tmp_uh + 1),
5041                             *(uint16_t *)(tmp_uh + 2));
5042
5043        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5044    }
5045
5046    return (hlen * 2); /* entire header length, number of bytes */
5047}
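/*
 * Worked example of the return value above (values illustrative): an
 * untagged Ethernet header (14 bytes = 7 16-bit words), an IPv4 header
 * with no options (ip_hl = 5 -> 10 words) and a TCP header with no options
 * (th_off = 5 -> 10 words) accumulate to hlen = 7 + 10 + 10 = 27 words,
 * so bxe_set_pbd_csum() returns 27 * 2 = 54 bytes of total header length.
 */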
5048
5049static void
5050bxe_set_pbd_lso_e2(struct mbuf *m,
5051                   uint32_t    *parsing_data)
5052{
5053    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5054                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5055                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5056
5057    /* XXX test for IPv6 with extension header... */
5058}
5059
5060static void
5061bxe_set_pbd_lso(struct mbuf                *m,
5062                struct eth_tx_parse_bd_e1x *pbd)
5063{
5064    struct ether_vlan_header *eh = NULL;
5065    struct ip *ip = NULL;
5066    struct tcphdr *th = NULL;
5067    int e_hlen;
5068
5069    /* get the Ethernet header */
5070    eh = mtod(m, struct ether_vlan_header *);
5071
5072    /* handle VLAN encapsulation if present */
5073    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5074                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5075
5076    /* get the IP and TCP header, with LSO entire header in first mbuf */
5077    /* XXX assuming IPv4 */
5078    ip = (struct ip *)(m->m_data + e_hlen);
5079    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5080
5081    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5082    pbd->tcp_send_seq = ntohl(th->th_seq);
5083    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5084
5085#if 1
5086        /* XXX IPv4 */
5087        pbd->ip_id = ntohs(ip->ip_id);
5088        pbd->tcp_pseudo_csum =
5089            ntohs(in_pseudo(ip->ip_src.s_addr,
5090                            ip->ip_dst.s_addr,
5091                            htons(IPPROTO_TCP)));
5092#else
5093        /* XXX IPv6 */
5094        pbd->tcp_pseudo_csum =
5095            ntohs(in_pseudo(&ip6->ip6_src,
5096                            &ip6->ip6_dst,
5097                            htons(IPPROTO_TCP)));
5098#endif
5099
5100    pbd->global_data |=
5101        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5102}
5103
5104/*
5105 * Encapsulate an mbuf cluster into the TX BD chain and make the memory
5106 * visible to the controller.
5107 *
5108 * If an mbuf is submitted to this routine and cannot be given to the
5109 * controller (e.g. it has too many fragments) then the function may free
5110 * the mbuf and return to the caller.
5111 *
5112 * Returns:
5113 *   0 = Success, !0 = Failure
5114 *   Note the side effect that an mbuf may be freed if it causes a problem.
5115 */
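/*
 * Illustrative caller pattern (a sketch, not lifted from this file):
 *
 *     struct mbuf *m = ...;
 *     int rc = bxe_tx_encap(fp, &m);
 *     if (rc != 0 && m != NULL) {
 *         // recoverable failure (e.g. ENOMEM): the mbuf is untouched and
 *         // may be requeued for a later retry
 *     } else if (rc != 0) {
 *         // unrecoverable failure: bxe_tx_encap() already freed the mbuf
 *     }
 */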
5116static int
5117bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5118{
5119    bus_dma_segment_t segs[32];
5120    struct mbuf *m0;
5121    struct bxe_sw_tx_bd *tx_buf;
5122    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5123    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5124    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5125    struct eth_tx_bd *tx_data_bd;
5126    struct eth_tx_bd *tx_total_pkt_size_bd;
5127    struct eth_tx_start_bd *tx_start_bd;
5128    uint16_t bd_prod, pkt_prod, total_pkt_size;
5129    uint8_t mac_type;
5130    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5131    struct bxe_softc *sc;
5132    uint16_t tx_bd_avail;
5133    struct ether_vlan_header *eh;
5134    uint32_t pbd_e2_parsing_data = 0;
5135    uint8_t hlen = 0;
5136    int tmp_bd;
5137    int i;
5138
5139    sc = fp->sc;
5140
5141#if __FreeBSD_version >= 800000
5142    M_ASSERTPKTHDR(*m_head);
5143#endif /* #if __FreeBSD_version >= 800000 */
5144
5145    m0 = *m_head;
5146    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5147    tx_start_bd = NULL;
5148    tx_data_bd = NULL;
5149    tx_total_pkt_size_bd = NULL;
5150
5151    /* get the H/W pointer for packets and BDs */
5152    pkt_prod = fp->tx_pkt_prod;
5153    bd_prod = fp->tx_bd_prod;
5154
5155    mac_type = UNICAST_ADDRESS;
5156
5157    /* map the mbuf into the next open DMAable memory */
5158    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5159    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5160                                    tx_buf->m_map, m0,
5161                                    segs, &nsegs, BUS_DMA_NOWAIT);
5162
5163    /* mapping errors */
5164    if (__predict_false(error != 0)) {
5165        fp->eth_q_stats.tx_dma_mapping_failure++;
5166        if (error == ENOMEM) {
5167            /* resource issue, try again later */
5168            rc = ENOMEM;
5169        } else if (error == EFBIG) {
5170            /* possibly recoverable with defragmentation */
5171            fp->eth_q_stats.mbuf_defrag_attempts++;
5172            m0 = m_defrag(*m_head, M_NOWAIT);
5173            if (m0 == NULL) {
5174                fp->eth_q_stats.mbuf_defrag_failures++;
5175                rc = ENOBUFS;
5176            } else {
5177                /* defrag successful, try mapping again */
5178                *m_head = m0;
5179                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5180                                                tx_buf->m_map, m0,
5181                                                segs, &nsegs, BUS_DMA_NOWAIT);
5182                if (error) {
5183                    fp->eth_q_stats.tx_dma_mapping_failure++;
5184                    rc = error;
5185                }
5186            }
5187        } else {
5188            /* unknown, unrecoverable mapping error */
5189            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5190            bxe_dump_mbuf(sc, m0, FALSE);
5191            rc = error;
5192        }
5193
5194        goto bxe_tx_encap_continue;
5195    }
5196
5197    tx_bd_avail = bxe_tx_avail(sc, fp);
5198
5199    /* make sure there is enough room in the send queue */
5200    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5201        /* Recoverable, try again later. */
5202        fp->eth_q_stats.tx_hw_queue_full++;
5203        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5204        rc = ENOMEM;
5205        goto bxe_tx_encap_continue;
5206    }
5207
5208    /* capture the current H/W TX chain high watermark */
5209    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5210                        (TX_BD_USABLE - tx_bd_avail))) {
5211        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5212    }
5213
5214    /* make sure it fits in the packet window */
5215    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5216        /*
5217         * The mbuf may be too big for the controller to handle. If the frame
5218         * is a TSO frame we'll need to do an additional check.
5219         */
5220        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5221            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5222                goto bxe_tx_encap_continue; /* OK to send */
5223            } else {
5224                fp->eth_q_stats.tx_window_violation_tso++;
5225            }
5226        } else {
5227            fp->eth_q_stats.tx_window_violation_std++;
5228        }
5229
5230        /* let's try to defragment this mbuf and remap it */
5231        fp->eth_q_stats.mbuf_defrag_attempts++;
5232        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5233
5234        m0 = m_defrag(*m_head, M_NOWAIT);
5235        if (m0 == NULL) {
5236            fp->eth_q_stats.mbuf_defrag_failures++;
5237            /* Ugh, just drop the frame... :( */
5238            rc = ENOBUFS;
5239        } else {
5240            /* defrag successful, try mapping again */
5241            *m_head = m0;
5242            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5243                                            tx_buf->m_map, m0,
5244                                            segs, &nsegs, BUS_DMA_NOWAIT);
5245            if (error) {
5246                fp->eth_q_stats.tx_dma_mapping_failure++;
5247                /* No sense in trying to defrag/copy chain, drop it. :( */
5248                rc = error;
5249            } else {
5250                /* if the chain is still too long then drop it */
5251                if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5252                    /*
5253                     * in case TSO is enabled nsegs should be checked against
5254                     * BXE_TSO_MAX_SEGMENTS
5255                     */
5256                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5257                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5258                        fp->eth_q_stats.nsegs_path1_errors++;
5259                        rc = ENODEV;
5260                    }
5261                } else {
5262                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5263                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5264                        fp->eth_q_stats.nsegs_path2_errors++;
5265                        rc = ENODEV;
5266                    }
5267                }
5268            }
5269        }
5270    }
5271
5272bxe_tx_encap_continue:
5273
5274    /* Check for errors */
5275    if (rc) {
5276        if (rc == ENOMEM) {
5277            /* recoverable, try again later */
5278        } else {
5279            fp->eth_q_stats.tx_soft_errors++;
5280            fp->eth_q_stats.mbuf_alloc_tx--;
5281            m_freem(*m_head);
5282            *m_head = NULL;
5283        }
5284
5285        return (rc);
5286    }
5287
5288    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5289    if (m0->m_flags & M_BCAST) {
5290        mac_type = BROADCAST_ADDRESS;
5291    } else if (m0->m_flags & M_MCAST) {
5292        mac_type = MULTICAST_ADDRESS;
5293    }
5294
5295    /* store the mbuf into the mbuf ring */
5296    tx_buf->m        = m0;
5297    tx_buf->first_bd = fp->tx_bd_prod;
5298    tx_buf->flags    = 0;
5299
5300    /* prepare the first transmit (start) BD for the mbuf */
5301    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5302
5303    BLOGD(sc, DBG_TX,
5304          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5305          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5306
5307    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5308    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5309    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5310    total_pkt_size += tx_start_bd->nbytes;
5311    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5312
5313    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5314
5315    /* all frames have at least Start BD + Parsing BD */
5316    nbds = nsegs + 1;
5317    tx_start_bd->nbd = htole16(nbds);
5318
5319    if (m0->m_flags & M_VLANTAG) {
5320        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5321        tx_start_bd->bd_flags.as_bitfield |=
5322            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5323    } else {
5324        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5325        if (IS_VF(sc)) {
5326            /* map ethernet header to find type and header length */
5327            eh = mtod(m0, struct ether_vlan_header *);
5328            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5329        } else {
5330            /* used by FW for packet accounting */
5331            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5332        }
5333    }
5334
5335    /*
5336     * reserve a parsing BD from the chain. The parsing BD is always added
5337     * even though it is only used for TSO and checksum offload
5338     */
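    /*
     * Illustrative layout (not from the original source) for an untagged,
     * non-TSO frame that maps into three DMA segments (nsegs = 3):
     *
     *   BD[0] start BD   -> segment 0
     *   BD[1] parsing BD -> no data, checksum/TSO metadata only
     *   BD[2] data BD    -> segment 1
     *   BD[3] data BD    -> segment 2
     *
     * which matches nbds = nsegs + 1 = 4 as programmed above.
     */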
5339    bd_prod = TX_BD_NEXT(bd_prod);
5340
5341    if (m0->m_pkthdr.csum_flags) {
5342        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5343            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5344            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5345        }
5346
5347        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5348            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5349                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5350        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5351            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5352                                                  ETH_TX_BD_FLAGS_IS_UDP |
5353                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5354        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5355                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5356            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5357        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5358            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5359                                                  ETH_TX_BD_FLAGS_IS_UDP);
5360        }
5361    }
5362
5363    if (!CHIP_IS_E1x(sc)) {
5364        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5365        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5366
5367        if (m0->m_pkthdr.csum_flags) {
5368            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5369        }
5370
5371        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5372                 mac_type);
5373    } else {
5374        uint16_t global_data = 0;
5375
5376        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5377        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5378
5379        if (m0->m_pkthdr.csum_flags) {
5380            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5381        }
5382
5383        SET_FLAG(global_data,
5384                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5385        pbd_e1x->global_data |= htole16(global_data);
5386    }
5387
5388    /* setup the parsing BD with TSO specific info */
5389    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5390        fp->eth_q_stats.tx_ofld_frames_lso++;
5391        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5392
5393        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5394            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5395
5396            /* split the first BD into header/data making the fw job easy */
5397            nbds++;
5398            tx_start_bd->nbd = htole16(nbds);
5399            tx_start_bd->nbytes = htole16(hlen);
5400
5401            bd_prod = TX_BD_NEXT(bd_prod);
5402
5403            /* new transmit BD after the tx_parse_bd */
5404            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5405            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5406            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5407            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5408            if (tx_total_pkt_size_bd == NULL) {
5409                tx_total_pkt_size_bd = tx_data_bd;
5410            }
5411
5412            BLOGD(sc, DBG_TX,
5413                  "TSO split header size is %d (%x:%x) nbds %d\n",
5414                  le16toh(tx_start_bd->nbytes),
5415                  le32toh(tx_start_bd->addr_hi),
5416                  le32toh(tx_start_bd->addr_lo),
5417                  nbds);
5418        }
5419
5420        if (!CHIP_IS_E1x(sc)) {
5421            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5422        } else {
5423            bxe_set_pbd_lso(m0, pbd_e1x);
5424        }
5425    }
5426
5427    if (pbd_e2_parsing_data) {
5428        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5429    }
5430
5431    /* prepare remaining BDs, start tx bd contains first seg/frag */
5432    for (i = 1; i < nsegs; i++) {
5433        bd_prod = TX_BD_NEXT(bd_prod);
5434        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5435        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5436        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5437        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5438        if (tx_total_pkt_size_bd == NULL) {
5439            tx_total_pkt_size_bd = tx_data_bd;
5440        }
5441        total_pkt_size += tx_data_bd->nbytes;
5442    }
5443
5444    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5445
5446    if (tx_total_pkt_size_bd != NULL) {
5447        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5448    }
5449
5450    if (__predict_false(sc->debug & DBG_TX)) {
5451        tmp_bd = tx_buf->first_bd;
5452        for (i = 0; i < nbds; i++)
5453        {
5454            if (i == 0) {
5455                BLOGD(sc, DBG_TX,
5456                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5457                      "bd_flags=0x%x hdr_nbds=%d\n",
5458                      tx_start_bd,
5459                      tmp_bd,
5460                      le16toh(tx_start_bd->nbd),
5461                      le16toh(tx_start_bd->vlan_or_ethertype),
5462                      tx_start_bd->bd_flags.as_bitfield,
5463                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5464            } else if (i == 1) {
5465                if (pbd_e1x) {
5466                    BLOGD(sc, DBG_TX,
5467                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5468                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5469                          "tcp_seq=%u total_hlen_w=%u\n",
5470                          pbd_e1x,
5471                          tmp_bd,
5472                          pbd_e1x->global_data,
5473                          pbd_e1x->ip_hlen_w,
5474                          pbd_e1x->ip_id,
5475                          pbd_e1x->lso_mss,
5476                          pbd_e1x->tcp_flags,
5477                          pbd_e1x->tcp_pseudo_csum,
5478                          pbd_e1x->tcp_send_seq,
5479                          le16toh(pbd_e1x->total_hlen_w));
5480                } else { /* if (pbd_e2) */
5481                    BLOGD(sc, DBG_TX,
5482                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5483                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5484                          pbd_e2,
5485                          tmp_bd,
5486                          pbd_e2->data.mac_addr.dst_hi,
5487                          pbd_e2->data.mac_addr.dst_mid,
5488                          pbd_e2->data.mac_addr.dst_lo,
5489                          pbd_e2->data.mac_addr.src_hi,
5490                          pbd_e2->data.mac_addr.src_mid,
5491                          pbd_e2->data.mac_addr.src_lo,
5492                          pbd_e2->parsing_data);
5493                }
5494            }
5495
5496            if (i != 1) { /* skip parse bd as it doesn't hold data */
5497                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5498                BLOGD(sc, DBG_TX,
5499                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5500                      tx_data_bd,
5501                      tmp_bd,
5502                      le16toh(tx_data_bd->nbytes),
5503                      le32toh(tx_data_bd->addr_hi),
5504                      le32toh(tx_data_bd->addr_lo));
5505            }
5506
5507            tmp_bd = TX_BD_NEXT(tmp_bd);
5508        }
5509    }
5510
5511    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5512
5513    /* update TX BD producer index value for next TX */
5514    bd_prod = TX_BD_NEXT(bd_prod);
5515
5516    /*
5517     * If the chain of tx_bd's describing this frame is adjacent to or spans
5518     * an eth_tx_next_bd element then we need to increment the nbds value.
5519     */
5520    if (TX_BD_IDX(bd_prod) < nbds) {
5521        nbds++;
5522    }
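    /*
     * Illustrative example (page size hypothetical): assuming TX_BD_IDX()
     * yields the producer's index within its BD page and a page holds 255
     * usable BDs plus one "next page" element, a 4-BD frame starting at
     * index 253 wraps across the next-page element; after advancing, the
     * in-page index (e.g. 2) is < nbds (4), so nbds is bumped to 5 to
     * account for the next-page BD consumed by the chain.
     */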
5523
5524    /* don't allow reordering of writes for nbd and packets */
5525    mb();
5526
5527    fp->tx_db.data.prod += nbds;
5528
5529    /* producer points to the next free tx_bd at this point */
5530    fp->tx_pkt_prod++;
5531    fp->tx_bd_prod = bd_prod;
5532
5533    DOORBELL(sc, fp->index, fp->tx_db.raw);
5534
5535    fp->eth_q_stats.tx_pkts++;
5536
5537    /* Prevent speculative reads from getting ahead of the status block. */
5538    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5539                      0, 0, BUS_SPACE_BARRIER_READ);
5540
5541    /* Prevent speculative reads from getting ahead of the doorbell. */
5542    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5543                      0, 0, BUS_SPACE_BARRIER_READ);
5544
5545    return (0);
5546}
5547
5548static void
5549bxe_tx_start_locked(struct bxe_softc *sc,
5550                    if_t ifp,
5551                    struct bxe_fastpath *fp)
5552{
5553    struct mbuf *m = NULL;
5554    int tx_count = 0;
5555    uint16_t tx_bd_avail;
5556
5557    BXE_FP_TX_LOCK_ASSERT(fp);
5558
5559    /* keep adding entries while there are frames to send */
5560    while (!if_sendq_empty(ifp)) {
5561
5562        /*
5563         * check for any frames to send
5564         * the dequeue can still return NULL even if the queue is not empty
5565         */
5566        m = if_dequeue(ifp);
5567        if (__predict_false(m == NULL)) {
5568            break;
5569        }
5570
5571        /* the mbuf now belongs to us */
5572        fp->eth_q_stats.mbuf_alloc_tx++;
5573
5574        /*
5575         * Put the frame into the transmit ring. If we don't have room,
5576         * place the mbuf back at the head of the TX queue, set the
5577         * OACTIVE flag, and wait for the NIC to drain the chain.
5578         */
5579        if (__predict_false(bxe_tx_encap(fp, &m))) {
5580            fp->eth_q_stats.tx_encap_failures++;
5581            if (m != NULL) {
5582                /* mark the TX queue as full and return the frame */
5583                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5584                if_sendq_prepend(ifp, m);
5585                fp->eth_q_stats.mbuf_alloc_tx--;
5586                fp->eth_q_stats.tx_queue_xoff++;
5587            }
5588
5589            /* stop looking for more work */
5590            break;
5591        }
5592
5593        /* the frame was enqueued successfully */
5594        tx_count++;
5595
5596        /* send a copy of the frame to any BPF listeners. */
5597        if_etherbpfmtap(ifp, m);
5598
5599        tx_bd_avail = bxe_tx_avail(sc, fp);
5600
5601        /* handle any completions if we're running low */
5602        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5603            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5604            bxe_txeof(sc, fp);
5605            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5606                break;
5607            }
5608        }
5609    }
5610
5611    /* all TX packets were dequeued and/or the tx ring is full */
5612    if (tx_count > 0) {
5613        /* reset the TX watchdog timeout timer */
5614        fp->watchdog_timer = BXE_TX_TIMEOUT;
5615    }
5616}
5617
5618/* Legacy (non-RSS) dispatch routine */
5619static void
5620bxe_tx_start(if_t ifp)
5621{
5622    struct bxe_softc *sc;
5623    struct bxe_fastpath *fp;
5624
5625    sc = if_getsoftc(ifp);
5626
5627    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5628        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5629        return;
5630    }
5631
5632    if (!sc->link_vars.link_up) {
5633        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5634        return;
5635    }
5636
5637    fp = &sc->fp[0];
5638
5639    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5640        fp->eth_q_stats.tx_queue_full_return++;
5641        return;
5642    }
5643
5644    BXE_FP_TX_LOCK(fp);
5645    bxe_tx_start_locked(sc, ifp, fp);
5646    BXE_FP_TX_UNLOCK(fp);
5647}
5648
5649#if __FreeBSD_version >= 901504
5650
5651static int
5652bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5653                       if_t                ifp,
5654                       struct bxe_fastpath *fp,
5655                       struct mbuf         *m)
5656{
5657    struct buf_ring *tx_br = fp->tx_br;
5658    struct mbuf *next;
5659    int depth, rc, tx_count;
5660    uint16_t tx_bd_avail;
5661
5662    rc = tx_count = 0;
5663
5664    BXE_FP_TX_LOCK_ASSERT(fp);
5665
5666    if (sc->state != BXE_STATE_OPEN) {
5667        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5668        return (ENETDOWN);
5669    }
5670
5671    if (!tx_br) {
5672        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5673        return (EINVAL);
5674    }
5675
5676    if (m != NULL) {
5677        rc = drbr_enqueue(ifp, tx_br, m);
5678        if (rc != 0) {
5679            fp->eth_q_stats.tx_soft_errors++;
5680            goto bxe_tx_mq_start_locked_exit;
5681        }
5682    }
5683
5684    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5685        fp->eth_q_stats.tx_request_link_down_failures++;
5686        goto bxe_tx_mq_start_locked_exit;
5687    }
5688
5689    /* fetch the depth of the driver queue */
5690    depth = drbr_inuse_drv(ifp, tx_br);
5691    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5692        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5693    }
5694
5695    /* keep adding entries while there are frames to send */
5696    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5697        /* handle any completions if we're running low */
5698        tx_bd_avail = bxe_tx_avail(sc, fp);
5699        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5700            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5701            bxe_txeof(sc, fp);
5702            tx_bd_avail = bxe_tx_avail(sc, fp);
5703            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5704                fp->eth_q_stats.bd_avail_too_less_failures++;
5705                m_freem(next);
5706                drbr_advance(ifp, tx_br);
5707                rc = ENOBUFS;
5708                break;
5709            }
5710        }
5711
5712        /* the mbuf now belongs to us */
5713        fp->eth_q_stats.mbuf_alloc_tx++;
5714
5715        /*
5716         * Put the frame into the transmit ring. If we don't have room,
5717         * place the mbuf back at the head of the TX queue, set the
5718         * OACTIVE flag, and wait for the NIC to drain the chain.
5719         */
5720        rc = bxe_tx_encap(fp, &next);
5721        if (__predict_false(rc != 0)) {
5722            fp->eth_q_stats.tx_encap_failures++;
5723            if (next != NULL) {
5724                /* mark the TX queue as full and save the frame */
5725                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5726                drbr_putback(ifp, tx_br, next);
5727                fp->eth_q_stats.mbuf_alloc_tx--;
5728                fp->eth_q_stats.tx_frames_deferred++;
5729            } else
5730                drbr_advance(ifp, tx_br);
5731
5732            /* stop looking for more work */
5733            break;
5734        }
5735
5736        /* the transmit frame was enqueued successfully */
5737        tx_count++;
5738
5739        /* send a copy of the frame to any BPF listeners */
5740        if_etherbpfmtap(ifp, next);
5741
5742        drbr_advance(ifp, tx_br);
5743    }
5744
5745    /* all TX packets were dequeued and/or the tx ring is full */
5746    if (tx_count > 0) {
5747        /* reset the TX watchdog timeout timer */
5748        fp->watchdog_timer = BXE_TX_TIMEOUT;
5749    }
5750
5751bxe_tx_mq_start_locked_exit:
5752    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5753    if (!drbr_empty(ifp, tx_br)) {
5754        fp->eth_q_stats.tx_mq_not_empty++;
5755        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5756    }
5757
5758    return (rc);
5759}
5760
5761static void
5762bxe_tx_mq_start_deferred(void *arg,
5763                         int pending)
5764{
5765    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5766    struct bxe_softc *sc = fp->sc;
5767    if_t ifp = sc->ifp;
5768
5769    BXE_FP_TX_LOCK(fp);
5770    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5771    BXE_FP_TX_UNLOCK(fp);
5772}
5773
5774/* Multiqueue (TSS) dispatch routine. */
5775static int
5776bxe_tx_mq_start(struct ifnet *ifp,
5777                struct mbuf  *m)
5778{
5779    struct bxe_softc *sc = if_getsoftc(ifp);
5780    struct bxe_fastpath *fp;
5781    int fp_index, rc;
5782
5783    fp_index = 0; /* default is the first queue */
5784
5785    /* check if flowid is set */
5786
5787    if (BXE_VALID_FLOWID(m))
5788        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5789
5790    fp = &sc->fp[fp_index];
5791
5792    if (sc->state != BXE_STATE_OPEN) {
5793        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5794        return (ENETDOWN);
5795    }
5796
5797    if (BXE_FP_TX_TRYLOCK(fp)) {
5798        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5799        BXE_FP_TX_UNLOCK(fp);
5800    } else {
5801        rc = drbr_enqueue(ifp, fp->tx_br, m);
5802        taskqueue_enqueue(fp->tq, &fp->tx_task);
5803    }
5804
5805    return (rc);
5806}
5807
5808static void
5809bxe_mq_flush(struct ifnet *ifp)
5810{
5811    struct bxe_softc *sc = if_getsoftc(ifp);
5812    struct bxe_fastpath *fp;
5813    struct mbuf *m;
5814    int i;
5815
5816    for (i = 0; i < sc->num_queues; i++) {
5817        fp = &sc->fp[i];
5818
5819        if (fp->state != BXE_FP_STATE_IRQ) {
5820            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5821                  fp->index, fp->state);
5822            continue;
5823        }
5824
5825        if (fp->tx_br != NULL) {
5826            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5827            BXE_FP_TX_LOCK(fp);
5828            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5829                m_freem(m);
5830            }
5831            BXE_FP_TX_UNLOCK(fp);
5832        }
5833    }
5834
5835    if_qflush(ifp);
5836}
5837
5838#endif /* FreeBSD_version >= 901504 */
5839
5840static uint16_t
5841bxe_cid_ilt_lines(struct bxe_softc *sc)
5842{
5843    if (IS_SRIOV(sc)) {
5844        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5845    }
5846    return (L2_ILT_LINES(sc));
5847}
5848
5849static void
5850bxe_ilt_set_info(struct bxe_softc *sc)
5851{
5852    struct ilt_client_info *ilt_client;
5853    struct ecore_ilt *ilt = sc->ilt;
5854    uint16_t line = 0;
5855
5856    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5857    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5858
5859    /* CDU */
5860    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5861    ilt_client->client_num = ILT_CLIENT_CDU;
5862    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5863    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5864    ilt_client->start = line;
5865    line += bxe_cid_ilt_lines(sc);
5866
5867    if (CNIC_SUPPORT(sc)) {
5868        line += CNIC_ILT_LINES;
5869    }
5870
5871    ilt_client->end = (line - 1);
5872
5873    BLOGD(sc, DBG_LOAD,
5874          "ilt client[CDU]: start %d, end %d, "
5875          "psz 0x%x, flags 0x%x, hw psz %d\n",
5876          ilt_client->start, ilt_client->end,
5877          ilt_client->page_size,
5878          ilt_client->flags,
5879          ilog2(ilt_client->page_size >> 12));
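    /*
     * Note on the "hw psz" value logged above: it is the page size encoded
     * as a power-of-two multiple of 4KB. For example, a 16KB ILT page is
     * reported as ilog2(16384 >> 12) = ilog2(4) = 2 (example value only).
     */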
5880
5881    /* QM */
5882    if (QM_INIT(sc->qm_cid_count)) {
5883        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5884        ilt_client->client_num = ILT_CLIENT_QM;
5885        ilt_client->page_size = QM_ILT_PAGE_SZ;
5886        ilt_client->flags = 0;
5887        ilt_client->start = line;
5888
5889        /* 4 bytes for each cid */
5890        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5891                             QM_ILT_PAGE_SZ);
5892
5893        ilt_client->end = (line - 1);
5894
5895        BLOGD(sc, DBG_LOAD,
5896              "ilt client[QM]: start %d, end %d, "
5897              "psz 0x%x, flags 0x%x, hw psz %d\n",
5898              ilt_client->start, ilt_client->end,
5899              ilt_client->page_size, ilt_client->flags,
5900              ilog2(ilt_client->page_size >> 12));
5901    }
5902
5903    if (CNIC_SUPPORT(sc)) {
5904        /* SRC */
5905        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5906        ilt_client->client_num = ILT_CLIENT_SRC;
5907        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5908        ilt_client->flags = 0;
5909        ilt_client->start = line;
5910        line += SRC_ILT_LINES;
5911        ilt_client->end = (line - 1);
5912
5913        BLOGD(sc, DBG_LOAD,
5914              "ilt client[SRC]: start %d, end %d, "
5915              "psz 0x%x, flags 0x%x, hw psz %d\n",
5916              ilt_client->start, ilt_client->end,
5917              ilt_client->page_size, ilt_client->flags,
5918              ilog2(ilt_client->page_size >> 12));
5919
5920        /* TM */
5921        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5922        ilt_client->client_num = ILT_CLIENT_TM;
5923        ilt_client->page_size = TM_ILT_PAGE_SZ;
5924        ilt_client->flags = 0;
5925        ilt_client->start = line;
5926        line += TM_ILT_LINES;
5927        ilt_client->end = (line - 1);
5928
5929        BLOGD(sc, DBG_LOAD,
5930              "ilt client[TM]: start %d, end %d, "
5931              "psz 0x%x, flags 0x%x, hw psz %d\n",
5932              ilt_client->start, ilt_client->end,
5933              ilt_client->page_size, ilt_client->flags,
5934              ilog2(ilt_client->page_size >> 12));
5935    }
5936
5937    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5938}
5939
5940static void
5941bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5942{
5943    int i;
5944    uint32_t rx_buf_size;
5945
5946    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5947
5948    for (i = 0; i < sc->num_queues; i++) {
5949        if (rx_buf_size <= MCLBYTES) {
5950            sc->fp[i].rx_buf_size = rx_buf_size;
5951            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5952        } else if (rx_buf_size <= MJUMPAGESIZE) {
5953            sc->fp[i].rx_buf_size = rx_buf_size;
5954            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5955        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5956            sc->fp[i].rx_buf_size = MCLBYTES;
5957            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5958        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5959            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5960            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5961        } else {
5962            sc->fp[i].rx_buf_size = MCLBYTES;
5963            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5964        }
5965    }
5966}
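/*
 * Illustrative outcomes of the sizing above, assuming 4KB pages so that
 * MCLBYTES = 2048 and MJUMPAGESIZE = 4096 (actual values are platform
 * dependent):
 *   MTU 1500  -> frame plus overhead fits in 2048, so rx_buf_size is the
 *                full frame size and standard 2KB clusters are allocated;
 *   MTU ~4000 -> frame plus overhead fits in 4096, so page-sized jumbo
 *                clusters are used;
 *   MTU 9000  -> frame plus overhead exceeds every threshold, so the final
 *                else falls back to 2KB clusters.
 */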
5967
5968static int
5969bxe_alloc_ilt_mem(struct bxe_softc *sc)
5970{
5971    int rc = 0;
5972
5973    if ((sc->ilt =
5974         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5975                                    M_BXE_ILT,
5976                                    (M_NOWAIT | M_ZERO))) == NULL) {
5977        rc = 1;
5978    }
5979
5980    return (rc);
5981}
5982
5983static int
5984bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5985{
5986    int rc = 0;
5987
5988    if ((sc->ilt->lines =
5989         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5990                                    M_BXE_ILT,
5991                                    (M_NOWAIT | M_ZERO))) == NULL) {
5992        rc = 1;
5993    }
5994
5995    return (rc);
5996}
5997
5998static void
5999bxe_free_ilt_mem(struct bxe_softc *sc)
6000{
6001    if (sc->ilt != NULL) {
6002        free(sc->ilt, M_BXE_ILT);
6003        sc->ilt = NULL;
6004    }
6005}
6006
6007static void
6008bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6009{
6010    if (sc->ilt->lines != NULL) {
6011        free(sc->ilt->lines, M_BXE_ILT);
6012        sc->ilt->lines = NULL;
6013    }
6014}
6015
6016static void
6017bxe_free_mem(struct bxe_softc *sc)
6018{
6019    int i;
6020
6021    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6022        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6023        sc->context[i].vcxt = NULL;
6024        sc->context[i].size = 0;
6025    }
6026
6027    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6028
6029    bxe_free_ilt_lines_mem(sc);
6030
6031}
6032
6033static int
6034bxe_alloc_mem(struct bxe_softc *sc)
6035{
6036
6037    int context_size;
6038    int allocated;
6039    int i;
6040
6041    /*
6042     * Allocate memory for CDU context:
6043     * This memory is allocated separately and not in the generic ILT
6044     * functions because CDU differs in a few aspects:
6045     * 1. There can be multiple entities allocating memory for context -
6046     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6047     * its own ILT lines.
6048     * 2. Since CDU page-size is not a single 4KB page (which is the case
6049     * for the other ILT clients), to be efficient we want to support
6050     * allocation of sub-page-size in the last entry.
6051     * 3. Context pointers are used by the driver to pass to FW / update
6052     * the context (for the other ILT clients the pointers are used just to
6053     * free the memory during unload).
6054     */
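    /*
     * Illustrative example (sizes hypothetical): if context_size came to
     * 40KB and CDU_ILT_PAGE_SZ were 16KB, the loop below would allocate
     * three entries of 16KB, 16KB and 8KB, the last one deliberately
     * sub-page-sized as described in point 2 above.
     */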
6055    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6056    for (i = 0, allocated = 0; allocated < context_size; i++) {
6057        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6058                                  (context_size - allocated));
6059
6060        if (bxe_dma_alloc(sc, sc->context[i].size,
6061                          &sc->context[i].vcxt_dma,
6062                          "cdu context") != 0) {
6063            bxe_free_mem(sc);
6064            return (-1);
6065        }
6066
6067        sc->context[i].vcxt =
6068            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6069
6070        allocated += sc->context[i].size;
6071    }
6072
6073    bxe_alloc_ilt_lines_mem(sc);
6074
6075    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6076          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6077    {
6078        for (i = 0; i < 4; i++) {
6079            BLOGD(sc, DBG_LOAD,
6080                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6081                  i,
6082                  sc->ilt->clients[i].page_size,
6083                  sc->ilt->clients[i].start,
6084                  sc->ilt->clients[i].end,
6085                  sc->ilt->clients[i].client_num,
6086                  sc->ilt->clients[i].flags);
6087        }
6088    }
6089    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6090        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6091        bxe_free_mem(sc);
6092        return (-1);
6093    }
6094
6095    return (0);
6096}
6097
6098static void
6099bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6100{
6101    struct bxe_softc *sc;
6102    int i;
6103
6104    sc = fp->sc;
6105
6106    if (fp->rx_mbuf_tag == NULL) {
6107        return;
6108    }
6109
6110    /* free all mbufs and unload all maps */
6111    for (i = 0; i < RX_BD_TOTAL; i++) {
6112        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6113            bus_dmamap_sync(fp->rx_mbuf_tag,
6114                            fp->rx_mbuf_chain[i].m_map,
6115                            BUS_DMASYNC_POSTREAD);
6116            bus_dmamap_unload(fp->rx_mbuf_tag,
6117                              fp->rx_mbuf_chain[i].m_map);
6118        }
6119
6120        if (fp->rx_mbuf_chain[i].m != NULL) {
6121            m_freem(fp->rx_mbuf_chain[i].m);
6122            fp->rx_mbuf_chain[i].m = NULL;
6123            fp->eth_q_stats.mbuf_alloc_rx--;
6124        }
6125    }
6126}
6127
6128static void
6129bxe_free_tpa_pool(struct bxe_fastpath *fp)
6130{
6131    struct bxe_softc *sc;
6132    int i, max_agg_queues;
6133
6134    sc = fp->sc;
6135
6136    if (fp->rx_mbuf_tag == NULL) {
6137        return;
6138    }
6139
6140    max_agg_queues = MAX_AGG_QS(sc);
6141
6142    /* release all mbufs and unload all DMA maps in the TPA pool */
6143    for (i = 0; i < max_agg_queues; i++) {
6144        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6145            bus_dmamap_sync(fp->rx_mbuf_tag,
6146                            fp->rx_tpa_info[i].bd.m_map,
6147                            BUS_DMASYNC_POSTREAD);
6148            bus_dmamap_unload(fp->rx_mbuf_tag,
6149                              fp->rx_tpa_info[i].bd.m_map);
6150        }
6151
6152        if (fp->rx_tpa_info[i].bd.m != NULL) {
6153            m_freem(fp->rx_tpa_info[i].bd.m);
6154            fp->rx_tpa_info[i].bd.m = NULL;
6155            fp->eth_q_stats.mbuf_alloc_tpa--;
6156        }
6157    }
6158}
6159
6160static void
6161bxe_free_sge_chain(struct bxe_fastpath *fp)
6162{
6163    struct bxe_softc *sc;
6164    int i;
6165
6166    sc = fp->sc;
6167
6168    if (fp->rx_sge_mbuf_tag == NULL) {
6169        return;
6170    }
6171
6172    /* free all mbufs and unload all maps */
6173    for (i = 0; i < RX_SGE_TOTAL; i++) {
6174        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6175            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6176                            fp->rx_sge_mbuf_chain[i].m_map,
6177                            BUS_DMASYNC_POSTREAD);
6178            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6179                              fp->rx_sge_mbuf_chain[i].m_map);
6180        }
6181
6182        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6183            m_freem(fp->rx_sge_mbuf_chain[i].m);
6184            fp->rx_sge_mbuf_chain[i].m = NULL;
6185            fp->eth_q_stats.mbuf_alloc_sge--;
6186        }
6187    }
6188}
6189
6190static void
6191bxe_free_fp_buffers(struct bxe_softc *sc)
6192{
6193    struct bxe_fastpath *fp;
6194    int i;
6195
6196    for (i = 0; i < sc->num_queues; i++) {
6197        fp = &sc->fp[i];
6198
6199#if __FreeBSD_version >= 901504
6200        if (fp->tx_br != NULL) {
6201            /* just in case bxe_mq_flush() wasn't called */
6202            if (mtx_initialized(&fp->tx_mtx)) {
6203                struct mbuf *m;
6204
6205                BXE_FP_TX_LOCK(fp);
6206                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6207                    m_freem(m);
6208                BXE_FP_TX_UNLOCK(fp);
6209            }
6210        }
6211#endif
6212
6213        /* free all RX buffers */
6214        bxe_free_rx_bd_chain(fp);
6215        bxe_free_tpa_pool(fp);
6216        bxe_free_sge_chain(fp);
6217
6218        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6219            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6220                  fp->eth_q_stats.mbuf_alloc_rx);
6221        }
6222
6223        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6224            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6225                  fp->eth_q_stats.mbuf_alloc_sge);
6226        }
6227
6228        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6229            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6230                  fp->eth_q_stats.mbuf_alloc_tpa);
6231        }
6232
6233        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6234            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6235                  fp->eth_q_stats.mbuf_alloc_tx);
6236        }
6237
6238        /* XXX verify all mbufs were reclaimed */
6239    }
6240}
6241
6242static int
6243bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6244                     uint16_t            prev_index,
6245                     uint16_t            index)
6246{
6247    struct bxe_sw_rx_bd *rx_buf;
6248    struct eth_rx_bd *rx_bd;
6249    bus_dma_segment_t segs[1];
6250    bus_dmamap_t map;
6251    struct mbuf *m;
6252    int nsegs, rc;
6253
6254    rc = 0;
6255
6256    /* allocate the new RX BD mbuf */
6257    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6258    if (__predict_false(m == NULL)) {
6259        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6260        return (ENOBUFS);
6261    }
6262
6263    fp->eth_q_stats.mbuf_alloc_rx++;
6264
6265    /* initialize the mbuf buffer length */
6266    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6267
6268    /* map the mbuf into non-paged pool */
6269    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6270                                 fp->rx_mbuf_spare_map,
6271                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6272    if (__predict_false(rc != 0)) {
6273        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6274        m_freem(m);
6275        fp->eth_q_stats.mbuf_alloc_rx--;
6276        return (rc);
6277    }
6278
6279    /* all mbufs must map to a single segment */
6280    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6281
6282    /* release any existing RX BD mbuf mappings */
6283
6284    if (prev_index != index) {
6285        rx_buf = &fp->rx_mbuf_chain[prev_index];
6286
6287        if (rx_buf->m_map != NULL) {
6288            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6289                            BUS_DMASYNC_POSTREAD);
6290            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6291        }
6292
6293        /*
6294         * We only get here from bxe_rxeof() when the maximum number
6295         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6296         * holds the mbuf in the prev_index so it's OK to NULL it out
6297         * here without concern of a memory leak.
6298         */
6299        fp->rx_mbuf_chain[prev_index].m = NULL;
6300    }
6301
6302    rx_buf = &fp->rx_mbuf_chain[index];
6303
6304    if (rx_buf->m_map != NULL) {
6305        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6306                        BUS_DMASYNC_POSTREAD);
6307        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6308    }
6309
6310    /* save the mbuf and mapping info for a future packet */
6311    map = (prev_index != index) ?
6312              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6313    rx_buf->m_map = fp->rx_mbuf_spare_map;
6314    fp->rx_mbuf_spare_map = map;
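    /*
     * The spare-map swap above means the freshly loaded spare map becomes
     * the ring slot's map while the slot's previous (now unloaded) map is
     * recycled as the new spare, so no DMA maps are created or destroyed
     * in this hot path.
     */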
6315    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6316                    BUS_DMASYNC_PREREAD);
6317    rx_buf->m = m;
6318
6319    rx_bd = &fp->rx_chain[index];
6320    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6321    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6322
6323    return (rc);
6324}
6325
6326static int
6327bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6328                      int                 queue)
6329{
6330    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6331    bus_dma_segment_t segs[1];
6332    bus_dmamap_t map;
6333    struct mbuf *m;
6334    int nsegs;
6335    int rc = 0;
6336
6337    /* allocate the new TPA mbuf */
6338    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6339    if (__predict_false(m == NULL)) {
6340        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6341        return (ENOBUFS);
6342    }
6343
6344    fp->eth_q_stats.mbuf_alloc_tpa++;
6345
6346    /* initialize the mbuf buffer length */
6347    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6348
6349    /* map the mbuf into non-paged pool */
6350    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6351                                 fp->rx_tpa_info_mbuf_spare_map,
6352                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6353    if (__predict_false(rc != 0)) {
6354        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6355        m_free(m);
6356        fp->eth_q_stats.mbuf_alloc_tpa--;
6357        return (rc);
6358    }
6359
6360    /* all mbufs must map to a single segment */
6361    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6362
6363    /* release any existing TPA mbuf mapping */
6364    if (tpa_info->bd.m_map != NULL) {
6365        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6366                        BUS_DMASYNC_POSTREAD);
6367        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6368    }
6369
6370    /* save the mbuf and mapping info for the TPA mbuf */
6371    map = tpa_info->bd.m_map;
6372    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6373    fp->rx_tpa_info_mbuf_spare_map = map;
6374    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6375                    BUS_DMASYNC_PREREAD);
6376    tpa_info->bd.m = m;
6377    tpa_info->seg = segs[0];
6378
6379    return (rc);
6380}
6381
6382/*
6383 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6384 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6385 * chain.
6386 */
6387static int
6388bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6389                      uint16_t            index)
6390{
6391    struct bxe_sw_rx_bd *sge_buf;
6392    struct eth_rx_sge *sge;
6393    bus_dma_segment_t segs[1];
6394    bus_dmamap_t map;
6395    struct mbuf *m;
6396    int nsegs;
6397    int rc = 0;
6398
6399    /* allocate a new SGE mbuf */
6400    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6401    if (__predict_false(m == NULL)) {
6402        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6403        return (ENOMEM);
6404    }
6405
6406    fp->eth_q_stats.mbuf_alloc_sge++;
6407
6408    /* initialize the mbuf buffer length */
6409    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6410
6411    /* map the SGE mbuf into non-paged pool */
6412    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6413                                 fp->rx_sge_mbuf_spare_map,
6414                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6415    if (__predict_false(rc != 0)) {
6416        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6417        m_freem(m);
6418        fp->eth_q_stats.mbuf_alloc_sge--;
6419        return (rc);
6420    }
6421
6422    /* all mbufs must map to a single segment */
6423    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6424
6425    sge_buf = &fp->rx_sge_mbuf_chain[index];
6426
6427    /* release any existing SGE mbuf mapping */
6428    if (sge_buf->m_map != NULL) {
6429        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6430                        BUS_DMASYNC_POSTREAD);
6431        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6432    }
6433
6434    /* save the mbuf and mapping info for a future packet */
6435    map = sge_buf->m_map;
6436    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6437    fp->rx_sge_mbuf_spare_map = map;
6438    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6439                    BUS_DMASYNC_PREREAD);
6440    sge_buf->m = m;
6441
6442    sge = &fp->rx_sge_chain[index];
6443    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6444    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6445
6446    return (rc);
6447}
6448
6449static __noinline int
6450bxe_alloc_fp_buffers(struct bxe_softc *sc)
6451{
6452    struct bxe_fastpath *fp;
6453    int i, j, rc = 0;
6454    int ring_prod, cqe_ring_prod;
6455    int max_agg_queues;
6456
6457    for (i = 0; i < sc->num_queues; i++) {
6458        fp = &sc->fp[i];
6459
6460        ring_prod = cqe_ring_prod = 0;
6461        fp->rx_bd_cons = 0;
6462        fp->rx_cq_cons = 0;
6463
6464        /* allocate buffers for the RX BDs in RX BD chain */
6465        for (j = 0; j < sc->max_rx_bufs; j++) {
6466            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6467            if (rc != 0) {
6468                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6469                      i, rc);
6470                goto bxe_alloc_fp_buffers_error;
6471            }
6472
6473            ring_prod     = RX_BD_NEXT(ring_prod);
6474            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6475        }
6476
6477        fp->rx_bd_prod = ring_prod;
6478        fp->rx_cq_prod = cqe_ring_prod;
6479        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6480
6481        max_agg_queues = MAX_AGG_QS(sc);
6482
6483        fp->tpa_enable = TRUE;
6484
6485        /* fill the TPA pool */
6486        for (j = 0; j < max_agg_queues; j++) {
6487            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6488            if (rc != 0) {
6489                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6490                          i, j);
6491                fp->tpa_enable = FALSE;
6492                goto bxe_alloc_fp_buffers_error;
6493            }
6494
6495            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6496        }
6497
6498        if (fp->tpa_enable) {
6499            /* fill the RX SGE chain */
6500            ring_prod = 0;
6501            for (j = 0; j < RX_SGE_USABLE; j++) {
6502                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6503                if (rc != 0) {
6504                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6505                              i, ring_prod);
6506                    fp->tpa_enable = FALSE;
6507                    ring_prod = 0;
6508                    goto bxe_alloc_fp_buffers_error;
6509                }
6510
6511                ring_prod = RX_SGE_NEXT(ring_prod);
6512            }
6513
6514            fp->rx_sge_prod = ring_prod;
6515        }
6516    }
6517
6518    return (0);
6519
6520bxe_alloc_fp_buffers_error:
6521
6522    /* unwind what was already allocated */
6523    bxe_free_rx_bd_chain(fp);
6524    bxe_free_tpa_pool(fp);
6525    bxe_free_sge_chain(fp);
6526
6527    return (ENOBUFS);
6528}
6529
6530static void
6531bxe_free_fw_stats_mem(struct bxe_softc *sc)
6532{
6533    bxe_dma_free(sc, &sc->fw_stats_dma);
6534
6535    sc->fw_stats_num = 0;
6536
6537    sc->fw_stats_req_size = 0;
6538    sc->fw_stats_req = NULL;
6539    sc->fw_stats_req_mapping = 0;
6540
6541    sc->fw_stats_data_size = 0;
6542    sc->fw_stats_data = NULL;
6543    sc->fw_stats_data_mapping = 0;
6544}
6545
6546static int
6547bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6548{
6549    uint8_t num_queue_stats;
6550    int num_groups;
6551
6552    /* number of queues for statistics is number of eth queues */
6553    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6554
6555    /*
6556     * Total number of FW statistics requests =
6557     *   1 for port stats + 1 for PF stats + num of queues
6558     */
6559    sc->fw_stats_num = (2 + num_queue_stats);
6560
6561    /*
6562     * Request is built from stats_query_header and an array of
6563     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6564     * rules. The real number of requests is configured in the
6565     * stats_query_header.
6566     */
6567    num_groups =
6568        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6569         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
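    /*
     * Illustrative example (hypothetical values): with 4 ETH queues,
     * fw_stats_num is 2 + 4 = 6; if STATS_QUERY_CMD_COUNT were 16 this
     * fits in a single stats_query_cmd_group, while 17 or more requests
     * would spill into a second group.
     */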
6570
6571    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6572          sc->fw_stats_num, num_groups);
6573
6574    sc->fw_stats_req_size =
6575        (sizeof(struct stats_query_header) +
6576         (num_groups * sizeof(struct stats_query_cmd_group)));
6577
6578    /*
6579     * Data for statistics requests + stats_counter.
6580     * stats_counter holds per-STORM counters that are incremented when
6581     * STORM has finished with the current request. Memory for FCoE
6582     * offloaded statistics is counted anyway, even if it will not be sent.
6583     * VF stats are not accounted for here as the data of VF stats is stored
6584     * in memory allocated by the VF, not here.
6585     */
6586    sc->fw_stats_data_size =
6587        (sizeof(struct stats_counter) +
6588         sizeof(struct per_port_stats) +
6589         sizeof(struct per_pf_stats) +
6590         /* sizeof(struct fcoe_statistics_params) + */
6591         (sizeof(struct per_queue_stats) * num_queue_stats));
6592
6593    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6594                      &sc->fw_stats_dma, "fw stats") != 0) {
6595        bxe_free_fw_stats_mem(sc);
6596        return (-1);
6597    }
6598
6599    /* set up the shortcuts */
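    /*
     * The single DMA block allocated above is carved up as [ request | data ]:
     * fw_stats_req points at the start of the buffer and fw_stats_data starts
     * fw_stats_req_size bytes in, with the physical addresses offset the same
     * way.
     */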
6600
6601    sc->fw_stats_req =
6602        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6603    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6604
6605    sc->fw_stats_data =
6606        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6607                                     sc->fw_stats_req_size);
6608    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6609                                 sc->fw_stats_req_size);
6610
6611    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6612          (uintmax_t)sc->fw_stats_req_mapping);
6613
6614    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6615          (uintmax_t)sc->fw_stats_data_mapping);
6616
6617    return (0);
6618}
6619
6620/*
6621 * Bits map:
6622 * 0-7  - Engine0 load counter.
6623 * 8-15 - Engine1 load counter.
6624 * 16   - Engine0 RESET_IN_PROGRESS bit.
6625 * 17   - Engine1 RESET_IN_PROGRESS bit.
6626 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6627 *        function on the engine
6628 * 19   - Engine1 ONE_IS_LOADED.
6629 * 20   - Chip reset flow bit. When set, the non-leader must wait for the
6630 *        leaders of both engines to complete (check both RESET_IN_PROGRESS
6631 *        bits, not just the one belonging to its own engine).
6632 */
6633#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6634#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6635#define BXE_PATH0_LOAD_CNT_SHIFT  0
6636#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6637#define BXE_PATH1_LOAD_CNT_SHIFT  8
6638#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6639#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6640#define BXE_GLOBAL_RESET_BIT      0x00040000
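/*
 * Example decode (hypothetical register value): 0x00050003 would mean the
 * engine0 load counter field is 0x03 (two PFs marked loaded on path 0), the
 * engine1 load counter is 0x00, engine0 RESET_IN_PROGRESS is set (bit 16),
 * and engine0 ONE_IS_LOADED is set (bit 18).
 */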
6641
6642/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6643static void
6644bxe_set_reset_global(struct bxe_softc *sc)
6645{
6646    uint32_t val;
6647    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6648    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6649    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6650    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6651}
6652
6653/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6654static void
6655bxe_clear_reset_global(struct bxe_softc *sc)
6656{
6657    uint32_t val;
6658    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6659    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6660    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6661    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6662}
6663
6664/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6665static uint8_t
6666bxe_reset_is_global(struct bxe_softc *sc)
6667{
6668    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6669    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6670    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6671}
6672
6673/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6674static void
6675bxe_set_reset_done(struct bxe_softc *sc)
6676{
6677    uint32_t val;
6678    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6679                                 BXE_PATH0_RST_IN_PROG_BIT;
6680
6681    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6682
6683    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6684    /* Clear the bit */
6685    val &= ~bit;
6686    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6687
6688    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6689}
6690
6691/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6692static void
6693bxe_set_reset_in_progress(struct bxe_softc *sc)
6694{
6695    uint32_t val;
6696    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6697                                 BXE_PATH0_RST_IN_PROG_BIT;
6698
6699    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6700
6701    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6702    /* Set the bit */
6703    val |= bit;
6704    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6705
6706    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6707}
6708
6709/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6710static uint8_t
6711bxe_reset_is_done(struct bxe_softc *sc,
6712                  int              engine)
6713{
6714    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6715    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6716                            BXE_PATH0_RST_IN_PROG_BIT;
6717
6718    /* return false if bit is set */
6719    return (val & bit) ? FALSE : TRUE;
6720}
6721
6722/* get the load status for an engine, should be run under rtnl lock */
6723static uint8_t
6724bxe_get_load_status(struct bxe_softc *sc,
6725                    int              engine)
6726{
6727    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6728                             BXE_PATH0_LOAD_CNT_MASK;
6729    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6730                              BXE_PATH0_LOAD_CNT_SHIFT;
6731    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6732
6733    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6734
6735    val = ((val & mask) >> shift);
6736
6737    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6738
6739    return (val != 0);
6740}
6741
6742/* set pf load mark */
6743/* XXX needs to be under rtnl lock */
6744static void
6745bxe_set_pf_load(struct bxe_softc *sc)
6746{
6747    uint32_t val;
6748    uint32_t val1;
6749    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6750                                  BXE_PATH0_LOAD_CNT_MASK;
6751    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6752                                   BXE_PATH0_LOAD_CNT_SHIFT;
6753
6754    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6755
6756    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6757    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6758
6759    /* get the current counter value */
6760    val1 = ((val & mask) >> shift);
6761
6762    /* set bit of this PF */
6763    val1 |= (1 << SC_ABS_FUNC(sc));
6764
6765    /* clear the old value */
6766    val &= ~mask;
6767
6768    /* set the new one */
6769    val |= ((val1 << shift) & mask);
6770
6771    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6772
6773    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6774}
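/*
 * Worked example (hypothetical values): if this path's counter field reads
 * 0x05 (bits for absolute functions 0 and 2 set), a load by absolute function
 * 4 turns the field into 0x15; bxe_clear_pf_load() below clears that same bit
 * on unload.
 */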
6775
6776/* clear pf load mark */
6777/* XXX needs to be under rtnl lock */
6778static uint8_t
6779bxe_clear_pf_load(struct bxe_softc *sc)
6780{
6781    uint32_t val1, val;
6782    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6783                                  BXE_PATH0_LOAD_CNT_MASK;
6784    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6785                                   BXE_PATH0_LOAD_CNT_SHIFT;
6786
6787    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6788    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6789    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6790
6791    /* get the current counter value */
6792    val1 = (val & mask) >> shift;
6793
6794    /* clear bit of that PF */
6795    val1 &= ~(1 << SC_ABS_FUNC(sc));
6796
6797    /* clear the old value */
6798    val &= ~mask;
6799
6800    /* set the new one */
6801    val |= ((val1 << shift) & mask);
6802
6803    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6804    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6805    return (val1 != 0);
6806}
6807
6808/* send load request to the MCP and analyze the response */
6809static int
6810bxe_nic_load_request(struct bxe_softc *sc,
6811                     uint32_t         *load_code)
6812{
6813    /* init fw_seq */
6814    sc->fw_seq =
6815        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6816         DRV_MSG_SEQ_NUMBER_MASK);
6817
6818    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6819
6820    /* get the current FW pulse sequence */
6821    sc->fw_drv_pulse_wr_seq =
6822        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6823         DRV_PULSE_SEQ_MASK);
6824
6825    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6826          sc->fw_drv_pulse_wr_seq);
6827
6828    /* load request */
6829    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6830                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6831
6832    /* if the MCP fails to respond we must abort */
6833    if (!(*load_code)) {
6834        BLOGE(sc, "MCP response failure!\n");
6835        return (-1);
6836    }
6837
6838    /* if MCP refused then must abort */
6839    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6840        BLOGE(sc, "MCP refused load request\n");
6841        return (-1);
6842    }
6843
6844    return (0);
6845}
6846
6847/*
6848 * Check whether another PF has already loaded FW to the chip. In virtualized
6849 * environments a PF from another VM may have already initialized the device,
6850 * including loading the FW.
6851 */
6852static int
6853bxe_nic_load_analyze_req(struct bxe_softc *sc,
6854                         uint32_t         load_code)
6855{
6856    uint32_t my_fw, loaded_fw;
6857
6858    /* is another pf loaded on this engine? */
6859    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6860        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6861        /* build my FW version dword */
6862        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6863                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6864                 (BCM_5710_FW_REVISION_VERSION << 16) +
6865                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
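        /*
         * For example (hypothetical version numbers), major 7, minor 13,
         * revision 1, engineering 0 packs to 0x00010d07; the value read back
         * from XSEM_REG_PRAM below is expected to use the same byte layout.
         */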
6866
6867        /* read loaded FW from chip */
6868        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6869        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6870              loaded_fw, my_fw);
6871
6872        /* abort nic load if version mismatch */
6873        if (my_fw != loaded_fw) {
6874            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
6875                  loaded_fw, my_fw);
6876            return (-1);
6877        }
6878    }
6879
6880    return (0);
6881}
6882
6883/* mark PMF if applicable */
6884static void
6885bxe_nic_load_pmf(struct bxe_softc *sc,
6886                 uint32_t         load_code)
6887{
6888    uint32_t ncsi_oem_data_addr;
6889
6890    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6891        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6892        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6893        /*
6894         * Barrier here for ordering between the writing to sc->port.pmf here
6895         * and reading it from the periodic task.
6896         */
6897        sc->port.pmf = 1;
6898        mb();
6899    } else {
6900        sc->port.pmf = 0;
6901    }
6902
6903    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6904
6905    /* XXX needed? */
6906    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6907        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6908            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6909            if (ncsi_oem_data_addr) {
6910                REG_WR(sc,
6911                       (ncsi_oem_data_addr +
6912                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6913                       0);
6914            }
6915        }
6916    }
6917}
6918
6919static void
6920bxe_read_mf_cfg(struct bxe_softc *sc)
6921{
6922    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6923    int abs_func;
6924    int vn;
6925
6926    if (BXE_NOMCP(sc)) {
6927        return; /* what should be the default value in this case? */
6928    }
6929
6930    /*
6931     * The formula for computing the absolute function number is...
6932     * For 2 port configuration (4 functions per port):
6933     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6934     * For 4 port configuration (2 functions per port):
6935     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6936     */
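    /*
     * For example, in a 2 port configuration with SC_PORT == 0 and
     * SC_PATH == 0, vn = 0..3 yields abs_func = 0, 2, 4 and 6.
     */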
6937    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6938        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6939        if (abs_func >= E1H_FUNC_MAX) {
6940            break;
6941        }
6942        sc->devinfo.mf_info.mf_config[vn] =
6943            MFCFG_RD(sc, func_mf_config[abs_func].config);
6944    }
6945
6946    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6947        FUNC_MF_CFG_FUNC_DISABLED) {
6948        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6949        sc->flags |= BXE_MF_FUNC_DIS;
6950    } else {
6951        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6952        sc->flags &= ~BXE_MF_FUNC_DIS;
6953    }
6954}
6955
6956/* acquire split MCP access lock register */
6957static int bxe_acquire_alr(struct bxe_softc *sc)
6958{
6959    uint32_t j, val;
6960
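    /*
     * Poll for up to ~5 seconds (1000 iterations x 5 ms): write bit 31 and
     * read it back until the MCP grants the lock.
     */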
6961    for (j = 0; j < 1000; j++) {
6962        val = (1UL << 31);
6963        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6964        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6965        if (val & (1L << 31))
6966            break;
6967
6968        DELAY(5000);
6969    }
6970
6971    if (!(val & (1L << 31))) {
6972        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6973        return (-1);
6974    }
6975
6976    return (0);
6977}
6978
6979/* release split MCP access lock register */
6980static void bxe_release_alr(struct bxe_softc *sc)
6981{
6982    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6983}
6984
6985static void
6986bxe_fan_failure(struct bxe_softc *sc)
6987{
6988    int port = SC_PORT(sc);
6989    uint32_t ext_phy_config;
6990
6991    /* mark the failure */
6992    ext_phy_config =
6993        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6994
6995    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6996    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6997    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6998             ext_phy_config);
6999
7000    /* log the failure */
7001    BLOGW(sc, "Fan Failure has caused the driver to shut down "
7002              "the card to prevent permanent damage. "
7003              "Please contact OEM Support for assistance\n");
7004
7005    /* XXX */
7006#if 1
7007    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7008#else
7009    /*
7010     * Schedule a device reset (unload). This is because some boards
7011     * consume enough power when the driver is up to overheat if the
7012     * fan fails.
7013     */
7014    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7015    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7016#endif
7017}
7018
7019/* this function is called upon a link interrupt */
7020static void
7021bxe_link_attn(struct bxe_softc *sc)
7022{
7023    uint32_t pause_enabled = 0;
7024    struct host_port_stats *pstats;
7025    int cmng_fns;
7026    struct bxe_fastpath *fp;
7027    int i;
7028
7029    /* Make sure that we are synced with the current statistics */
7030    bxe_stats_handle(sc, STATS_EVENT_STOP);
7031    BLOGI(sc, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7032    elink_link_update(&sc->link_params, &sc->link_vars);
7033
7034    if (sc->link_vars.link_up) {
7035
7036        /* dropless flow control */
7037        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7038            pause_enabled = 0;
7039
7040            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7041                pause_enabled = 1;
7042            }
7043
7044            REG_WR(sc,
7045                   (BAR_USTRORM_INTMEM +
7046                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7047                   pause_enabled);
7048        }
7049
7050        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7051            pstats = BXE_SP(sc, port_stats);
7052            /* reset old mac stats */
7053            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7054        }
7055
7056        if (sc->state == BXE_STATE_OPEN) {
7057            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7058        }
7059
7060        /* Restart tx when the link comes back. */
7061        FOR_EACH_ETH_QUEUE(sc, i) {
7062            fp = &sc->fp[i];
7063            taskqueue_enqueue(fp->tq, &fp->tx_task);
7064        }
7065    }
7066
7067    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7068        cmng_fns = bxe_get_cmng_fns_mode(sc);
7069
7070        if (cmng_fns != CMNG_FNS_NONE) {
7071            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7072            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7073        } else {
7074            /* rate shaping and fairness are disabled */
7075            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7076        }
7077    }
7078
7079    bxe_link_report_locked(sc);
7080
7081    if (IS_MF(sc)) {
7082        ; // XXX bxe_link_sync_notify(sc);
7083    }
7084}
7085
7086static void
7087bxe_attn_int_asserted(struct bxe_softc *sc,
7088                      uint32_t         asserted)
7089{
7090    int port = SC_PORT(sc);
7091    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7092                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7093    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7094                                        NIG_REG_MASK_INTERRUPT_PORT0;
7095    uint32_t aeu_mask;
7096    uint32_t nig_mask = 0;
7097    uint32_t reg_addr;
7098    uint32_t igu_acked;
7099    uint32_t cnt;
7100
7101    if (sc->attn_state & asserted) {
7102        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7103    }
7104
7105    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7106
7107    aeu_mask = REG_RD(sc, aeu_addr);
7108
7109    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7110          aeu_mask, asserted);
7111
7112    aeu_mask &= ~(asserted & 0x3ff);
7113
7114    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7115
7116    REG_WR(sc, aeu_addr, aeu_mask);
7117
7118    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7119
7120    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7121    sc->attn_state |= asserted;
7122    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7123
7124    if (asserted & ATTN_HARD_WIRED_MASK) {
7125        if (asserted & ATTN_NIG_FOR_FUNC) {
7126
7127            bxe_acquire_phy_lock(sc);
7128            /* save nig interrupt mask */
7129            nig_mask = REG_RD(sc, nig_int_mask_addr);
7130
7131            /* If nig_mask is not set, no need to call the update function */
7132            if (nig_mask) {
7133                REG_WR(sc, nig_int_mask_addr, 0);
7134
7135                bxe_link_attn(sc);
7136            }
7137
7138            /* handle unicore attn? */
7139        }
7140
7141        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7142            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7143        }
7144
7145        if (asserted & GPIO_2_FUNC) {
7146            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7147        }
7148
7149        if (asserted & GPIO_3_FUNC) {
7150            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7151        }
7152
7153        if (asserted & GPIO_4_FUNC) {
7154            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7155        }
7156
7157        if (port == 0) {
7158            if (asserted & ATTN_GENERAL_ATTN_1) {
7159                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7160                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7161            }
7162            if (asserted & ATTN_GENERAL_ATTN_2) {
7163                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7164                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7165            }
7166            if (asserted & ATTN_GENERAL_ATTN_3) {
7167                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7168                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7169            }
7170        } else {
7171            if (asserted & ATTN_GENERAL_ATTN_4) {
7172                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7173                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7174            }
7175            if (asserted & ATTN_GENERAL_ATTN_5) {
7176                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7177                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7178            }
7179            if (asserted & ATTN_GENERAL_ATTN_6) {
7180                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7181                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7182            }
7183        }
7184    } /* hardwired */
7185
7186    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7187        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7188    } else {
7189        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7190    }
7191
7192    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7193          asserted,
7194          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7195    REG_WR(sc, reg_addr, asserted);
7196
7197    /* now set back the mask */
7198    if (asserted & ATTN_NIG_FOR_FUNC) {
7199        /*
7200         * Verify that IGU ack through BAR was written before restoring
7201         * NIG mask. This loop should exit after 2-3 iterations max.
7202         */
7203        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7204            cnt = 0;
7205
7206            do {
7207                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7208            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7209                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7210
7211            if (!igu_acked) {
7212                BLOGE(sc, "Failed to verify IGU ack on time\n");
7213            }
7214
7215            mb();
7216        }
7217
7218        REG_WR(sc, nig_int_mask_addr, nig_mask);
7219
7220        bxe_release_phy_lock(sc);
7221    }
7222}
7223
7224static void
7225bxe_print_next_block(struct bxe_softc *sc,
7226                     int              idx,
7227                     const char       *blk)
7228{
7229    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7230}
7231
7232static int
7233bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7234                              uint32_t         sig,
7235                              int              par_num,
7236                              uint8_t          print)
7237{
7238    uint32_t cur_bit = 0;
7239    int i = 0;
7240
7241    for (i = 0; sig; i++) {
7242        cur_bit = ((uint32_t)0x1 << i);
7243        if (sig & cur_bit) {
7244            switch (cur_bit) {
7245            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7246                if (print)
7247                    bxe_print_next_block(sc, par_num++, "BRB");
7248                break;
7249            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7250                if (print)
7251                    bxe_print_next_block(sc, par_num++, "PARSER");
7252                break;
7253            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7254                if (print)
7255                    bxe_print_next_block(sc, par_num++, "TSDM");
7256                break;
7257            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7258                if (print)
7259                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7260                break;
7261            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7262                if (print)
7263                    bxe_print_next_block(sc, par_num++, "TCM");
7264                break;
7265            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7266                if (print)
7267                    bxe_print_next_block(sc, par_num++, "TSEMI");
7268                break;
7269            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7270                if (print)
7271                    bxe_print_next_block(sc, par_num++, "XPB");
7272                break;
7273            }
7274
7275            /* Clear the bit */
7276            sig &= ~cur_bit;
7277        }
7278    }
7279
7280    return (par_num);
7281}
7282
7283static int
7284bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7285                              uint32_t         sig,
7286                              int              par_num,
7287                              uint8_t          *global,
7288                              uint8_t          print)
7289{
7290    int i = 0;
7291    uint32_t cur_bit = 0;
7292    for (i = 0; sig; i++) {
7293        cur_bit = ((uint32_t)0x1 << i);
7294        if (sig & cur_bit) {
7295            switch (cur_bit) {
7296            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7297                if (print)
7298                    bxe_print_next_block(sc, par_num++, "PBF");
7299                break;
7300            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7301                if (print)
7302                    bxe_print_next_block(sc, par_num++, "QM");
7303                break;
7304            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7305                if (print)
7306                    bxe_print_next_block(sc, par_num++, "TM");
7307                break;
7308            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7309                if (print)
7310                    bxe_print_next_block(sc, par_num++, "XSDM");
7311                break;
7312            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7313                if (print)
7314                    bxe_print_next_block(sc, par_num++, "XCM");
7315                break;
7316            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7317                if (print)
7318                    bxe_print_next_block(sc, par_num++, "XSEMI");
7319                break;
7320            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7321                if (print)
7322                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7323                break;
7324            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7325                if (print)
7326                    bxe_print_next_block(sc, par_num++, "NIG");
7327                break;
7328            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7329                if (print)
7330                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7331                *global = TRUE;
7332                break;
7333            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7334                if (print)
7335                    bxe_print_next_block(sc, par_num++, "DEBUG");
7336                break;
7337            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7338                if (print)
7339                    bxe_print_next_block(sc, par_num++, "USDM");
7340                break;
7341            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7342                if (print)
7343                    bxe_print_next_block(sc, par_num++, "UCM");
7344                break;
7345            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7346                if (print)
7347                    bxe_print_next_block(sc, par_num++, "USEMI");
7348                break;
7349            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7350                if (print)
7351                    bxe_print_next_block(sc, par_num++, "UPB");
7352                break;
7353            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7354                if (print)
7355                    bxe_print_next_block(sc, par_num++, "CSDM");
7356                break;
7357            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7358                if (print)
7359                    bxe_print_next_block(sc, par_num++, "CCM");
7360                break;
7361            }
7362
7363            /* Clear the bit */
7364            sig &= ~cur_bit;
7365        }
7366    }
7367
7368    return (par_num);
7369}
7370
7371static int
7372bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7373                              uint32_t         sig,
7374                              int              par_num,
7375                              uint8_t          print)
7376{
7377    uint32_t cur_bit = 0;
7378    int i = 0;
7379
7380    for (i = 0; sig; i++) {
7381        cur_bit = ((uint32_t)0x1 << i);
7382        if (sig & cur_bit) {
7383            switch (cur_bit) {
7384            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7385                if (print)
7386                    bxe_print_next_block(sc, par_num++, "CSEMI");
7387                break;
7388            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7389                if (print)
7390                    bxe_print_next_block(sc, par_num++, "PXP");
7391                break;
7392            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7393                if (print)
7394                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7395                break;
7396            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7397                if (print)
7398                    bxe_print_next_block(sc, par_num++, "CFC");
7399                break;
7400            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7401                if (print)
7402                    bxe_print_next_block(sc, par_num++, "CDU");
7403                break;
7404            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7405                if (print)
7406                    bxe_print_next_block(sc, par_num++, "DMAE");
7407                break;
7408            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7409                if (print)
7410                    bxe_print_next_block(sc, par_num++, "IGU");
7411                break;
7412            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7413                if (print)
7414                    bxe_print_next_block(sc, par_num++, "MISC");
7415                break;
7416            }
7417
7418            /* Clear the bit */
7419            sig &= ~cur_bit;
7420        }
7421    }
7422
7423    return (par_num);
7424}
7425
7426static int
7427bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7428                              uint32_t         sig,
7429                              int              par_num,
7430                              uint8_t          *global,
7431                              uint8_t          print)
7432{
7433    uint32_t cur_bit = 0;
7434    int i = 0;
7435
7436    for (i = 0; sig; i++) {
7437        cur_bit = ((uint32_t)0x1 << i);
7438        if (sig & cur_bit) {
7439            switch (cur_bit) {
7440            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7441                if (print)
7442                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7443                *global = TRUE;
7444                break;
7445            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7446                if (print)
7447                    bxe_print_next_block(sc, par_num++,
7448                              "MCP UMP RX");
7449                *global = TRUE;
7450                break;
7451            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7452                if (print)
7453                    bxe_print_next_block(sc, par_num++,
7454                              "MCP UMP TX");
7455                *global = TRUE;
7456                break;
7457            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7458                if (print)
7459                    bxe_print_next_block(sc, par_num++,
7460                              "MCP SCPAD");
7461                *global = TRUE;
7462                break;
7463            }
7464
7465            /* Clear the bit */
7466            sig &= ~cur_bit;
7467        }
7468    }
7469
7470    return (par_num);
7471}
7472
7473static int
7474bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7475                              uint32_t         sig,
7476                              int              par_num,
7477                              uint8_t          print)
7478{
7479    uint32_t cur_bit = 0;
7480    int i = 0;
7481
7482    for (i = 0; sig; i++) {
7483        cur_bit = ((uint32_t)0x1 << i);
7484        if (sig & cur_bit) {
7485            switch (cur_bit) {
7486            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7487                if (print)
7488                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7489                break;
7490            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7491                if (print)
7492                    bxe_print_next_block(sc, par_num++, "ATC");
7493                break;
7494            }
7495
7496            /* Clear the bit */
7497            sig &= ~cur_bit;
7498        }
7499    }
7500
7501    return (par_num);
7502}
7503
7504static uint8_t
7505bxe_parity_attn(struct bxe_softc *sc,
7506                uint8_t          *global,
7507                uint8_t          print,
7508                uint32_t         *sig)
7509{
7510    int par_num = 0;
7511
7512    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7513        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7514        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7515        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7516        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7517        BLOGE(sc, "Parity error: HW block parity attention:\n"
7518                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7519              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7520              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7521              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7522              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7523              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7524
7525        if (print)
7526            BLOGI(sc, "Parity errors detected in blocks: ");
7527
7528        par_num =
7529            bxe_check_blocks_with_parity0(sc, sig[0] &
7530                                          HW_PRTY_ASSERT_SET_0,
7531                                          par_num, print);
7532        par_num =
7533            bxe_check_blocks_with_parity1(sc, sig[1] &
7534                                          HW_PRTY_ASSERT_SET_1,
7535                                          par_num, global, print);
7536        par_num =
7537            bxe_check_blocks_with_parity2(sc, sig[2] &
7538                                          HW_PRTY_ASSERT_SET_2,
7539                                          par_num, print);
7540        par_num =
7541            bxe_check_blocks_with_parity3(sc, sig[3] &
7542                                          HW_PRTY_ASSERT_SET_3,
7543                                          par_num, global, print);
7544        par_num =
7545            bxe_check_blocks_with_parity4(sc, sig[4] &
7546                                          HW_PRTY_ASSERT_SET_4,
7547                                          par_num, print);
7548
7549        if (print)
7550            BLOGI(sc, "\n");
7551
7552        return (TRUE);
7553    }
7554
7555    return (FALSE);
7556}
7557
7558static uint8_t
7559bxe_chk_parity_attn(struct bxe_softc *sc,
7560                    uint8_t          *global,
7561                    uint8_t          print)
7562{
7563    struct attn_route attn = { {0} };
7564    int port = SC_PORT(sc);
7565
7566    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7567    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7568    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7569    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7570
7571    /*
7572     * Since MCP attentions can't be disabled inside the block, we need to
7573     * read AEU registers to see whether they're currently disabled
7574     */
7575    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7576                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7577                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7578                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7579
7580
7581    if (!CHIP_IS_E1x(sc))
7582        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7583
7584    return (bxe_parity_attn(sc, global, print, attn.sig));
7585}
7586
7587static void
7588bxe_attn_int_deasserted4(struct bxe_softc *sc,
7589                         uint32_t         attn)
7590{
7591    uint32_t val;
7592
7593    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7594        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7595        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7596        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7597            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7598        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7599            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7600        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7601            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7602        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7603            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7604        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7605            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7606        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7607            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7608        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7609            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7610        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7611            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7612        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7613            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7614    }
7615
7616    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7617        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7618        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7619        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7620            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7621        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7622            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7623        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7624            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7625        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7626            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7627        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7628            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7629        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7630            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7631    }
7632
7633    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7634                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7635        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7636              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7637                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7638    }
7639}
7640
7641static void
7642bxe_e1h_disable(struct bxe_softc *sc)
7643{
7644    int port = SC_PORT(sc);
7645
7646    bxe_tx_disable(sc);
7647
7648    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7649}
7650
7651static void
7652bxe_e1h_enable(struct bxe_softc *sc)
7653{
7654    int port = SC_PORT(sc);
7655
7656    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7657
7658    // XXX bxe_tx_enable(sc);
7659}
7660
7661/*
7662 * called due to MCP event (on pmf):
7663 *   reread new bandwidth configuration
7664 *   configure FW
7665 *   notify others function about the change
7666 */
7667static void
7668bxe_config_mf_bw(struct bxe_softc *sc)
7669{
7670    if (sc->link_vars.link_up) {
7671        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7672        // XXX bxe_link_sync_notify(sc);
7673    }
7674
7675    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7676}
7677
7678static void
7679bxe_set_mf_bw(struct bxe_softc *sc)
7680{
7681    bxe_config_mf_bw(sc);
7682    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7683}
7684
7685static void
7686bxe_handle_eee_event(struct bxe_softc *sc)
7687{
7688    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7689    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7690}
7691
7692#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7693
7694static void
7695bxe_drv_info_ether_stat(struct bxe_softc *sc)
7696{
7697    struct eth_stats_info *ether_stat =
7698        &sc->sp->drv_info_to_mcp.ether_stat;
7699
7700    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7701            ETH_STAT_INFO_VERSION_LEN);
7702
7703    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7704    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7705                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7706                                          ether_stat->mac_local + MAC_PAD,
7707                                          MAC_PAD, ETH_ALEN);
7708
7709    ether_stat->mtu_size = sc->mtu;
7710
7711    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7712    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7713        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7714    }
7715
7716    // XXX ether_stat->feature_flags |= ???;
7717
7718    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7719
7720    ether_stat->txq_size = sc->tx_ring_size;
7721    ether_stat->rxq_size = sc->rx_ring_size;
7722}
7723
7724static void
7725bxe_handle_drv_info_req(struct bxe_softc *sc)
7726{
7727    enum drv_info_opcode op_code;
7728    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7729
7730    /* if drv_info version supported by MFW doesn't match - send NACK */
7731    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7732        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7733        return;
7734    }
7735
7736    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7737               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7738
7739    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7740
7741    switch (op_code) {
7742    case ETH_STATS_OPCODE:
7743        bxe_drv_info_ether_stat(sc);
7744        break;
7745    case FCOE_STATS_OPCODE:
7746    case ISCSI_STATS_OPCODE:
7747    default:
7748        /* if op code isn't supported - send NACK */
7749        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7750        return;
7751    }
7752
7753    /*
7754     * If we got drv_info attn from MFW then these fields are defined in
7755     * shmem2 for sure
7756     */
7757    SHMEM2_WR(sc, drv_info_host_addr_lo,
7758              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7759    SHMEM2_WR(sc, drv_info_host_addr_hi,
7760              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7761
7762    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7763}
7764
7765static void
7766bxe_dcc_event(struct bxe_softc *sc,
7767              uint32_t         dcc_event)
7768{
7769    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7770
7771    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7772        /*
7773         * This is the only place besides the function initialization
7774         * where the sc->flags can change so it is done without any
7775         * locks
7776         */
7777        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7778            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7779            sc->flags |= BXE_MF_FUNC_DIS;
7780            bxe_e1h_disable(sc);
7781        } else {
7782            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7783            sc->flags &= ~BXE_MF_FUNC_DIS;
7784            bxe_e1h_enable(sc);
7785        }
7786        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7787    }
7788
7789    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7790        bxe_config_mf_bw(sc);
7791        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7792    }
7793
7794    /* Report results to MCP */
7795    if (dcc_event)
7796        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7797    else
7798        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7799}
7800
7801static void
7802bxe_pmf_update(struct bxe_softc *sc)
7803{
7804    int port = SC_PORT(sc);
7805    uint32_t val;
7806
7807    sc->port.pmf = 1;
7808    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7809
7810    /*
7811     * We need the mb() to ensure the ordering between the writing to
7812     * sc->port.pmf here and reading it from the bxe_periodic_task().
7813     */
7814    mb();
7815
7816    /* queue a periodic task */
7817    // XXX schedule task...
7818
7819    // XXX bxe_dcbx_pmf_update(sc);
7820
7821    /* enable nig attention */
7822    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7823    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7824        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7825        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7826    } else if (!CHIP_IS_E1x(sc)) {
7827        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7828        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7829    }
7830
7831    bxe_stats_handle(sc, STATS_EVENT_PMF);
7832}
7833
7834static int
7835bxe_mc_assert(struct bxe_softc *sc)
7836{
7837    char last_idx;
7838    int i, rc = 0;
7839    uint32_t row0, row1, row2, row3;
7840
7841    /* XSTORM */
7842    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7843    if (last_idx)
7844        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7845
7846    /* print the asserts */
7847    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7848
7849        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7850        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7851        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7852        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7853
7854        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7855            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7856                  i, row3, row2, row1, row0);
7857            rc++;
7858        } else {
7859            break;
7860        }
7861    }
7862
7863    /* TSTORM */
7864    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7865    if (last_idx) {
7866        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7867    }
7868
7869    /* print the asserts */
7870    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7871
7872        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7873        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7874        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7875        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7876
7877        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7878            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7879                  i, row3, row2, row1, row0);
7880            rc++;
7881        } else {
7882            break;
7883        }
7884    }
7885
7886    /* CSTORM */
7887    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7888    if (last_idx) {
7889        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7890    }
7891
7892    /* print the asserts */
7893    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7894
7895        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7896        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7897        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7898        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7899
7900        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7901            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7902                  i, row3, row2, row1, row0);
7903            rc++;
7904        } else {
7905            break;
7906        }
7907    }
7908
7909    /* USTORM */
7910    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7911    if (last_idx) {
7912        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7913    }
7914
7915    /* print the asserts */
7916    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7917
7918        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7919        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7920        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7921        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7922
7923        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7924            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7925                  i, row3, row2, row1, row0);
7926            rc++;
7927        } else {
7928            break;
7929        }
7930    }
7931
7932    return (rc);
7933}
7934
7935static void
7936bxe_attn_int_deasserted3(struct bxe_softc *sc,
7937                         uint32_t         attn)
7938{
7939    int func = SC_FUNC(sc);
7940    uint32_t val;
7941
7942    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7943
7944        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7945
7946            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7947            bxe_read_mf_cfg(sc);
7948            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7949                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7950            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7951
7952            if (val & DRV_STATUS_DCC_EVENT_MASK)
7953                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7954
7955            if (val & DRV_STATUS_SET_MF_BW)
7956                bxe_set_mf_bw(sc);
7957
7958            if (val & DRV_STATUS_DRV_INFO_REQ)
7959                bxe_handle_drv_info_req(sc);
7960
7961            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7962                bxe_pmf_update(sc);
7963
7964            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7965                bxe_handle_eee_event(sc);
7966
7967            if (sc->link_vars.periodic_flags &
7968                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7969                /* sync with link */
7970                bxe_acquire_phy_lock(sc);
7971                sc->link_vars.periodic_flags &=
7972                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7973                bxe_release_phy_lock(sc);
7974                if (IS_MF(sc))
7975                    ; // XXX bxe_link_sync_notify(sc);
7976                bxe_link_report(sc);
7977            }
7978
7979            /*
7980             * Always call it here: bxe_link_report() will
7981             * prevent duplicate link indications.
7982             */
7983            bxe_link_status_update(sc);
7984
7985        } else if (attn & BXE_MC_ASSERT_BITS) {
7986
7987            BLOGE(sc, "MC assert!\n");
7988            bxe_mc_assert(sc);
7989            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7990            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7991            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7992            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7993            bxe_panic(sc, ("MC assert!\n"));
7994
7995        } else if (attn & BXE_MCP_ASSERT) {
7996
7997            BLOGE(sc, "MCP assert!\n");
7998            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7999            // XXX bxe_fw_dump(sc);
8000
8001        } else {
8002            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8003        }
8004    }
8005
8006    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8007        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8008        if (attn & BXE_GRC_TIMEOUT) {
8009            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8010            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8011        }
8012        if (attn & BXE_GRC_RSV) {
8013            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8014            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8015        }
8016        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8017    }
8018}
8019
8020static void
8021bxe_attn_int_deasserted2(struct bxe_softc *sc,
8022                         uint32_t         attn)
8023{
8024    int port = SC_PORT(sc);
8025    int reg_offset;
8026    uint32_t val0, mask0, val1, mask1;
8027    uint32_t val;
8028
8029    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8030        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8031        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8032        /* CFC error attention */
8033        if (val & 0x2) {
8034            BLOGE(sc, "FATAL error from CFC\n");
8035        }
8036    }
8037
8038    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8039        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8040        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8041        /* RQ_USDMDP_FIFO_OVERFLOW */
8042        if (val & 0x18000) {
8043            BLOGE(sc, "FATAL error from PXP\n");
8044        }
8045
8046        if (!CHIP_IS_E1x(sc)) {
8047            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8048            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8049        }
8050    }
8051
8052#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8053#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8054
8055    if (attn & AEU_PXP2_HW_INT_BIT) {
8056        /* CQ47854 workaround: do not panic on
8057         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8058         */
8059        if (!CHIP_IS_E1x(sc)) {
8060            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8061            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8062            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8063            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8064            /*
8065             * If PXP2_EOP_ERROR_BIT is the only bit set in
8066             * STS0 and STS1, clear it.
8067             *
8068             * We probably lose additional attentions between
8069             * STS0 and STS_CLR0; in that case the user will not
8070             * be notified about them.
8071             */
8072            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8073                !(val1 & mask1))
8074                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8075
8076            /* print the register, since no one can restore it */
8077            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8078
8079            /*
8080             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is set
8081             * then notify
8082             */
8083            if (val0 & PXP2_EOP_ERROR_BIT) {
8084                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8085
8086                /*
8087                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8088                 * set then clear attention from PXP2 block without panic
8089                 */
8090                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8091                    ((val1 & mask1) == 0))
8092                    attn &= ~AEU_PXP2_HW_INT_BIT;
8093            }
8094        }
8095    }
8096
8097    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8098        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8099                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8100
8101        val = REG_RD(sc, reg_offset);
8102        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8103        REG_WR(sc, reg_offset, val);
8104
8105        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8106              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8107        bxe_panic(sc, ("HW block attention set2\n"));
8108    }
8109}
8110
8111static void
8112bxe_attn_int_deasserted1(struct bxe_softc *sc,
8113                         uint32_t         attn)
8114{
8115    int port = SC_PORT(sc);
8116    int reg_offset;
8117    uint32_t val;
8118
8119    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8120        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8121        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8122        /* DORQ discard attention */
8123        if (val & 0x2) {
8124            BLOGE(sc, "FATAL error from DORQ\n");
8125        }
8126    }
8127
8128    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8129        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8130                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8131
8132        val = REG_RD(sc, reg_offset);
8133        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8134        REG_WR(sc, reg_offset, val);
8135
8136        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8137              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8138        bxe_panic(sc, ("HW block attention set1\n"));
8139    }
8140}
8141
8142static void
8143bxe_attn_int_deasserted0(struct bxe_softc *sc,
8144                         uint32_t         attn)
8145{
8146    int port = SC_PORT(sc);
8147    int reg_offset;
8148    uint32_t val;
8149
8150    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8151                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8152
8153    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8154        val = REG_RD(sc, reg_offset);
8155        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8156        REG_WR(sc, reg_offset, val);
8157
8158        BLOGW(sc, "SPIO5 hw attention\n");
8159
8160        /* Fan failure attention */
8161        elink_hw_reset_phy(&sc->link_params);
8162        bxe_fan_failure(sc);
8163    }
8164
8165    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8166        bxe_acquire_phy_lock(sc);
8167        elink_handle_module_detect_int(&sc->link_params);
8168        bxe_release_phy_lock(sc);
8169    }
8170
8171    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8172        val = REG_RD(sc, reg_offset);
8173        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8174        REG_WR(sc, reg_offset, val);
8175
8176        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8177                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8178    }
8179}
8180
8181static void
8182bxe_attn_int_deasserted(struct bxe_softc *sc,
8183                        uint32_t         deasserted)
8184{
8185    struct attn_route attn;
8186    struct attn_route *group_mask;
8187    int port = SC_PORT(sc);
8188    int index;
8189    uint32_t reg_addr;
8190    uint32_t val;
8191    uint32_t aeu_mask;
8192    uint8_t global = FALSE;
8193
8194    /*
8195     * Need to take HW lock because MCP or other port might also
8196     * try to handle this event.
8197     */
8198    bxe_acquire_alr(sc);
8199
8200    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8201        /* XXX
8202         * In case of parity errors don't handle attentions so that
8203         * the other function can also "see" the parity errors.
8204         */
8205        sc->recovery_state = BXE_RECOVERY_INIT;
8206        // XXX schedule a recovery task...
8207        /* disable HW interrupts */
8208        bxe_int_disable(sc);
8209        bxe_release_alr(sc);
8210        return;
8211    }
8212
8213    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8214    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8215    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8216    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8217    if (!CHIP_IS_E1x(sc)) {
8218        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8219    } else {
8220        attn.sig[4] = 0;
8221    }
8222
8223    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8224          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8225
8226    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8227        if (deasserted & (1 << index)) {
8228            group_mask = &sc->attn_group[index];
8229
8230            BLOGD(sc, DBG_INTR,
8231                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8232                  group_mask->sig[0], group_mask->sig[1],
8233                  group_mask->sig[2], group_mask->sig[3],
8234                  group_mask->sig[4]);
8235
8236            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8237            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8238            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8239            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8240            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8241        }
8242    }
8243
8244    bxe_release_alr(sc);
8245
8246    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8247        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8248                    COMMAND_REG_ATTN_BITS_CLR);
8249    } else {
8250        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8251    }
8252
8253    val = ~deasserted;
8254    BLOGD(sc, DBG_INTR,
8255          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8256          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8257    REG_WR(sc, reg_addr, val);
8258
8259    if (~sc->attn_state & deasserted) {
8260        BLOGE(sc, "IGU error\n");
8261    }
8262
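    /*
     * Re-enable (unmask) the deasserted attention bits in the per-port AEU
     * mask. The hardware lock is taken since the MCP (or another driver
     * instance) may also update this register.
     */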
8263    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8264                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8265
8266    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8267
8268    aeu_mask = REG_RD(sc, reg_addr);
8269
8270    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8271          aeu_mask, deasserted);
8272    aeu_mask |= (deasserted & 0x3ff);
8273    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8274
8275    REG_WR(sc, reg_addr, aeu_mask);
8276    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8277
8278    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8279    sc->attn_state &= ~deasserted;
8280    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8281}
8282
8283static void
8284bxe_attn_int(struct bxe_softc *sc)
8285{
8286    /* read local copy of bits */
8287    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8288    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8289    uint32_t attn_state = sc->attn_state;
8290
8291    /* look for changed bits */
8292    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8293    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8294
8295    BLOGD(sc, DBG_INTR,
8296          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8297          attn_bits, attn_ack, asserted, deasserted);
8298
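    /*
     * Consistency check: for any bit where attn_bits and attn_ack agree
     * (i.e. the bit is not in transition), the driver's cached attn_state
     * should agree as well. If it does not, the attention bookkeeping has
     * gone out of sync with the hardware.
     */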
8299    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8300        BLOGE(sc, "BAD attention state\n");
8301    }
8302
8303    /* handle bits that were raised */
8304    if (asserted) {
8305        bxe_attn_int_asserted(sc, asserted);
8306    }
8307
8308    if (deasserted) {
8309        bxe_attn_int_deasserted(sc, deasserted);
8310    }
8311}
8312
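/*
 * Poll the default status block and return a bitmask (BXE_DEF_SB_ATT_IDX,
 * BXE_DEF_SB_IDX) indicating which indices have advanced since the last
 * call, caching the new values in the softc.
 */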
8313static uint16_t
8314bxe_update_dsb_idx(struct bxe_softc *sc)
8315{
8316    struct host_sp_status_block *def_sb = sc->def_sb;
8317    uint16_t rc = 0;
8318
8319    mb(); /* status block is written to by the chip */
8320
8321    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8322        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8323        rc |= BXE_DEF_SB_ATT_IDX;
8324    }
8325
8326    if (sc->def_idx != def_sb->sp_sb.running_index) {
8327        sc->def_idx = def_sb->sp_sb.running_index;
8328        rc |= BXE_DEF_SB_IDX;
8329    }
8330
8331    mb();
8332
8333    return (rc);
8334}
8335
8336static inline struct ecore_queue_sp_obj *
8337bxe_cid_to_q_obj(struct bxe_softc *sc,
8338                 uint32_t         cid)
8339{
8340    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8341    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8342}
8343
8344static void
8345bxe_handle_mcast_eqe(struct bxe_softc *sc)
8346{
8347    struct ecore_mcast_ramrod_params rparam;
8348    int rc;
8349
8350    memset(&rparam, 0, sizeof(rparam));
8351
8352    rparam.mcast_obj = &sc->mcast_obj;
8353
8354    BXE_MCAST_LOCK(sc);
8355
8356    /* clear pending state for the last command */
8357    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8358
8359    /* if there are pending mcast commands - send them */
8360    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8361        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8362        if (rc < 0) {
8363            BLOGD(sc, DBG_SP,
8364                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8365        }
8366    }
8367
8368    BXE_MCAST_UNLOCK(sc);
8369}
8370
8371static void
8372bxe_handle_classification_eqe(struct bxe_softc      *sc,
8373                              union event_ring_elem *elem)
8374{
8375    unsigned long ramrod_flags = 0;
8376    int rc = 0;
8377    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8378    struct ecore_vlan_mac_obj *vlan_mac_obj;
8379
8380    /* always push next commands out, don't wait here */
8381    bit_set(&ramrod_flags, RAMROD_CONT);
8382
8383    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8384    case ECORE_FILTER_MAC_PENDING:
8385        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8386        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8387        break;
8388
8389    case ECORE_FILTER_MCAST_PENDING:
8390        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8391        /*
8392         * This is only relevant for 57710 where multicast MACs are
8393         * configured as unicast MACs using the same ramrod.
8394         */
8395        bxe_handle_mcast_eqe(sc);
8396        return;
8397
8398    default:
8399        BLOGE(sc, "Unsupported classification command: %d\n",
8400              elem->message.data.eth_event.echo);
8401        return;
8402    }
8403
8404    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8405
8406    if (rc < 0) {
8407        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8408    } else if (rc > 0) {
8409        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8410    }
8411}
8412
8413static void
8414bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8415                       union event_ring_elem *elem)
8416{
8417    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8418
8419    /* send the rx_mode command again if it was requested */
8420    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8421                               &sc->sp_state)) {
8422        bxe_set_storm_rx_mode(sc);
8423    }
8424}
8425
8426static void
8427bxe_update_eq_prod(struct bxe_softc *sc,
8428                   uint16_t         prod)
8429{
8430    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8431    wmb(); /* keep prod updates ordered */
8432}
8433
8434static void
8435bxe_eq_int(struct bxe_softc *sc)
8436{
8437    uint16_t hw_cons, sw_cons, sw_prod;
8438    union event_ring_elem *elem;
8439    uint8_t echo;
8440    uint32_t cid;
8441    uint8_t opcode;
8442    int spqe_cnt = 0;
8443    struct ecore_queue_sp_obj *q_obj;
8444    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8445    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8446
8447    hw_cons = le16toh(*sc->eq_cons_sb);
8448
8449    /*
8450     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8451     * When we reach the next-page element we need to adjust hw_cons so the
8452     * loop condition below is met. The next element is the size of a
8453     * regular element, hence the increment by 1.
8454     */
8455    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8456        hw_cons++;
8457    }
8458
8459    /*
8460     * This function never runs in parallel with itself for a specific
8461     * sc, so there is no need for a read memory barrier here.
8462     */
8463    sw_cons = sc->eq_cons;
8464    sw_prod = sc->eq_prod;
8465
8466    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8467          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8468
8469    for (;
8470         sw_cons != hw_cons;
8471         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8472
8473        elem = &sc->eq[EQ_DESC(sw_cons)];
8474
8475        /* elem CID originates from FW, actually LE */
8476        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8477        opcode = elem->message.opcode;
8478
8479        /* handle eq element */
8480        switch (opcode) {
8481
8482        case EVENT_RING_OPCODE_STAT_QUERY:
8483            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8484                  sc->stats_comp++);
8485            /* nothing to do with stats comp */
8486            goto next_spqe;
8487
8488        case EVENT_RING_OPCODE_CFC_DEL:
8489            /* handle according to cid range */
8490            /* we may want to verify here that the sc state is HALTING */
8491            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8492            q_obj = bxe_cid_to_q_obj(sc, cid);
8493            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8494                break;
8495            }
8496            goto next_spqe;
8497
8498        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8499            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8500            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8501                break;
8502            }
8503            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8504            goto next_spqe;
8505
8506        case EVENT_RING_OPCODE_START_TRAFFIC:
8507            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8508            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8509                break;
8510            }
8511            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8512            goto next_spqe;
8513
8514        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8515            echo = elem->message.data.function_update_event.echo;
8516            if (echo == SWITCH_UPDATE) {
8517                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8518                if (f_obj->complete_cmd(sc, f_obj,
8519                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8520                    break;
8521                }
8522            }
8523            else {
8524                BLOGD(sc, DBG_SP,
8525                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8526            }
8527            goto next_spqe;
8528
8529        case EVENT_RING_OPCODE_FORWARD_SETUP:
8530            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8531            if (q_obj->complete_cmd(sc, q_obj,
8532                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8533                break;
8534            }
8535            goto next_spqe;
8536
8537        case EVENT_RING_OPCODE_FUNCTION_START:
8538            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8539            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8540                break;
8541            }
8542            goto next_spqe;
8543
8544        case EVENT_RING_OPCODE_FUNCTION_STOP:
8545            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8546            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8547                break;
8548            }
8549            goto next_spqe;
8550        }
8551
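        /*
         * The remaining opcodes are only expected in particular driver
         * states, so the switch below keys on the opcode OR'ed with the
         * current sc->state.
         */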
8552        switch (opcode | sc->state) {
8553        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8554        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8555            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8556            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8557            rss_raw->clear_pending(rss_raw);
8558            break;
8559
8560        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8561        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8562        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8563        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8564        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8565        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8566            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8567            bxe_handle_classification_eqe(sc, elem);
8568            break;
8569
8570        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8571        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8572        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8573            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8574            bxe_handle_mcast_eqe(sc);
8575            break;
8576
8577        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8578        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8579        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8580            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8581            bxe_handle_rx_mode_eqe(sc, elem);
8582            break;
8583
8584        default:
8585            /* unknown event: log an error and continue */
8586            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8587                  elem->message.opcode, sc->state);
8588        }
8589
8590next_spqe:
8591        spqe_cnt++;
8592    } /* for */
8593
8594    mb();
8595    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8596
8597    sc->eq_cons = sw_cons;
8598    sc->eq_prod = sw_prod;
8599
8600    /* make sure the above memory writes have been issued */
8601    wmb();
8602
8603    /* update producer */
8604    bxe_update_eq_prod(sc, sc->eq_prod);
8605}
8606
8607static void
8608bxe_handle_sp_tq(void *context,
8609                 int  pending)
8610{
8611    struct bxe_softc *sc = (struct bxe_softc *)context;
8612    uint16_t status;
8613
8614    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8615
8616    /* what work needs to be performed? */
8617    status = bxe_update_dsb_idx(sc);
8618
8619    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8620
8621    /* HW attentions */
8622    if (status & BXE_DEF_SB_ATT_IDX) {
8623        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8624        bxe_attn_int(sc);
8625        status &= ~BXE_DEF_SB_ATT_IDX;
8626    }
8627
8628    /* SP events: STAT_QUERY and others */
8629    if (status & BXE_DEF_SB_IDX) {
8630        /* handle EQ completions */
8631        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8632        bxe_eq_int(sc);
8633        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8634                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8635        status &= ~BXE_DEF_SB_IDX;
8636    }
8637
8638    /* if status is non-zero then something went wrong */
8639    if (__predict_false(status)) {
8640        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8641    }
8642
8643    /* ack status block only if something was actually handled */
8644    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8645               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8646
8647    /*
8648     * Must be called after the EQ processing (since eq leads to sriov
8649     * ramrod completion flows).
8650     * This flow may have been scheduled by the arrival of a ramrod
8651     * completion, or by the sriov code rescheduling itself.
8652     */
8653    // XXX bxe_iov_sp_task(sc);
8654
8655}
8656
8657static void
8658bxe_handle_fp_tq(void *context,
8659                 int  pending)
8660{
8661    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8662    struct bxe_softc *sc = fp->sc;
8663    uint8_t more_tx = FALSE;
8664    uint8_t more_rx = FALSE;
8665
8666    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8667
8668    /* XXX
8669     * IFF_DRV_RUNNING state can't be checked here since we process
8670     * slowpath events on a client queue during setup. Instead
8671     * we need to add a "process/continue" flag that the driver
8672     * can use to tell this task not to do anything.
8673     */
8674#if 0
8675    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8676        return;
8677    }
8678#endif
8679
8680    /* update the fastpath index */
8681    bxe_update_fp_sb_idx(fp);
8682
8683    /* XXX add loop here if ever support multiple tx CoS */
8684    /* fp->txdata[cos] */
8685    if (bxe_has_tx_work(fp)) {
8686        BXE_FP_TX_LOCK(fp);
8687        more_tx = bxe_txeof(sc, fp);
8688        BXE_FP_TX_UNLOCK(fp);
8689    }
8690
8691    if (bxe_has_rx_work(fp)) {
8692        more_rx = bxe_rxeof(sc, fp);
8693    }
8694
8695    if (more_rx /*|| more_tx*/) {
8696        /* still more work to do */
8697        taskqueue_enqueue(fp->tq, &fp->tq_task);
8698        return;
8699    }
8700
8701    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8702               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8703}
8704
8705static void
8706bxe_task_fp(struct bxe_fastpath *fp)
8707{
8708    struct bxe_softc *sc = fp->sc;
8709    uint8_t more_tx = FALSE;
8710    uint8_t more_rx = FALSE;
8711
8712    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8713
8714    /* update the fastpath index */
8715    bxe_update_fp_sb_idx(fp);
8716
8717    /* XXX add loop here if ever support multiple tx CoS */
8718    /* fp->txdata[cos] */
8719    if (bxe_has_tx_work(fp)) {
8720        BXE_FP_TX_LOCK(fp);
8721        more_tx = bxe_txeof(sc, fp);
8722        BXE_FP_TX_UNLOCK(fp);
8723    }
8724
8725    if (bxe_has_rx_work(fp)) {
8726        more_rx = bxe_rxeof(sc, fp);
8727    }
8728
8729    if (more_rx /*|| more_tx*/) {
8730        /* still more work to do; bail out of this ISR and process later */
8731        taskqueue_enqueue(fp->tq, &fp->tq_task);
8732        return;
8733    }
8734
8735    /*
8736     * Here we write the fastpath index taken before doing any tx or rx work.
8737     * It is quite possible that other hw events occurred up to this point
8738     * and were already processed above. Since we are about to write an
8739     * older fastpath index, another interrupt will arrive in which we may
8740     * not do any work.
8741     */
8742    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8743               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8744}
8745
8746/*
8747 * Legacy interrupt entry point.
8748 *
8749 * Verifies that the controller generated the interrupt and
8750 * then calls a separate routine to handle the various
8751 * interrupt causes: link, RX, and TX.
8752 */
8753static void
8754bxe_intr_legacy(void *xsc)
8755{
8756    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8757    struct bxe_fastpath *fp;
8758    uint16_t status, mask;
8759    int i;
8760
8761    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8762
8763    /*
8764     * 0 for ustorm, 1 for cstorm
8765     * the bits returned from ack_int() are 0-15
8766     * bit 0 = attention status block
8767     * bit 1 = fast path status block
8768     * a mask of 0x2 or more = tx/rx event
8769     * a mask of 1 = slow path event
8770     */
8771
8772    status = bxe_ack_int(sc);
8773
8774    /* the interrupt is not for us */
8775    if (__predict_false(status == 0)) {
8776        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8777        return;
8778    }
8779
8780    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8781
8782    FOR_EACH_ETH_QUEUE(sc, i) {
8783        fp = &sc->fp[i];
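        /*
         * Each fastpath status block is reported in its own status bit,
         * starting at bit 1 (shifted up by one more when CNIC support is
         * enabled).
         */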
8784        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8785        if (status & mask) {
8786            /* acknowledge and disable further fastpath interrupts */
8787            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8788            bxe_task_fp(fp);
8789            status &= ~mask;
8790        }
8791    }
8792
8793    if (__predict_false(status & 0x1)) {
8794        /* acknowledge and disable further slowpath interrupts */
8795        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8796
8797        /* schedule slowpath handler */
8798        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8799
8800        status &= ~0x1;
8801    }
8802
8803    if (__predict_false(status)) {
8804        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8805    }
8806}
8807
8808/* slowpath interrupt entry point */
8809static void
8810bxe_intr_sp(void *xsc)
8811{
8812    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8813
8814    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8815
8816    /* acknowledge and disable further slowpath interrupts */
8817    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8818
8819    /* schedule slowpath handler */
8820    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8821}
8822
8823/* fastpath interrupt entry point */
8824static void
8825bxe_intr_fp(void *xfp)
8826{
8827    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8828    struct bxe_softc *sc = fp->sc;
8829
8830    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8831
8832    BLOGD(sc, DBG_INTR,
8833          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8834          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8835
8836    /* acknowledge and disable further fastpath interrupts */
8837    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8838
8839    bxe_task_fp(fp);
8840}
8841
8842/* Release all interrupts allocated by the driver. */
8843static void
8844bxe_interrupt_free(struct bxe_softc *sc)
8845{
8846    int i;
8847
8848    switch (sc->interrupt_mode) {
8849    case INTR_MODE_INTX:
8850        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8851        if (sc->intr[0].resource != NULL) {
8852            bus_release_resource(sc->dev,
8853                                 SYS_RES_IRQ,
8854                                 sc->intr[0].rid,
8855                                 sc->intr[0].resource);
8856        }
8857        break;
8858    case INTR_MODE_MSI:
8859        for (i = 0; i < sc->intr_count; i++) {
8860            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8861            if (sc->intr[i].resource && sc->intr[i].rid) {
8862                bus_release_resource(sc->dev,
8863                                     SYS_RES_IRQ,
8864                                     sc->intr[i].rid,
8865                                     sc->intr[i].resource);
8866            }
8867        }
8868        pci_release_msi(sc->dev);
8869        break;
8870    case INTR_MODE_MSIX:
8871        for (i = 0; i < sc->intr_count; i++) {
8872            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8873            if (sc->intr[i].resource && sc->intr[i].rid) {
8874                bus_release_resource(sc->dev,
8875                                     SYS_RES_IRQ,
8876                                     sc->intr[i].rid,
8877                                     sc->intr[i].resource);
8878            }
8879        }
8880        pci_release_msi(sc->dev);
8881        break;
8882    default:
8883        /* nothing to do as initial allocation failed */
8884        break;
8885    }
8886}
8887
8888/*
8889 * This function determines and allocates the appropriate
8890 * interrupt based on system capabilities and user request.
8891 *
8892 * The user may force a particular interrupt mode, specify
8893 * the number of receive queues, specify the method for
8894 * distributing received frames to receive queues, or use
8895 * the default settings which will automatically select the
8896 * best supported combination.  In addition, the OS may or
8897 * may not support certain combinations of these settings.
8898 * This routine attempts to reconcile the settings requested
8899 * by the user with the capabilities available from the system
8900 * to select the optimal combination of features.
8901 *
8902 * Returns:
8903 *   0 = Success, !0 = Failure.
8904 */
8905static int
8906bxe_interrupt_alloc(struct bxe_softc *sc)
8907{
8908    int msix_count = 0;
8909    int msi_count = 0;
8910    int num_requested = 0;
8911    int num_allocated = 0;
8912    int rid, i, j;
8913    int rc;
8914
8915    /* get the number of available MSI/MSI-X interrupts from the OS */
8916    if (sc->interrupt_mode > 0) {
8917        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8918            msix_count = pci_msix_count(sc->dev);
8919        }
8920
8921        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8922            msi_count = pci_msi_count(sc->dev);
8923        }
8924
8925        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8926              msi_count, msix_count);
8927    }
8928
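    /*
     * Interrupt allocation falls through MSI-X -> MSI -> INTx: each of the
     * do/while blocks below downgrades sc->interrupt_mode and breaks out on
     * failure so that the next method is attempted.
     */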
8929    do { /* try allocating MSI-X interrupt resources (at least 2) */
8930        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8931            break;
8932        }
8933
8934        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8935            (msix_count < 2)) {
8936            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8937            break;
8938        }
8939
8940        /* ask for the necessary number of MSI-X vectors */
8941        num_requested = min((sc->num_queues + 1), msix_count);
8942
8943        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8944
8945        num_allocated = num_requested;
8946        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8947            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8948            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8949            break;
8950        }
8951
8952        if (num_allocated < 2) { /* possible? */
8953            BLOGE(sc, "MSI-X allocation less than 2!\n");
8954            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8955            pci_release_msi(sc->dev);
8956            break;
8957        }
8958
8959        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8960              num_requested, num_allocated);
8961
8962        /* best effort so use the number of vectors allocated to us */
8963        sc->intr_count = num_allocated;
8964        sc->num_queues = num_allocated - 1;
8965
8966        rid = 1; /* initial resource identifier */
8967
8968        /* allocate the MSI-X vectors */
8969        for (i = 0; i < num_allocated; i++) {
8970            sc->intr[i].rid = (rid + i);
8971
8972            if ((sc->intr[i].resource =
8973                 bus_alloc_resource_any(sc->dev,
8974                                        SYS_RES_IRQ,
8975                                        &sc->intr[i].rid,
8976                                        RF_ACTIVE)) == NULL) {
8977                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8978                      i, (rid + i));
8979
8980                for (j = (i - 1); j >= 0; j--) {
8981                    bus_release_resource(sc->dev,
8982                                         SYS_RES_IRQ,
8983                                         sc->intr[j].rid,
8984                                         sc->intr[j].resource);
8985                }
8986
8987                sc->intr_count = 0;
8988                sc->num_queues = 0;
8989                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8990                pci_release_msi(sc->dev);
8991                break;
8992            }
8993
8994            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
8995        }
8996    } while (0);
8997
8998    do { /* try allocating MSI vector resources (just 1) */
8999        if (sc->interrupt_mode != INTR_MODE_MSI) {
9000            break;
9001        }
9002
9003        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9004            (msi_count < 1)) {
9005            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9006            break;
9007        }
9008
9009        /* ask for a single MSI vector */
9010        num_requested = 1;
9011
9012        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9013
9014        num_allocated = num_requested;
9015        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9016            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9017            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9018            break;
9019        }
9020
9021        if (num_allocated != 1) { /* possible? */
9022            BLOGE(sc, "MSI allocation is not 1!\n");
9023            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9024            pci_release_msi(sc->dev);
9025            break;
9026        }
9027
9028        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9029              num_requested, num_allocated);
9030
9031        /* best effort so use the number of vectors allocated to us */
9032        sc->intr_count = num_allocated;
9033        sc->num_queues = num_allocated;
9034
9035        rid = 1; /* initial resource identifier */
9036
9037        sc->intr[0].rid = rid;
9038
9039        if ((sc->intr[0].resource =
9040             bus_alloc_resource_any(sc->dev,
9041                                    SYS_RES_IRQ,
9042                                    &sc->intr[0].rid,
9043                                    RF_ACTIVE)) == NULL) {
9044            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9045            sc->intr_count = 0;
9046            sc->num_queues = 0;
9047            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9048            pci_release_msi(sc->dev);
9049            break;
9050        }
9051
9052        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9053    } while (0);
9054
9055    do { /* try allocating INTx vector resources */
9056        if (sc->interrupt_mode != INTR_MODE_INTX) {
9057            break;
9058        }
9059
9060        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9061
9062        /* only one vector for INTx */
9063        sc->intr_count = 1;
9064        sc->num_queues = 1;
9065
9066        rid = 0; /* initial resource identifier */
9067
9068        sc->intr[0].rid = rid;
9069
9070        if ((sc->intr[0].resource =
9071             bus_alloc_resource_any(sc->dev,
9072                                    SYS_RES_IRQ,
9073                                    &sc->intr[0].rid,
9074                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9075            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9076            sc->intr_count = 0;
9077            sc->num_queues = 0;
9078            sc->interrupt_mode = -1; /* Failed! */
9079            break;
9080        }
9081
9082        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9083    } while (0);
9084
9085    if (sc->interrupt_mode == -1) {
9086        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9087        rc = 1;
9088    } else {
9089        BLOGD(sc, DBG_LOAD,
9090              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9091              sc->interrupt_mode, sc->num_queues);
9092        rc = 0;
9093    }
9094
9095    return (rc);
9096}
9097
9098static void
9099bxe_interrupt_detach(struct bxe_softc *sc)
9100{
9101    struct bxe_fastpath *fp;
9102    int i;
9103
9104    /* release interrupt resources */
9105    for (i = 0; i < sc->intr_count; i++) {
9106        if (sc->intr[i].resource && sc->intr[i].tag) {
9107            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9108            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9109        }
9110    }
9111
9112    for (i = 0; i < sc->num_queues; i++) {
9113        fp = &sc->fp[i];
9114        if (fp->tq) {
9115            taskqueue_drain(fp->tq, &fp->tq_task);
9116            taskqueue_drain(fp->tq, &fp->tx_task);
9117            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9118                NULL))
9119                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9120            taskqueue_free(fp->tq);
9121            fp->tq = NULL;
9122        }
9123    }
9124
9125
9126    if (sc->sp_tq) {
9127        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9128        taskqueue_free(sc->sp_tq);
9129        sc->sp_tq = NULL;
9130    }
9131}
9132
9133/*
9134 * Enables interrupts and attach to the ISR.
9135 *
9136 * When using multiple MSI/MSI-X vectors the first vector
9137 * is used for slowpath operations while all remaining
9138 * vectors are used for fastpath operations.  If only a
9139 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9140 * ISR must look for both slowpath and fastpath completions.
9141 */
9142static int
9143bxe_interrupt_attach(struct bxe_softc *sc)
9144{
9145    struct bxe_fastpath *fp;
9146    int rc = 0;
9147    int i;
9148
9149    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9150             "bxe%d_sp_tq", sc->unit);
9151    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9152    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9153                                 taskqueue_thread_enqueue,
9154                                 &sc->sp_tq);
9155    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9156                            "%s", sc->sp_tq_name);
9157
9158
9159    for (i = 0; i < sc->num_queues; i++) {
9160        fp = &sc->fp[i];
9161        snprintf(fp->tq_name, sizeof(fp->tq_name),
9162                 "bxe%d_fp%d_tq", sc->unit, i);
9163        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9164        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9165        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9166                                  taskqueue_thread_enqueue,
9167                                  &fp->tq);
9168        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9169                          bxe_tx_mq_start_deferred, fp);
9170        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9171                                "%s", fp->tq_name);
9172    }
9173
9174    /* setup interrupt handlers */
9175    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9176        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9177
9178        /*
9179         * Setup the interrupt handler. Note that we pass the driver instance
9180         * to the interrupt handler for the slowpath.
9181         */
9182        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9183                                 (INTR_TYPE_NET | INTR_MPSAFE),
9184                                 NULL, bxe_intr_sp, sc,
9185                                 &sc->intr[0].tag)) != 0) {
9186            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9187            goto bxe_interrupt_attach_exit;
9188        }
9189
9190        bus_describe_intr(sc->dev, sc->intr[0].resource,
9191                          sc->intr[0].tag, "sp");
9192
9193        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9194
9195        /* initialize the fastpath vectors (note the first was used for sp) */
9196        for (i = 0; i < sc->num_queues; i++) {
9197            fp = &sc->fp[i];
9198            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9199
9200            /*
9201             * Setup the interrupt handler. Note that we pass the
9202             * fastpath context to the interrupt handler in this
9203             * case.
9204             */
9205            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9206                                     (INTR_TYPE_NET | INTR_MPSAFE),
9207                                     NULL, bxe_intr_fp, fp,
9208                                     &sc->intr[i + 1].tag)) != 0) {
9209                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9210                      (i + 1), rc);
9211                goto bxe_interrupt_attach_exit;
9212            }
9213
9214            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9215                              sc->intr[i + 1].tag, "fp%02d", i);
9216
9217            /* bind the fastpath instance to a cpu */
9218            if (sc->num_queues > 1) {
9219                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9220            }
9221
9222            fp->state = BXE_FP_STATE_IRQ;
9223        }
9224    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9225        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9226
9227        /*
9228         * Setup the interrupt handler. Note that we pass the
9229         * driver instance to the interrupt handler which
9230         * will handle both the slowpath and fastpath.
9231         */
9232        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9233                                 (INTR_TYPE_NET | INTR_MPSAFE),
9234                                 NULL, bxe_intr_legacy, sc,
9235                                 &sc->intr[0].tag)) != 0) {
9236            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9237            goto bxe_interrupt_attach_exit;
9238        }
9239
9240    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9241        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9242
9243        /*
9244         * Setup the interrupt handler. Note that we pass the
9245         * driver instance to the interrupt handler which
9246         * will handle both the slowpath and fastpath.
9247         */
9248        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9249                                 (INTR_TYPE_NET | INTR_MPSAFE),
9250                                 NULL, bxe_intr_legacy, sc,
9251                                 &sc->intr[0].tag)) != 0) {
9252            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9253            goto bxe_interrupt_attach_exit;
9254        }
9255    }
9256
9257bxe_interrupt_attach_exit:
9258
9259    return (rc);
9260}
9261
9262static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9263static int  bxe_init_hw_common(struct bxe_softc *sc);
9264static int  bxe_init_hw_port(struct bxe_softc *sc);
9265static int  bxe_init_hw_func(struct bxe_softc *sc);
9266static void bxe_reset_common(struct bxe_softc *sc);
9267static void bxe_reset_port(struct bxe_softc *sc);
9268static void bxe_reset_func(struct bxe_softc *sc);
9269static int  bxe_gunzip_init(struct bxe_softc *sc);
9270static void bxe_gunzip_end(struct bxe_softc *sc);
9271static int  bxe_init_firmware(struct bxe_softc *sc);
9272static void bxe_release_firmware(struct bxe_softc *sc);
9273
9274static struct
9275ecore_func_sp_drv_ops bxe_func_sp_drv = {
9276    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9277    .init_hw_cmn      = bxe_init_hw_common,
9278    .init_hw_port     = bxe_init_hw_port,
9279    .init_hw_func     = bxe_init_hw_func,
9280
9281    .reset_hw_cmn     = bxe_reset_common,
9282    .reset_hw_port    = bxe_reset_port,
9283    .reset_hw_func    = bxe_reset_func,
9284
9285    .gunzip_init      = bxe_gunzip_init,
9286    .gunzip_end       = bxe_gunzip_end,
9287
9288    .init_fw          = bxe_init_firmware,
9289    .release_fw       = bxe_release_firmware,
9290};
9291
9292static void
9293bxe_init_func_obj(struct bxe_softc *sc)
9294{
9295    sc->dmae_ready = 0;
9296
9297    ecore_init_func_obj(sc,
9298                        &sc->func_obj,
9299                        BXE_SP(sc, func_rdata),
9300                        BXE_SP_MAPPING(sc, func_rdata),
9301                        BXE_SP(sc, func_afex_rdata),
9302                        BXE_SP_MAPPING(sc, func_afex_rdata),
9303                        &bxe_func_sp_drv);
9304}
9305
9306static int
9307bxe_init_hw(struct bxe_softc *sc,
9308            uint32_t         load_code)
9309{
9310    struct ecore_func_state_params func_params = { NULL };
9311    int rc;
9312
9313    /* prepare the parameters for function state transitions */
9314    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9315
9316    func_params.f_obj = &sc->func_obj;
9317    func_params.cmd = ECORE_F_CMD_HW_INIT;
9318
9319    func_params.params.hw_init.load_phase = load_code;
9320
9321    /*
9322     * Via a plethora of function pointers, we will eventually reach
9323     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9324     */
9325    rc = ecore_func_state_change(sc, &func_params);
9326
9327    return (rc);
9328}
9329
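/*
 * Fill a region of device memory with the given value. When both the address
 * and the length are dword aligned the fill uses 32-bit writes, otherwise it
 * falls back to byte-wide writes.
 */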
9330static void
9331bxe_fill(struct bxe_softc *sc,
9332         uint32_t         addr,
9333         int              fill,
9334         uint32_t         len)
9335{
9336    uint32_t i;
9337
9338    if (!(len % 4) && !(addr % 4)) {
9339        for (i = 0; i < len; i += 4) {
9340            REG_WR(sc, (addr + i), fill);
9341        }
9342    } else {
9343        for (i = 0; i < len; i++) {
9344            REG_WR8(sc, (addr + i), fill);
9345        }
9346    }
9347}
9348
9349/* writes FP SP data to FW - data_size in dwords */
9350static void
9351bxe_wr_fp_sb_data(struct bxe_softc *sc,
9352                  int              fw_sb_id,
9353                  uint32_t         *sb_data_p,
9354                  uint32_t         data_size)
9355{
9356    int index;
9357
9358    for (index = 0; index < data_size; index++) {
9359        REG_WR(sc,
9360               (BAR_CSTRORM_INTMEM +
9361                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9362                (sizeof(uint32_t) * index)),
9363               *(sb_data_p + index));
9364    }
9365}
9366
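/*
 * Disable a fastpath status block: mark it SB_DISABLED in the CSTORM data
 * and clear the corresponding status and sync block areas in internal memory.
 */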
9367static void
9368bxe_zero_fp_sb(struct bxe_softc *sc,
9369               int              fw_sb_id)
9370{
9371    struct hc_status_block_data_e2 sb_data_e2;
9372    struct hc_status_block_data_e1x sb_data_e1x;
9373    uint32_t *sb_data_p;
9374    uint32_t data_size = 0;
9375
9376    if (!CHIP_IS_E1x(sc)) {
9377        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9378        sb_data_e2.common.state = SB_DISABLED;
9379        sb_data_e2.common.p_func.vf_valid = FALSE;
9380        sb_data_p = (uint32_t *)&sb_data_e2;
9381        data_size = (sizeof(struct hc_status_block_data_e2) /
9382                     sizeof(uint32_t));
9383    } else {
9384        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9385        sb_data_e1x.common.state = SB_DISABLED;
9386        sb_data_e1x.common.p_func.vf_valid = FALSE;
9387        sb_data_p = (uint32_t *)&sb_data_e1x;
9388        data_size = (sizeof(struct hc_status_block_data_e1x) /
9389                     sizeof(uint32_t));
9390    }
9391
9392    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9393
9394    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9395             0, CSTORM_STATUS_BLOCK_SIZE);
9396    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9397             0, CSTORM_SYNC_BLOCK_SIZE);
9398}
9399
9400static void
9401bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9402                  struct hc_sp_status_block_data *sp_sb_data)
9403{
9404    int i;
9405
9406    for (i = 0;
9407         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9408         i++) {
9409        REG_WR(sc,
9410               (BAR_CSTRORM_INTMEM +
9411                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9412                (i * sizeof(uint32_t))),
9413               *((uint32_t *)sp_sb_data + i));
9414    }
9415}
9416
9417static void
9418bxe_zero_sp_sb(struct bxe_softc *sc)
9419{
9420    struct hc_sp_status_block_data sp_sb_data;
9421
9422    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9423
9424    sp_sb_data.state           = SB_DISABLED;
9425    sp_sb_data.p_func.vf_valid = FALSE;
9426
9427    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9428
9429    bxe_fill(sc,
9430             (BAR_CSTRORM_INTMEM +
9431              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9432              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9433    bxe_fill(sc,
9434             (BAR_CSTRORM_INTMEM +
9435              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9436              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9437}
9438
9439static void
9440bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9441                             int                       igu_sb_id,
9442                             int                       igu_seg_id)
9443{
9444    hc_sm->igu_sb_id      = igu_sb_id;
9445    hc_sm->igu_seg_id     = igu_seg_id;
9446    hc_sm->timer_value    = 0xFF;
9447    hc_sm->time_to_expire = 0xFFFFFFFF;
9448}
9449
9450static void
9451bxe_map_sb_state_machines(struct hc_index_data *index_data)
9452{
9453    /* zero out state machine indices */
9454
9455    /* rx indices */
9456    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9457
9458    /* tx indices */
9459    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9460    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9461    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9462    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9463
9464    /* map indices */
9465
9466    /* rx indices */
9467    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9468        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9469
9470    /* tx indices */
9471    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9472        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9473    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9474        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9475    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9476        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9477    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9478        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9479}
9480
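/*
 * Initialize a firmware status block in CSTORM internal memory: program the
 * host DMA address, function/VF identity and the RX/TX state machines for
 * the given fw_sb_id/igu_sb_id pair, using the E2 or E1x layout as needed.
 */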
9481static void
9482bxe_init_sb(struct bxe_softc *sc,
9483            bus_addr_t       busaddr,
9484            int              vfid,
9485            uint8_t          vf_valid,
9486            int              fw_sb_id,
9487            int              igu_sb_id)
9488{
9489    struct hc_status_block_data_e2  sb_data_e2;
9490    struct hc_status_block_data_e1x sb_data_e1x;
9491    struct hc_status_block_sm       *hc_sm_p;
9492    uint32_t *sb_data_p;
9493    int igu_seg_id;
9494    int data_size;
9495
9496    if (CHIP_INT_MODE_IS_BC(sc)) {
9497        igu_seg_id = HC_SEG_ACCESS_NORM;
9498    } else {
9499        igu_seg_id = IGU_SEG_ACCESS_NORM;
9500    }
9501
9502    bxe_zero_fp_sb(sc, fw_sb_id);
9503
9504    if (!CHIP_IS_E1x(sc)) {
9505        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9506        sb_data_e2.common.state = SB_ENABLED;
9507        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9508        sb_data_e2.common.p_func.vf_id = vfid;
9509        sb_data_e2.common.p_func.vf_valid = vf_valid;
9510        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9511        sb_data_e2.common.same_igu_sb_1b = TRUE;
9512        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9513        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9514        hc_sm_p = sb_data_e2.common.state_machine;
9515        sb_data_p = (uint32_t *)&sb_data_e2;
9516        data_size = (sizeof(struct hc_status_block_data_e2) /
9517                     sizeof(uint32_t));
9518        bxe_map_sb_state_machines(sb_data_e2.index_data);
9519    } else {
9520        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9521        sb_data_e1x.common.state = SB_ENABLED;
9522        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9523        sb_data_e1x.common.p_func.vf_id = 0xff;
9524        sb_data_e1x.common.p_func.vf_valid = FALSE;
9525        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9526        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9527        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9528        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9529        hc_sm_p = sb_data_e1x.common.state_machine;
9530        sb_data_p = (uint32_t *)&sb_data_e1x;
9531        data_size = (sizeof(struct hc_status_block_data_e1x) /
9532                     sizeof(uint32_t));
9533        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9534    }
9535
9536    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9537    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9538
9539    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9540
9541    /* write indices to HW - PCI guarantees endianness of regpairs */
9542    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9543}
9544
9545static inline uint8_t
9546bxe_fp_qzone_id(struct bxe_fastpath *fp)
9547{
9548    if (CHIP_IS_E1x(fp->sc)) {
9549        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9550    } else {
9551        return (fp->cl_id);
9552    }
9553}
9554
9555static inline uint32_t
9556bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9557                           struct bxe_fastpath *fp)
9558{
9559    uint32_t offset = BAR_USTRORM_INTMEM;
9560
9561    if (!CHIP_IS_E1x(sc)) {
9562        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9563    } else {
9564        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9565    }
9566
9567    return (offset);
9568}
9569
9570static void
9571bxe_init_eth_fp(struct bxe_softc *sc,
9572                int              idx)
9573{
9574    struct bxe_fastpath *fp = &sc->fp[idx];
9575    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9576    unsigned long q_type = 0;
9577    int cos;
9578
9579    fp->sc    = sc;
9580    fp->index = idx;
9581
9582    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9583    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9584
9585    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9586                    (SC_L_ID(sc) + idx) :
9587                    /* want client ID same as IGU SB ID for non-E1x */
9588                    fp->igu_sb_id;
9589    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9590
9591    /* setup sb indices */
9592    if (!CHIP_IS_E1x(sc)) {
9593        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9594        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9595    } else {
9596        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9597        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9598    }
9599
9600    /* init shortcut */
9601    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9602
9603    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9604
9605    /*
9606     * XXX If multiple CoS are ever supported, each fastpath structure will
9607     * need to maintain tx producer/consumer/dma/etc. values *per* CoS.
9608     */
9609    for (cos = 0; cos < sc->max_cos; cos++) {
9610        cids[cos] = idx;
9611    }
9612    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9613
9614    /* nothing more for a VF to do */
9615    if (IS_VF(sc)) {
9616        return;
9617    }
9618
9619    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9620                fp->fw_sb_id, fp->igu_sb_id);
9621
9622    bxe_update_fp_sb_idx(fp);
9623
9624    /* Configure Queue State object */
9625    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9626    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9627
9628    ecore_init_queue_obj(sc,
9629                         &sc->sp_objs[idx].q_obj,
9630                         fp->cl_id,
9631                         cids,
9632                         sc->max_cos,
9633                         SC_FUNC(sc),
9634                         BXE_SP(sc, q_rdata),
9635                         BXE_SP_MAPPING(sc, q_rdata),
9636                         q_type);
9637
9638    /* configure classification DBs */
9639    ecore_init_mac_obj(sc,
9640                       &sc->sp_objs[idx].mac_obj,
9641                       fp->cl_id,
9642                       idx,
9643                       SC_FUNC(sc),
9644                       BXE_SP(sc, mac_rdata),
9645                       BXE_SP_MAPPING(sc, mac_rdata),
9646                       ECORE_FILTER_MAC_PENDING,
9647                       &sc->sp_state,
9648                       ECORE_OBJ_TYPE_RX_TX,
9649                       &sc->macs_pool);
9650
9651    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9652          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9653}
9654
9655static inline void
9656bxe_update_rx_prod(struct bxe_softc    *sc,
9657                   struct bxe_fastpath *fp,
9658                   uint16_t            rx_bd_prod,
9659                   uint16_t            rx_cq_prod,
9660                   uint16_t            rx_sge_prod)
9661{
9662    struct ustorm_eth_rx_producers rx_prods = { 0 };
9663    uint32_t i;
9664
9665    /* update producers */
9666    rx_prods.bd_prod  = rx_bd_prod;
9667    rx_prods.cqe_prod = rx_cq_prod;
9668    rx_prods.sge_prod = rx_sge_prod;
9669
9670    /*
9671     * Make sure that the BD and SGE data is updated before updating the
9672     * producers since FW might read the BD/SGE right after the producer
9673     * is updated.
9674     * This is only applicable for weak-ordered memory model archs such
9675     * as IA-64. The following barrier is also mandatory since the FW
9676     * assumes BDs must have buffers.
9677     */
9678    wmb();
9679
9680    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9681        REG_WR(sc,
9682               (fp->ustorm_rx_prods_offset + (i * 4)),
9683               ((uint32_t *)&rx_prods)[i]);
9684    }
9685
9686    wmb(); /* keep prod updates ordered */
9687
9688    BLOGD(sc, DBG_RX,
9689          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9690          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9691}
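
/*
 * Usage sketch (hypothetical caller, not part of this driver): after refilling
 * BDs/SGEs a caller updates its local producer copies first and only then
 * publishes all three producers in a single call, relying on the barriers
 * above so the FW never sees a producer ahead of its ring data:
 *
 *     fp->rx_bd_prod  = new_bd_prod;
 *     fp->rx_cq_prod  = new_cq_prod;
 *     fp->rx_sge_prod = new_sge_prod;
 *     bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod,
 *                        fp->rx_sge_prod);
 *
 * bxe_init_rx_rings() below follows this pattern.
 */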
9692
9693static void
9694bxe_init_rx_rings(struct bxe_softc *sc)
9695{
9696    struct bxe_fastpath *fp;
9697    int i;
9698
9699    for (i = 0; i < sc->num_queues; i++) {
9700        fp = &sc->fp[i];
9701
9702        fp->rx_bd_cons = 0;
9703
9704        /*
9705         * Activate the BD ring...
9706         * Warning, this will generate an interrupt (to the TSTORM)
9707         * so this can only be done after the chip is initialized
9708         */
9709        bxe_update_rx_prod(sc, fp,
9710                           fp->rx_bd_prod,
9711                           fp->rx_cq_prod,
9712                           fp->rx_sge_prod);
9713
9714        if (i != 0) {
9715            continue;
9716        }
9717
9718        if (CHIP_IS_E1(sc)) {
9719            REG_WR(sc,
9720                   (BAR_USTRORM_INTMEM +
9721                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9722                   U64_LO(fp->rcq_dma.paddr));
9723            REG_WR(sc,
9724                   (BAR_USTRORM_INTMEM +
9725                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9726                   U64_HI(fp->rcq_dma.paddr));
9727        }
9728    }
9729}
9730
9731static void
9732bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9733{
9734    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9735    fp->tx_db.data.zero_fill1 = 0;
9736    fp->tx_db.data.prod = 0;
9737
9738    fp->tx_pkt_prod = 0;
9739    fp->tx_pkt_cons = 0;
9740    fp->tx_bd_prod = 0;
9741    fp->tx_bd_cons = 0;
9742    fp->eth_q_stats.tx_pkts = 0;
9743}
9744
9745static inline void
9746bxe_init_tx_rings(struct bxe_softc *sc)
9747{
9748    int i;
9749
9750    for (i = 0; i < sc->num_queues; i++) {
9751        bxe_init_tx_ring_one(&sc->fp[i]);
9752    }
9753}
9754
9755static void
9756bxe_init_def_sb(struct bxe_softc *sc)
9757{
9758    struct host_sp_status_block *def_sb = sc->def_sb;
9759    bus_addr_t mapping = sc->def_sb_dma.paddr;
9760    int igu_sp_sb_index;
9761    int igu_seg_id;
9762    int port = SC_PORT(sc);
9763    int func = SC_FUNC(sc);
9764    int reg_offset, reg_offset_en5;
9765    uint64_t section;
9766    int index, sindex;
9767    struct hc_sp_status_block_data sp_sb_data;
9768
9769    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9770
9771    if (CHIP_INT_MODE_IS_BC(sc)) {
9772        igu_sp_sb_index = DEF_SB_IGU_ID;
9773        igu_seg_id = HC_SEG_ACCESS_DEF;
9774    } else {
9775        igu_sp_sb_index = sc->igu_dsb_id;
9776        igu_seg_id = IGU_SEG_ACCESS_DEF;
9777    }
9778
9779    /* attentions */
9780    section = ((uint64_t)mapping +
9781               offsetof(struct host_sp_status_block, atten_status_block));
9782    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9783    sc->attn_state = 0;
9784
9785    reg_offset = (port) ?
9786                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9787                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9788    reg_offset_en5 = (port) ?
9789                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9790                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9791
9792    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9793        /* take care of sig[0]..sig[4] */
9794        for (sindex = 0; sindex < 4; sindex++) {
9795            sc->attn_group[index].sig[sindex] =
9796                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9797        }
9798
9799        if (!CHIP_IS_E1x(sc)) {
9800            /*
9801             * enable5 is separate from the rest of the registers,
9802             * and the address skip is 4 and not 16 between the
9803             * different groups
9804             */
9805            sc->attn_group[index].sig[4] =
9806                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9807        } else {
9808            sc->attn_group[index].sig[4] = 0;
9809        }
9810    }
9811
9812    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9813        reg_offset = (port) ?
9814                         HC_REG_ATTN_MSG1_ADDR_L :
9815                         HC_REG_ATTN_MSG0_ADDR_L;
9816        REG_WR(sc, reg_offset, U64_LO(section));
9817        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9818    } else if (!CHIP_IS_E1x(sc)) {
9819        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9820        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9821    }
9822
9823    section = ((uint64_t)mapping +
9824               offsetof(struct host_sp_status_block, sp_sb));
9825
9826    bxe_zero_sp_sb(sc);
9827
9828    /* PCI guarantees endianness of regpairs */
9829    sp_sb_data.state           = SB_ENABLED;
9830    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9831    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9832    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9833    sp_sb_data.igu_seg_id      = igu_seg_id;
9834    sp_sb_data.p_func.pf_id    = func;
9835    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9836    sp_sb_data.p_func.vf_id    = 0xff;
9837
9838    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9839
9840    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9841}
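
/*
 * Worked example for the attention group reads above (index value chosen for
 * illustration only): for index == 2, sig[0..3] come from
 * (reg_offset + 0x20 + sindex * 0x4), while on non-E1x chips sig[4] comes
 * from (reg_offset_en5 + 0x8) - the ENABLE5 registers use a 4-byte stride
 * between groups instead of the 16-byte stride of the other registers.
 */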
9842
9843static void
9844bxe_init_sp_ring(struct bxe_softc *sc)
9845{
9846    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9847    sc->spq_prod_idx = 0;
9848    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9849    sc->spq_prod_bd = sc->spq;
9850    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9851}
9852
9853static void
9854bxe_init_eq_ring(struct bxe_softc *sc)
9855{
9856    union event_ring_elem *elem;
9857    int i;
9858
9859    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9860        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9861
9862        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9863                                                 BCM_PAGE_SIZE *
9864                                                 (i % NUM_EQ_PAGES)));
9865        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9866                                                 BCM_PAGE_SIZE *
9867                                                 (i % NUM_EQ_PAGES)));
9868    }
9869
9870    sc->eq_cons    = 0;
9871    sc->eq_prod    = NUM_EQ_DESC;
9872    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9873
9874    atomic_store_rel_long(&sc->eq_spq_left,
9875                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9876                               NUM_EQ_DESC) - 1));
9877}
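
/*
 * Worked example of the next-page chaining above, assuming (for illustration)
 * NUM_EQ_PAGES == 2: the loop runs for i = 1 and i = 2; the last element of
 * the first page points at (eq_dma.paddr + BCM_PAGE_SIZE), i.e. the second
 * page, and the last element of the second page points at eq_dma.paddr again,
 * so the event queue pages form a ring.
 */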
9878
9879static void
9880bxe_init_internal_common(struct bxe_softc *sc)
9881{
9882    int i;
9883
9884    /*
9885     * Zero this manually as its initialization is currently missing
9886     * in the initTool.
9887     */
9888    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9889        REG_WR(sc,
9890               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9891               0);
9892    }
9893
9894    if (!CHIP_IS_E1x(sc)) {
9895        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9896                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9897    }
9898}
9899
9900static void
9901bxe_init_internal(struct bxe_softc *sc,
9902                  uint32_t         load_code)
9903{
9904    switch (load_code) {
9905    case FW_MSG_CODE_DRV_LOAD_COMMON:
9906    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9907        bxe_init_internal_common(sc);
9908        /* no break */
9909
9910    case FW_MSG_CODE_DRV_LOAD_PORT:
9911        /* nothing to do */
9912        /* no break */
9913
9914    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9915        /* internal memory per function is initialized inside bxe_pf_init */
9916        break;
9917
9918    default:
9919        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9920        break;
9921    }
9922}
9923
9924static void
9925storm_memset_func_cfg(struct bxe_softc                         *sc,
9926                      struct tstorm_eth_function_common_config *tcfg,
9927                      uint16_t                                  abs_fid)
9928{
9929    uint32_t addr;
9930    size_t size;
9931
9932    addr = (BAR_TSTRORM_INTMEM +
9933            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9934    size = sizeof(struct tstorm_eth_function_common_config);
9935    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9936}
9937
9938static void
9939bxe_func_init(struct bxe_softc            *sc,
9940              struct bxe_func_init_params *p)
9941{
9942    struct tstorm_eth_function_common_config tcfg = { 0 };
9943
9944    if (CHIP_IS_E1x(sc)) {
9945        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9946    }
9947
9948    /* Enable the function in the FW */
9949    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9950    storm_memset_func_en(sc, p->func_id, 1);
9951
9952    /* spq */
9953    if (p->func_flgs & FUNC_FLG_SPQ) {
9954        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9955        REG_WR(sc,
9956               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9957               p->spq_prod);
9958    }
9959}
9960
9961/*
9962 * Calculates the per-VN min rates used for normalizing the min_rates and
9963 * stores them in the cmng init input (input->vnic_min_rate[vn]):
9964 *   the configured min rate for each VN,
9965 *     or
9966 *   DEF_MIN_RATE - if a VN's configured min rate is 0.
9967 * Hidden VNs are skipped (their min rate is forced to 0).
9968 * If all the configured min rates are 0, or ETS is enabled, the fairness
9969 * algorithm is deactivated.
9970 */
9971static void
9972bxe_calc_vn_min(struct bxe_softc       *sc,
9973                struct cmng_init_input *input)
9974{
9975    uint32_t vn_cfg;
9976    uint32_t vn_min_rate;
9977    int all_zero = 1;
9978    int vn;
9979
9980    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9981        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9982        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9983                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9984
9985        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9986            /* skip hidden VNs */
9987            vn_min_rate = 0;
9988        } else if (!vn_min_rate) {
9989            /* If min rate is zero - set it to 100 */
9990            vn_min_rate = DEF_MIN_RATE;
9991        } else {
9992            all_zero = 0;
9993        }
9994
9995        input->vnic_min_rate[vn] = vn_min_rate;
9996    }
9997
9998    /* if ETS or all min rates are zeros - disable fairness */
9999    if (BXE_IS_ETS_ENABLED(sc)) {
10000        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10001        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10002    } else if (all_zero) {
10003        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10004        BLOGD(sc, DBG_LOAD,
10005              "Fairness disabled (all MIN values are zeroes)\n");
10006    } else {
10007        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10008    }
10009}
10010
10011static inline uint16_t
10012bxe_extract_max_cfg(struct bxe_softc *sc,
10013                    uint32_t         mf_cfg)
10014{
10015    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10016                        FUNC_MF_CFG_MAX_BW_SHIFT);
10017
10018    if (!max_cfg) {
10019        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10020        max_cfg = 100;
10021    }
10022
10023    return (max_cfg);
10024}
10025
10026static void
10027bxe_calc_vn_max(struct bxe_softc       *sc,
10028                int                    vn,
10029                struct cmng_init_input *input)
10030{
10031    uint16_t vn_max_rate;
10032    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10033    uint32_t max_cfg;
10034
10035    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10036        vn_max_rate = 0;
10037    } else {
10038        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10039
10040        if (IS_MF_SI(sc)) {
10041            /* max_cfg is a percentage of the link speed */
10042            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10043        } else { /* SD modes */
10044            /* max_cfg is absolute in 100Mb units */
10045            vn_max_rate = (max_cfg * 100);
10046        }
10047    }
10048
10049    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10050
10051    input->vnic_max_rate[vn] = vn_max_rate;
10052}
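
/*
 * Worked example with hypothetical numbers: on a 10000 Mbps (10 Gbps) link
 * with max_cfg == 25, the MF_SI branch above yields
 * vn_max_rate = (10000 * 25) / 100 = 2500, while an SD mode treats max_cfg as
 * absolute 100 Mbps units and yields 25 * 100 = 2500 as well.
 */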
10053
10054static void
10055bxe_cmng_fns_init(struct bxe_softc *sc,
10056                  uint8_t          read_cfg,
10057                  uint8_t          cmng_type)
10058{
10059    struct cmng_init_input input;
10060    int vn;
10061
10062    memset(&input, 0, sizeof(struct cmng_init_input));
10063
10064    input.port_rate = sc->link_vars.line_speed;
10065
10066    if (cmng_type == CMNG_FNS_MINMAX) {
10067        /* read mf conf from shmem */
10068        if (read_cfg) {
10069            bxe_read_mf_cfg(sc);
10070        }
10071
10072        /* get VN min rate and enable fairness if not 0 */
10073        bxe_calc_vn_min(sc, &input);
10074
10075        /* get VN max rate */
10076        if (sc->port.pmf) {
10077            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10078                bxe_calc_vn_max(sc, vn, &input);
10079            }
10080        }
10081
10082        /* always enable rate shaping and fairness */
10083        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10084
10085        ecore_init_cmng(&input, &sc->cmng);
10086        return;
10087    }
10088
10089    /* rate shaping and fairness are disabled */
10090    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10091}
10092
10093static int
10094bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10095{
10096    if (CHIP_REV_IS_SLOW(sc)) {
10097        return (CMNG_FNS_NONE);
10098    }
10099
10100    if (IS_MF(sc)) {
10101        return (CMNG_FNS_MINMAX);
10102    }
10103
10104    return (CMNG_FNS_NONE);
10105}
10106
10107static void
10108storm_memset_cmng(struct bxe_softc *sc,
10109                  struct cmng_init *cmng,
10110                  uint8_t          port)
10111{
10112    int vn;
10113    int func;
10114    uint32_t addr;
10115    size_t size;
10116
10117    addr = (BAR_XSTRORM_INTMEM +
10118            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10119    size = sizeof(struct cmng_struct_per_port);
10120    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10121
10122    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10123        func = func_by_vn(sc, vn);
10124
10125        addr = (BAR_XSTRORM_INTMEM +
10126                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10127        size = sizeof(struct rate_shaping_vars_per_vn);
10128        ecore_storm_memset_struct(sc, addr, size,
10129                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10130
10131        addr = (BAR_XSTRORM_INTMEM +
10132                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10133        size = sizeof(struct fairness_vars_per_vn);
10134        ecore_storm_memset_struct(sc, addr, size,
10135                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10136    }
10137}
10138
10139static void
10140bxe_pf_init(struct bxe_softc *sc)
10141{
10142    struct bxe_func_init_params func_init = { 0 };
10143    struct event_ring_data eq_data = { { 0 } };
10144    uint16_t flags;
10145
10146    if (!CHIP_IS_E1x(sc)) {
10147        /* reset IGU PF statistics: MSIX + ATTN */
10148        /* PF */
10149        REG_WR(sc,
10150               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10151                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10152                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10153               0);
10154        /* ATTN */
10155        REG_WR(sc,
10156               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10157                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10158                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10159                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10160               0);
10161    }
10162
10163    /* function setup flags */
10164    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10165
10166    /*
10167     * This flag is relevant for E1x only.
10168     * E2 doesn't have a TPA configuration in a function level.
10169     */
10170    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10171
10172    func_init.func_flgs = flags;
10173    func_init.pf_id     = SC_FUNC(sc);
10174    func_init.func_id   = SC_FUNC(sc);
10175    func_init.spq_map   = sc->spq_dma.paddr;
10176    func_init.spq_prod  = sc->spq_prod_idx;
10177
10178    bxe_func_init(sc, &func_init);
10179
10180    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10181
10182    /*
10183     * Congestion management values depend on the link rate.
10184     * There is no active link so initial link rate is set to 10Gbps.
10185     * When the link comes up the congestion management values are
10186     * re-calculated according to the actual link rate.
10187     */
10188    sc->link_vars.line_speed = SPEED_10000;
10189    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10190
10191    /* Only the PMF sets the HW */
10192    if (sc->port.pmf) {
10193        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10194    }
10195
10196    /* init Event Queue - PCI bus guarantees correct endianness */
10197    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10198    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10199    eq_data.producer     = sc->eq_prod;
10200    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10201    eq_data.sb_id        = DEF_SB_ID;
10202    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10203}
10204
10205static void
10206bxe_hc_int_enable(struct bxe_softc *sc)
10207{
10208    int port = SC_PORT(sc);
10209    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10210    uint32_t val = REG_RD(sc, addr);
10211    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10212    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10213                           (sc->intr_count == 1)) ? TRUE : FALSE;
10214    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10215
10216    if (msix) {
10217        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10218                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10219        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10220                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10221        if (single_msix) {
10222            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10223        }
10224    } else if (msi) {
10225        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10226        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10227                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10228                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10229    } else {
10230        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10231                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10232                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10233                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10234
10235        if (!CHIP_IS_E1(sc)) {
10236            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10237                  val, port, addr);
10238
10239            REG_WR(sc, addr, val);
10240
10241            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10242        }
10243    }
10244
10245    if (CHIP_IS_E1(sc)) {
10246        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10247    }
10248
10249    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10250          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10251
10252    REG_WR(sc, addr, val);
10253
10254    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10255    mb();
10256
10257    if (!CHIP_IS_E1(sc)) {
10258        /* init leading/trailing edge */
10259        if (IS_MF(sc)) {
10260            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10261            if (sc->port.pmf) {
10262                /* enable nig and gpio3 attention */
10263                val |= 0x1100;
10264            }
10265        } else {
10266            val = 0xffff;
10267        }
10268
10269        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10270        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10271    }
10272
10273    /* make sure that interrupts are indeed enabled from here on */
10274    mb();
10275}
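
/*
 * Worked example for the leading/trailing edge value above (VN chosen for
 * illustration): with SC_VN(sc) == 2 in MF mode, val = 0xee0f | (1 << 6) =
 * 0xee4f, and if this function is also the PMF the NIG/GPIO3 attention bits
 * are added: 0xee4f | 0x1100 = 0xff4f. In non-MF mode all bits (0xffff) are
 * used.
 */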
10276
10277static void
10278bxe_igu_int_enable(struct bxe_softc *sc)
10279{
10280    uint32_t val;
10281    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10282    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10283                           (sc->intr_count == 1)) ? TRUE : FALSE;
10284    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10285
10286    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10287
10288    if (msix) {
10289        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10290                 IGU_PF_CONF_SINGLE_ISR_EN);
10291        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10292                IGU_PF_CONF_ATTN_BIT_EN);
10293        if (single_msix) {
10294            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10295        }
10296    } else if (msi) {
10297        val &= ~IGU_PF_CONF_INT_LINE_EN;
10298        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10299                IGU_PF_CONF_ATTN_BIT_EN |
10300                IGU_PF_CONF_SINGLE_ISR_EN);
10301    } else {
10302        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10303        val |= (IGU_PF_CONF_INT_LINE_EN |
10304                IGU_PF_CONF_ATTN_BIT_EN |
10305                IGU_PF_CONF_SINGLE_ISR_EN);
10306    }
10307
10308    /* clean previous status - need to configure the IGU prior to ack */
10309    if ((!msix) || single_msix) {
10310        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10311        bxe_ack_int(sc);
10312    }
10313
10314    val |= IGU_PF_CONF_FUNC_EN;
10315
10316    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10317          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10318
10319    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10320
10321    mb();
10322
10323    /* init leading/trailing edge */
10324    if (IS_MF(sc)) {
10325        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10326        if (sc->port.pmf) {
10327            /* enable nig and gpio3 attention */
10328            val |= 0x1100;
10329        }
10330    } else {
10331        val = 0xffff;
10332    }
10333
10334    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10335    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10336
10337    /* make sure that interrupts are indeed enabled from here on */
10338    mb();
10339}
10340
10341static void
10342bxe_int_enable(struct bxe_softc *sc)
10343{
10344    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10345        bxe_hc_int_enable(sc);
10346    } else {
10347        bxe_igu_int_enable(sc);
10348    }
10349}
10350
10351static void
10352bxe_hc_int_disable(struct bxe_softc *sc)
10353{
10354    int port = SC_PORT(sc);
10355    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10356    uint32_t val = REG_RD(sc, addr);
10357
10358    /*
10359     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10360     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10361     * HC block.
10362     */
10363    if (CHIP_IS_E1(sc)) {
10364        /*
10365         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10366         * to prevent the HC from sending interrupts after we exit this function
10367         */
10368        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10369
10370        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10371                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10372                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10373    } else {
10374        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10375                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10376                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10377                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10378    }
10379
10380    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10381
10382    /* flush all outstanding writes */
10383    mb();
10384
10385    REG_WR(sc, addr, val);
10386    if (REG_RD(sc, addr) != val) {
10387        BLOGE(sc, "proper val not read from HC IGU!\n");
10388    }
10389}
10390
10391static void
10392bxe_igu_int_disable(struct bxe_softc *sc)
10393{
10394    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10395
10396    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10397             IGU_PF_CONF_INT_LINE_EN |
10398             IGU_PF_CONF_ATTN_BIT_EN);
10399
10400    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10401
10402    /* flush all outstanding writes */
10403    mb();
10404
10405    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10406    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10407        BLOGE(sc, "proper val not read from IGU!\n");
10408    }
10409}
10410
10411static void
10412bxe_int_disable(struct bxe_softc *sc)
10413{
10414    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10415        bxe_hc_int_disable(sc);
10416    } else {
10417        bxe_igu_int_disable(sc);
10418    }
10419}
10420
10421static void
10422bxe_nic_init(struct bxe_softc *sc,
10423             int              load_code)
10424{
10425    int i;
10426
10427    for (i = 0; i < sc->num_queues; i++) {
10428        bxe_init_eth_fp(sc, i);
10429    }
10430
10431    rmb(); /* ensure status block indices were read */
10432
10433    bxe_init_rx_rings(sc);
10434    bxe_init_tx_rings(sc);
10435
10436    if (IS_VF(sc)) {
10437        return;
10438    }
10439
10440    /* initialize MOD_ABS interrupts */
10441    elink_init_mod_abs_int(sc, &sc->link_vars,
10442                           sc->devinfo.chip_id,
10443                           sc->devinfo.shmem_base,
10444                           sc->devinfo.shmem2_base,
10445                           SC_PORT(sc));
10446
10447    bxe_init_def_sb(sc);
10448    bxe_update_dsb_idx(sc);
10449    bxe_init_sp_ring(sc);
10450    bxe_init_eq_ring(sc);
10451    bxe_init_internal(sc, load_code);
10452    bxe_pf_init(sc);
10453    bxe_stats_init(sc);
10454
10455    /* flush all before enabling interrupts */
10456    mb();
10457
10458    bxe_int_enable(sc);
10459
10460    /* check for SPIO5 */
10461    bxe_attn_int_deasserted0(sc,
10462                             REG_RD(sc,
10463                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10464                                     SC_PORT(sc)*4)) &
10465                             AEU_INPUTS_ATTN_BITS_SPIO5);
10466}
10467
10468static inline void
10469bxe_init_objs(struct bxe_softc *sc)
10470{
10471    /* mcast rules must be added to tx if tx switching is enabled */
10472    ecore_obj_type o_type =
10473        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10474                                         ECORE_OBJ_TYPE_RX;
10475
10476    /* RX_MODE controlling object */
10477    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10478
10479    /* multicast configuration controlling object */
10480    ecore_init_mcast_obj(sc,
10481                         &sc->mcast_obj,
10482                         sc->fp[0].cl_id,
10483                         sc->fp[0].index,
10484                         SC_FUNC(sc),
10485                         SC_FUNC(sc),
10486                         BXE_SP(sc, mcast_rdata),
10487                         BXE_SP_MAPPING(sc, mcast_rdata),
10488                         ECORE_FILTER_MCAST_PENDING,
10489                         &sc->sp_state,
10490                         o_type);
10491
10492    /* Setup CAM credit pools */
10493    ecore_init_mac_credit_pool(sc,
10494                               &sc->macs_pool,
10495                               SC_FUNC(sc),
10496                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10497                                                 VNICS_PER_PATH(sc));
10498
10499    ecore_init_vlan_credit_pool(sc,
10500                                &sc->vlans_pool,
10501                                SC_ABS_FUNC(sc) >> 1,
10502                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10503                                                  VNICS_PER_PATH(sc));
10504
10505    /* RSS configuration object */
10506    ecore_init_rss_config_obj(sc,
10507                              &sc->rss_conf_obj,
10508                              sc->fp[0].cl_id,
10509                              sc->fp[0].index,
10510                              SC_FUNC(sc),
10511                              SC_FUNC(sc),
10512                              BXE_SP(sc, rss_rdata),
10513                              BXE_SP_MAPPING(sc, rss_rdata),
10514                              ECORE_FILTER_RSS_CONF_PENDING,
10515                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10516}
10517
10518/*
10519 * Initialize the function. This must be called before sending CLIENT_SETUP
10520 * for the first client.
10521 */
10522static inline int
10523bxe_func_start(struct bxe_softc *sc)
10524{
10525    struct ecore_func_state_params func_params = { NULL };
10526    struct ecore_func_start_params *start_params = &func_params.params.start;
10527
10528    /* Prepare parameters for function state transitions */
10529    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10530
10531    func_params.f_obj = &sc->func_obj;
10532    func_params.cmd = ECORE_F_CMD_START;
10533
10534    /* Function parameters */
10535    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10536    start_params->sd_vlan_tag = OVLAN(sc);
10537
10538    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10539        start_params->network_cos_mode = STATIC_COS;
10540    } else { /* CHIP_IS_E1X */
10541        start_params->network_cos_mode = FW_WRR;
10542    }
10543
10544    //start_params->gre_tunnel_mode = 0;
10545    //start_params->gre_tunnel_rss  = 0;
10546
10547    return (ecore_func_state_change(sc, &func_params));
10548}
10549
10550static int
10551bxe_set_power_state(struct bxe_softc *sc,
10552                    uint8_t          state)
10553{
10554    uint16_t pmcsr;
10555
10556    /* If there is no power capability, just warn and succeed */
10557    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10558        BLOGW(sc, "No power capability\n");
10559        return (0);
10560    }
10561
10562    pmcsr = pci_read_config(sc->dev,
10563                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10564                            2);
10565
10566    switch (state) {
10567    case PCI_PM_D0:
10568        pci_write_config(sc->dev,
10569                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10570                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10571
10572        if (pmcsr & PCIM_PSTAT_DMASK) {
10573            /* delay required during transition out of D3hot */
10574            DELAY(20000);
10575        }
10576
10577        break;
10578
10579    case PCI_PM_D3hot:
10580        /* XXX if there are other clients above don't shut down the power */
10581
10582        /* don't shut down the power for emulation and FPGA */
10583        if (CHIP_REV_IS_SLOW(sc)) {
10584            return (0);
10585        }
10586
10587        pmcsr &= ~PCIM_PSTAT_DMASK;
10588        pmcsr |= PCIM_PSTAT_D3;
10589
10590        if (sc->wol) {
10591            pmcsr |= PCIM_PSTAT_PMEENABLE;
10592        }
10593
10594        pci_write_config(sc->dev,
10595                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10596                         pmcsr, 4);
10597
10598        /*
10599         * No more memory access after this point until the device is
10600         * brought back to the D0 state.
10601         */
10602        break;
10603
10604    default:
10605        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10606            state, pmcsr);
10607        return (-1);
10608    }
10609
10610    return (0);
10611}
10612
10613
10614/* return true if succeeded to acquire the lock */
10615static uint8_t
10616bxe_trylock_hw_lock(struct bxe_softc *sc,
10617                    uint32_t         resource)
10618{
10619    uint32_t lock_status;
10620    uint32_t resource_bit = (1 << resource);
10621    int func = SC_FUNC(sc);
10622    uint32_t hw_lock_control_reg;
10623
10624    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10625
10626    /* Validating that the resource is within range */
10627    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10628        BLOGD(sc, DBG_LOAD,
10629              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10630              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10631        return (FALSE);
10632    }
10633
10634    if (func <= 5) {
10635        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10636    } else {
10637        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10638    }
10639
10640    /* try to acquire the lock */
10641    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10642    lock_status = REG_RD(sc, hw_lock_control_reg);
10643    if (lock_status & resource_bit) {
10644        return (TRUE);
10645    }
10646
10647    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10648        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10649        lock_status, resource_bit);
10650
10651    return (FALSE);
10652}
10653
10654/*
10655 * Get the recovery leader resource id according to the engine this function
10656 * belongs to. Currently only 2 engines are supported.
10657 */
10658static int
10659bxe_get_leader_lock_resource(struct bxe_softc *sc)
10660{
10661    if (SC_PATH(sc)) {
10662        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10663    } else {
10664        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10665    }
10666}
10667
10668/* try to acquire a leader lock for current engine */
10669static uint8_t
10670bxe_trylock_leader_lock(struct bxe_softc *sc)
10671{
10672    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10673}
10674
10675static int
10676bxe_release_leader_lock(struct bxe_softc *sc)
10677{
10678    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10679}
10680
10681/* close gates #2, #3 and #4 */
10682static void
10683bxe_set_234_gates(struct bxe_softc *sc,
10684                  uint8_t          close)
10685{
10686    uint32_t val;
10687
10688    /* gates #2 and #4a are closed/opened for "not E1" only */
10689    if (!CHIP_IS_E1(sc)) {
10690        /* #4 */
10691        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10692        /* #2 */
10693        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10694    }
10695
10696    /* #3 */
10697    if (CHIP_IS_E1x(sc)) {
10698        /* prevent interrupts from HC on both ports */
10699        val = REG_RD(sc, HC_REG_CONFIG_1);
10700        REG_WR(sc, HC_REG_CONFIG_1,
10701               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10702               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10703
10704        val = REG_RD(sc, HC_REG_CONFIG_0);
10705        REG_WR(sc, HC_REG_CONFIG_0,
10706               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10707               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10708    } else {
10709        /* Prevent incoming interrupts in IGU */
10710        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10711
10712        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10713               (!close) ?
10714               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10715               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10716    }
10717
10718    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10719          close ? "closing" : "opening");
10720
10721    wmb();
10722}
10723
10724/* poll for pending writes bit, it should get cleared in no more than 1s */
10725static int
10726bxe_er_poll_igu_vq(struct bxe_softc *sc)
10727{
10728    uint32_t cnt = 1000;
10729    uint32_t pend_bits = 0;
10730
10731    do {
10732        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10733
10734        if (pend_bits == 0) {
10735            break;
10736        }
10737
10738        DELAY(1000);
10739    } while (--cnt > 0);
10740
10741    if (cnt == 0) {
10742        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10743        return (-1);
10744    }
10745
10746    return (0);
10747}
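
/*
 * Timing note for the poll above: up to 1000 iterations with a 1000 usec
 * delay between reads gives roughly the 1 second budget mentioned in the
 * comment on this function.
 */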
10748
10749#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10750
10751static void
10752bxe_clp_reset_prep(struct bxe_softc *sc,
10753                   uint32_t         *magic_val)
10754{
10755    /* Do some magic... */
10756    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10757    *magic_val = val & SHARED_MF_CLP_MAGIC;
10758    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10759}
10760
10761/* restore the value of the 'magic' bit */
10762static void
10763bxe_clp_reset_done(struct bxe_softc *sc,
10764                   uint32_t         magic_val)
10765{
10766    /* Restore the 'magic' bit value... */
10767    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10768    MFCFG_WR(sc, shared_mf_config.clp_mb,
10769              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10770}
10771
10772/* prepare for MCP reset, takes care of CLP configurations */
10773static void
10774bxe_reset_mcp_prep(struct bxe_softc *sc,
10775                   uint32_t         *magic_val)
10776{
10777    uint32_t shmem;
10778    uint32_t validity_offset;
10779
10780    /* set `magic' bit in order to save MF config */
10781    if (!CHIP_IS_E1(sc)) {
10782        bxe_clp_reset_prep(sc, magic_val);
10783    }
10784
10785    /* get shmem offset */
10786    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10787    validity_offset =
10788        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10789
10790    /* Clear validity map flags */
10791    if (shmem > 0) {
10792        REG_WR(sc, shmem + validity_offset, 0);
10793    }
10794}
10795
10796#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10797#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10798
10799static void
10800bxe_mcp_wait_one(struct bxe_softc *sc)
10801{
10802    /* special handling for emulation and FPGA (10 times longer) */
10803    if (CHIP_REV_IS_SLOW(sc)) {
10804        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10805    } else {
10806        DELAY((MCP_ONE_TIMEOUT) * 1000);
10807    }
10808}
10809
10810/* initializes shmem_base and waits for the validity signature to appear */
10811static int
10812bxe_init_shmem(struct bxe_softc *sc)
10813{
10814    int cnt = 0;
10815    uint32_t val = 0;
10816
10817    do {
10818        sc->devinfo.shmem_base     =
10819        sc->link_params.shmem_base =
10820            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10821
10822        if (sc->devinfo.shmem_base) {
10823            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10824            if (val & SHR_MEM_VALIDITY_MB)
10825                return (0);
10826        }
10827
10828        bxe_mcp_wait_one(sc);
10829
10830    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10831
10832    BLOGE(sc, "BAD MCP validity signature\n");
10833
10834    return (-1);
10835}
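
/*
 * Timing note: the loop above polls (MCP_TIMEOUT / MCP_ONE_TIMEOUT) = 50
 * times, with each wait being MCP_ONE_TIMEOUT = 100 ms (10x longer on
 * emulation/FPGA), so the validity signature is given about 5 seconds to
 * appear before -1 is returned.
 */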
10836
10837static int
10838bxe_reset_mcp_comp(struct bxe_softc *sc,
10839                   uint32_t         magic_val)
10840{
10841    int rc = bxe_init_shmem(sc);
10842
10843    /* Restore the `magic' bit value */
10844    if (!CHIP_IS_E1(sc)) {
10845        bxe_clp_reset_done(sc, magic_val);
10846    }
10847
10848    return (rc);
10849}
10850
10851static void
10852bxe_pxp_prep(struct bxe_softc *sc)
10853{
10854    if (!CHIP_IS_E1(sc)) {
10855        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10856        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10857        wmb();
10858    }
10859}
10860
10861/*
10862 * Reset the whole chip except for:
10863 *      - PCIE core
10864 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10865 *      - IGU
10866 *      - MISC (including AEU)
10867 *      - GRC
10868 *      - RBCN, RBCP
10869 */
10870static void
10871bxe_process_kill_chip_reset(struct bxe_softc *sc,
10872                            uint8_t          global)
10873{
10874    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10875    uint32_t global_bits2, stay_reset2;
10876
10877    /*
10878     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10879     * (per chip) blocks.
10880     */
10881    global_bits2 =
10882        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10883        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10884
10885    /*
10886     * Don't reset the following blocks.
10887     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10888     *            reset, as in a 4-port device they might still be owned
10889     *            by the MCP (there is only one leader per path).
10890     */
10891    not_reset_mask1 =
10892        MISC_REGISTERS_RESET_REG_1_RST_HC |
10893        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10894        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10895
10896    not_reset_mask2 =
10897        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10898        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10899        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10900        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10901        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10902        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10903        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10904        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10905        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10906        MISC_REGISTERS_RESET_REG_2_PGLC |
10907        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10908        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10909        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10910        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10911        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10912        MISC_REGISTERS_RESET_REG_2_UMAC1;
10913
10914    /*
10915     * Keep the following blocks in reset:
10916     *  - all xxMACs are handled by the elink code.
10917     */
10918    stay_reset2 =
10919        MISC_REGISTERS_RESET_REG_2_XMAC |
10920        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10921
10922    /* Full reset masks according to the chip */
10923    reset_mask1 = 0xffffffff;
10924
10925    if (CHIP_IS_E1(sc))
10926        reset_mask2 = 0xffff;
10927    else if (CHIP_IS_E1H(sc))
10928        reset_mask2 = 0x1ffff;
10929    else if (CHIP_IS_E2(sc))
10930        reset_mask2 = 0xfffff;
10931    else /* CHIP_IS_E3 */
10932        reset_mask2 = 0x3ffffff;
10933
10934    /* Don't reset global blocks unless we need to */
10935    if (!global)
10936        reset_mask2 &= ~global_bits2;
10937
10938    /*
10939     * In case of attention in the QM, we need to reset PXP
10940     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10941     * because otherwise QM reset would release 'close the gates' shortly
10942     * before resetting the PXP, then the PSWRQ would send a write
10943     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10944     * read the payload data from PSWWR, but PSWWR would not
10945     * respond. The write queue in PGLUE would get stuck and DMAE commands
10946     * would not return. Therefore it's important to reset the second
10947     * reset register (containing the
10948     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10949     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10950     * bit).
10951     */
10952    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10953           reset_mask2 & (~not_reset_mask2));
10954
10955    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10956           reset_mask1 & (~not_reset_mask1));
10957
10958    mb();
10959    wmb();
10960
10961    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10962           reset_mask2 & (~stay_reset2));
10963
10964    mb();
10965    wmb();
10966
10967    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10968    wmb();
10969}
10970
10971static int
10972bxe_process_kill(struct bxe_softc *sc,
10973                 uint8_t          global)
10974{
10975    int cnt = 1000;
10976    uint32_t val = 0;
10977    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10978    uint32_t tags_63_32 = 0;
10979
10980    /* Empty the Tetris buffer, wait for 1s */
10981    do {
10982        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10983        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10984        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10985        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10986        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10987        if (CHIP_IS_E3(sc)) {
10988            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10989        }
10990
10991        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10992            ((port_is_idle_0 & 0x1) == 0x1) &&
10993            ((port_is_idle_1 & 0x1) == 0x1) &&
10994            (pgl_exp_rom2 == 0xffffffff) &&
10995            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
10996            break;
10997        DELAY(1000);
10998    } while (cnt-- > 0);
10999
11000    if (cnt <= 0) {
11001        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11002                  "are still outstanding read requests after 1s! "
11003                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11004                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11005              sr_cnt, blk_cnt, port_is_idle_0,
11006              port_is_idle_1, pgl_exp_rom2);
11007        return (-1);
11008    }
11009
11010    mb();
11011
11012    /* Close gates #2, #3 and #4 */
11013    bxe_set_234_gates(sc, TRUE);
11014
11015    /* Poll for IGU VQs for 57712 and newer chips */
11016    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11017        return (-1);
11018    }
11019
11020    /* XXX indicate that "process kill" is in progress to MCP */
11021
11022    /* clear "unprepared" bit */
11023    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11024    mb();
11025
11026    /* Make sure all is written to the chip before the reset */
11027    wmb();
11028
11029    /*
11030     * Wait for 1ms to empty GLUE and PCI-E core queues,
11031     * PSWHST, GRC and PSWRD Tetris buffer.
11032     */
11033    DELAY(1000);
11034
11035    /* Prepare for chip reset: */
11036    /* MCP */
11037    if (global) {
11038        bxe_reset_mcp_prep(sc, &val);
11039    }
11040
11041    /* PXP */
11042    bxe_pxp_prep(sc);
11043    mb();
11044
11045    /* reset the chip */
11046    bxe_process_kill_chip_reset(sc, global);
11047    mb();
11048
11049    /* clear errors in PGB */
11050    if (!CHIP_IS_E1(sc))
11051        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11052
11053    /* Recover after reset: */
11054    /* MCP */
11055    if (global && bxe_reset_mcp_comp(sc, val)) {
11056        return (-1);
11057    }
11058
11059    /* XXX add resetting the NO_MCP mode DB here */
11060
11061    /* Open the gates #2, #3 and #4 */
11062    bxe_set_234_gates(sc, FALSE);
11063
11064    /* XXX
11065     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11066     * re-enable attentions
11067     */
11068
11069    return (0);
11070}
11071
11072static int
11073bxe_leader_reset(struct bxe_softc *sc)
11074{
11075    int rc = 0;
11076    uint8_t global = bxe_reset_is_global(sc);
11077    uint32_t load_code;
11078
11079    /*
11080     * If not going to reset MCP, load "fake" driver to reset HW while
11081     * driver is owner of the HW.
11082     */
11083    if (!global && !BXE_NOMCP(sc)) {
11084        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11085                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11086        if (!load_code) {
11087            BLOGE(sc, "MCP response failure, aborting\n");
11088            rc = -1;
11089            goto exit_leader_reset;
11090        }
11091
11092        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11093            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11094            BLOGE(sc, "MCP unexpected response, aborting\n");
11095            rc = -1;
11096            goto exit_leader_reset2;
11097        }
11098
11099        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11100        if (!load_code) {
11101            BLOGE(sc, "MCP response failure, aborting\n");
11102            rc = -1;
11103            goto exit_leader_reset2;
11104        }
11105    }
11106
11107    /* try to recover after the failure */
11108    if (bxe_process_kill(sc, global)) {
11109        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11110        rc = -1;
11111        goto exit_leader_reset2;
11112    }
11113
11114    /*
11115     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11116     * state.
11117     */
11118    bxe_set_reset_done(sc);
11119    if (global) {
11120        bxe_clear_reset_global(sc);
11121    }
11122
11123exit_leader_reset2:
11124
11125    /* unload "fake driver" if it was loaded */
11126    if (!global && !BXE_NOMCP(sc)) {
11127        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11128        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11129    }
11130
11131exit_leader_reset:
11132
11133    sc->is_leader = 0;
11134    bxe_release_leader_lock(sc);
11135
11136    mb();
11137    return (rc);
11138}
11139
11140/*
11141 * prepare INIT transition, parameters configured:
11142 *   - HC configuration
11143 *   - Queue's CDU context
11144 */
11145static void
11146bxe_pf_q_prep_init(struct bxe_softc               *sc,
11147                   struct bxe_fastpath            *fp,
11148                   struct ecore_queue_init_params *init_params)
11149{
11150    uint8_t cos;
11151    int cxt_index, cxt_offset;
11152
11153    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11154    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11155
11156    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11157    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11158
11159    /* HC rate */
11160    init_params->rx.hc_rate =
11161        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11162    init_params->tx.hc_rate =
11163        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11164
11165    /* FW SB ID */
11166    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11167
11168    /* CQ index among the SB indices */
11169    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11170    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11171
11172    /* set maximum number of COSs supported by this queue */
11173    init_params->max_cos = sc->max_cos;
11174
11175    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11176          fp->index, init_params->max_cos);
11177
11178    /* set the context pointers queue object */
11179    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11180        /* XXX change index/cid here if ever support multiple tx CoS */
11181        /* fp->txdata[cos]->cid */
11182        cxt_index = fp->index / ILT_PAGE_CIDS;
11183        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11184        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11185    }
11186}
11187
11188/* set flags that are common for the Tx-only and not normal connections */
11189static unsigned long
11190bxe_get_common_flags(struct bxe_softc    *sc,
11191                     struct bxe_fastpath *fp,
11192                     uint8_t             zero_stats)
11193{
11194    unsigned long flags = 0;
11195
11196    /* PF driver will always initialize the Queue to an ACTIVE state */
11197    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11198
11199    /*
11200     * tx only connections collect statistics (on the same index as the
11201     * parent connection). The statistics are zeroed when the parent
11202     * connection is initialized.
11203     */
11204
11205    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11206    if (zero_stats) {
11207        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11208    }
11209
11210    /*
11211     * tx only connections can support tx-switching, though their
11212     * CoS-ness doesn't survive the loopback
11213     */
11214    if (sc->flags & BXE_TX_SWITCHING) {
11215        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11216    }
11217
11218    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11219
11220    return (flags);
11221}
11222
11223static unsigned long
11224bxe_get_q_flags(struct bxe_softc    *sc,
11225                struct bxe_fastpath *fp,
11226                uint8_t             leading)
11227{
11228    unsigned long flags = 0;
11229
11230    if (IS_MF_SD(sc)) {
11231        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11232    }
11233
11234    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11235        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11236#if __FreeBSD_version >= 800000
11237        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11238#endif
11239    }
11240
11241    if (leading) {
11242        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11243        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11244    }
11245
11246    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11247
11248    /* merge with common flags */
11249    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11250}
11251
11252static void
11253bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11254                      struct bxe_fastpath               *fp,
11255                      struct ecore_general_setup_params *gen_init,
11256                      uint8_t                           cos)
11257{
11258    gen_init->stat_id = bxe_stats_id(fp);
11259    gen_init->spcl_id = fp->cl_id;
11260    gen_init->mtu = sc->mtu;
11261    gen_init->cos = cos;
11262}
11263
11264static void
11265bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11266                 struct bxe_fastpath           *fp,
11267                 struct rxq_pause_params       *pause,
11268                 struct ecore_rxq_setup_params *rxq_init)
11269{
11270    uint8_t max_sge = 0;
11271    uint16_t sge_sz = 0;
11272    uint16_t tpa_agg_size = 0;
11273
11274    pause->sge_th_lo = SGE_TH_LO(sc);
11275    pause->sge_th_hi = SGE_TH_HI(sc);
11276
11277    /* validate SGE ring has enough to cross high threshold */
11278    if (sc->dropless_fc &&
11279            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11280            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11281        BLOGW(sc, "sge ring threshold limit\n");
11282    }
11283
11284    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11285    tpa_agg_size = (2 * sc->mtu);
11286    if (tpa_agg_size < sc->max_aggregation_size) {
11287        tpa_agg_size = sc->max_aggregation_size;
11288    }
11289
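    /*
     * max_sge: worst-case number of SGE elements needed to hold one
     * MTU-sized frame, rounded up to a whole element (each element covers
     * PAGES_PER_SGE pages). sge_sz is capped so it fits in 16 bits.
     */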
11290    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11291    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11292                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11293    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11294
11295    /* pause - not for e1 */
11296    if (!CHIP_IS_E1(sc)) {
11297        pause->bd_th_lo = BD_TH_LO(sc);
11298        pause->bd_th_hi = BD_TH_HI(sc);
11299
11300        pause->rcq_th_lo = RCQ_TH_LO(sc);
11301        pause->rcq_th_hi = RCQ_TH_HI(sc);
11302
11303        /* validate rings have enough entries to cross high thresholds */
11304        if (sc->dropless_fc &&
11305            pause->bd_th_hi + FW_PREFETCH_CNT >
11306            sc->rx_ring_size) {
11307            BLOGW(sc, "rx bd ring threshold limit\n");
11308        }
11309
11310        if (sc->dropless_fc &&
11311            pause->rcq_th_hi + FW_PREFETCH_CNT >
11312            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11313            BLOGW(sc, "rcq ring threshold limit\n");
11314        }
11315
11316        pause->pri_map = 1;
11317    }
11318
11319    /* rxq setup */
11320    rxq_init->dscr_map   = fp->rx_dma.paddr;
11321    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11322    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11323    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11324
11325    /*
11326     * This is the maximum number of data bytes that may be placed on a BD
11327     * (not including padding).
11328     */
11329    rxq_init->buf_sz = (fp->rx_buf_size -
11330                        IP_HEADER_ALIGNMENT_PADDING);
11331
11332    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11333    rxq_init->tpa_agg_sz      = tpa_agg_size;
11334    rxq_init->sge_buf_sz      = sge_sz;
11335    rxq_init->max_sges_pkt    = max_sge;
11336    rxq_init->rss_engine_id   = SC_FUNC(sc);
11337    rxq_init->mcast_engine_id = SC_FUNC(sc);
11338
11339    /*
11340     * Maximum number of simultaneous TPA aggregations for this queue.
11341     * For PF clients it should be the maximum available number.
11342     * VF driver(s) may want to define it to a smaller value.
11343     */
11344    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11345
11346    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11347    rxq_init->fw_sb_id = fp->fw_sb_id;
11348
11349    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11350
11351    /*
11352     * configure silent vlan removal
11353     * if multi function mode is afex, then mask default vlan
11354     */
11355    if (IS_MF_AFEX(sc)) {
11356        rxq_init->silent_removal_value =
11357            sc->devinfo.mf_info.afex_def_vlan_tag;
11358        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11359    }
11360}
11361
11362static void
11363bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11364                 struct bxe_fastpath           *fp,
11365                 struct ecore_txq_setup_params *txq_init,
11366                 uint8_t                       cos)
11367{
11368    /*
11369     * XXX If multiple CoS is ever supported then each fastpath structure
11370     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11371     * fp->txdata[cos]->tx_dma.paddr;
11372     */
11373    txq_init->dscr_map     = fp->tx_dma.paddr;
11374    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11375    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11376    txq_init->fw_sb_id     = fp->fw_sb_id;
11377
11378    /*
11379     * set the TSS leading client id for TX classification to the
11380     * leading RSS client id
11381     */
11382    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11383}
11384
11385/*
11386 * This function performs 2 steps in a queue state machine:
11387 *   1) RESET->INIT
11388 *   2) INIT->SETUP
11389 */
11390static int
11391bxe_setup_queue(struct bxe_softc    *sc,
11392                struct bxe_fastpath *fp,
11393                uint8_t             leading)
11394{
11395    struct ecore_queue_state_params q_params = { NULL };
11396    struct ecore_queue_setup_params *setup_params =
11397                        &q_params.params.setup;
11398    int rc;
11399
11400    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11401
11402    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11403
11404    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11405
11406    /* we want to wait for completion in this context */
11407    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11408
11409    /* prepare the INIT parameters */
11410    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11411
11412    /* Set the command */
11413    q_params.cmd = ECORE_Q_CMD_INIT;
11414
11415    /* Change the state to INIT */
11416    rc = ecore_queue_state_change(sc, &q_params);
11417    if (rc) {
11418        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11419        return (rc);
11420    }
11421
11422    BLOGD(sc, DBG_LOAD, "init complete\n");
11423
11424    /* now move the Queue to the SETUP state */
11425    memset(setup_params, 0, sizeof(*setup_params));
11426
11427    /* set Queue flags */
11428    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11429
11430    /* set general SETUP parameters */
11431    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11432                          FIRST_TX_COS_INDEX);
11433
11434    bxe_pf_rx_q_prep(sc, fp,
11435                     &setup_params->pause_params,
11436                     &setup_params->rxq_params);
11437
11438    bxe_pf_tx_q_prep(sc, fp,
11439                     &setup_params->txq_params,
11440                     FIRST_TX_COS_INDEX);
11441
11442    /* Set the command */
11443    q_params.cmd = ECORE_Q_CMD_SETUP;
11444
11445    /* change the state to SETUP */
11446    rc = ecore_queue_state_change(sc, &q_params);
11447    if (rc) {
11448        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11449        return (rc);
11450    }
11451
11452    return (rc);
11453}
11454
11455static int
11456bxe_setup_leading(struct bxe_softc *sc)
11457{
11458    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11459}
11460
11461static int
11462bxe_config_rss_pf(struct bxe_softc            *sc,
11463                  struct ecore_rss_config_obj *rss_obj,
11464                  uint8_t                     config_hash)
11465{
11466    struct ecore_config_rss_params params = { NULL };
11467    int i;
11468
11469    /*
11470     * Although RSS is meaningless when there is a single HW queue, we
11471     * still need it enabled in order to have the HW Rx hash generated.
11472     */
11473
11474    params.rss_obj = rss_obj;
11475
11476    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11477
11478    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11479
11480    /* RSS configuration */
11481    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11482    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11483    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11484    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11485    if (rss_obj->udp_rss_v4) {
11486        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11487    }
11488    if (rss_obj->udp_rss_v6) {
11489        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11490    }
11491
11492    /* Hash bits */
11493    params.rss_result_mask = MULTI_MASK;
11494
11495    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11496
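    /*
     * When config_hash is set, program a freshly randomized RSS key and
     * request that the searcher (SRCH) configuration be updated as well.
     */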
11497    if (config_hash) {
11498        /* RSS keys */
11499        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11500            params.rss_key[i] = arc4random();
11501        }
11502
11503        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11504    }
11505
11506    return (ecore_config_rss(sc, &params));
11507}
11508
11509static int
11510bxe_config_rss_eth(struct bxe_softc *sc,
11511                   uint8_t          config_hash)
11512{
11513    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11514}
11515
11516static int
11517bxe_init_rss_pf(struct bxe_softc *sc)
11518{
11519    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11520    int i;
11521
11522    /*
11523     * Prepare the initial contents of the indirection table if
11524     * RSS is enabled
11525     */
11526    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11527        sc->rss_conf_obj.ind_table[i] =
11528            (sc->fp->cl_id + (i % num_eth_queues));
11529    }
11530
11531    if (sc->udp_rss) {
11532        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11533    }
11534
11535    /*
11536     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11537     * per-port, so if explicit configuration is needed, do it only
11538     * for a PMF.
11539     *
11540     * For 57712 and newer it's a per-function configuration.
11541     */
11542    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11543}
11544
11545static int
11546bxe_set_mac_one(struct bxe_softc          *sc,
11547                uint8_t                   *mac,
11548                struct ecore_vlan_mac_obj *obj,
11549                uint8_t                   set,
11550                int                       mac_type,
11551                unsigned long             *ramrod_flags)
11552{
11553    struct ecore_vlan_mac_ramrod_params ramrod_param;
11554    int rc;
11555
11556    memset(&ramrod_param, 0, sizeof(ramrod_param));
11557
11558    /* fill in general parameters */
11559    ramrod_param.vlan_mac_obj = obj;
11560    ramrod_param.ramrod_flags = *ramrod_flags;
11561
11562    /* fill a user request section if needed */
11563    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11564        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11565
11566        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11567
11568        /* Set the command: ADD or DEL */
11569        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11570                                            ECORE_VLAN_MAC_DEL;
11571    }
11572
11573    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11574
11575    if (rc == ECORE_EXISTS) {
11576        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11577        /* do not treat adding same MAC as error */
11578        rc = 0;
11579    } else if (rc < 0) {
11580        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11581    }
11582
11583    return (rc);
11584}
11585
11586static int
11587bxe_set_eth_mac(struct bxe_softc *sc,
11588                uint8_t          set)
11589{
11590    unsigned long ramrod_flags = 0;
11591
11592    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11593
11594    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11595
11596    /* Eth MAC is set on RSS leading client (fp[0]) */
11597    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11598                            &sc->sp_objs->mac_obj,
11599                            set, ECORE_ETH_MAC, &ramrod_flags));
11600}
11601
11602static int
11603bxe_get_cur_phy_idx(struct bxe_softc *sc)
11604{
11605    uint32_t sel_phy_idx = 0;
11606
11607    if (sc->link_params.num_phys <= 1) {
11608        return (ELINK_INT_PHY);
11609    }
11610
11611    if (sc->link_vars.link_up) {
11612        sel_phy_idx = ELINK_EXT_PHY1;
11613        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11614        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11615            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11616             ELINK_SUPPORTED_FIBRE))
11617            sel_phy_idx = ELINK_EXT_PHY2;
11618    } else {
11619        switch (elink_phy_selection(&sc->link_params)) {
11620        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11621        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11622        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11623               sel_phy_idx = ELINK_EXT_PHY1;
11624               break;
11625        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11626        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11627               sel_phy_idx = ELINK_EXT_PHY2;
11628               break;
11629        }
11630    }
11631
11632    return (sel_phy_idx);
11633}
11634
11635static int
11636bxe_get_link_cfg_idx(struct bxe_softc *sc)
11637{
11638    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11639
11640    /*
11641     * The selected active PHY index always refers to the post-swap ordering
11642     * (when PHY swapping is enabled), so when swapping is enabled we need
11643     * to reverse the index to find the matching configuration.
11644     */
11645
11646    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11647        if (sel_phy_idx == ELINK_EXT_PHY1)
11648            sel_phy_idx = ELINK_EXT_PHY2;
11649        else if (sel_phy_idx == ELINK_EXT_PHY2)
11650            sel_phy_idx = ELINK_EXT_PHY1;
11651    }
11652
11653    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11654}
11655
11656static void
11657bxe_set_requested_fc(struct bxe_softc *sc)
11658{
11659    /*
11660     * Initialize the link parameters structure variables.
11661     * It is recommended to turn off RX flow control for jumbo frames
11662     * for better performance.
11663     */
11664    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11665        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11666    } else {
11667        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11668    }
11669}
11670
11671static void
11672bxe_calc_fc_adv(struct bxe_softc *sc)
11673{
11674    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11675
11676
11677    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11678                                           ADVERTISED_Pause);
11679
11680    switch (sc->link_vars.ieee_fc &
11681            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11682
11683    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11684        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11685                                          ADVERTISED_Pause);
11686        break;
11687
11688    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11689        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11690        break;
11691
11692    default:
11693        break;
11694
11695    }
11696}
11697
11698static uint16_t
11699bxe_get_mf_speed(struct bxe_softc *sc)
11700{
11701    uint16_t line_speed = sc->link_vars.line_speed;
11702    if (IS_MF(sc)) {
11703        uint16_t maxCfg =
11704            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11705
11706        /* calculate the current MAX line speed limit for the MF devices */
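        /*
         * maxCfg is effectively a percentage of the line speed in SI mode,
         * and a bandwidth limit in units of 100 Mbps in SD mode.
         */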
11707        if (IS_MF_SI(sc)) {
11708            line_speed = (line_speed * maxCfg) / 100;
11709        } else { /* SD mode */
11710            uint16_t vn_max_rate = maxCfg * 100;
11711
11712            if (vn_max_rate < line_speed) {
11713                line_speed = vn_max_rate;
11714            }
11715        }
11716    }
11717
11718    return (line_speed);
11719}
11720
11721static void
11722bxe_fill_report_data(struct bxe_softc            *sc,
11723                     struct bxe_link_report_data *data)
11724{
11725    uint16_t line_speed = bxe_get_mf_speed(sc);
11726
11727    memset(data, 0, sizeof(*data));
11728
11729    /* fill the report data with the effective line speed */
11730    data->line_speed = line_speed;
11731
11732    /* Link is down */
11733    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11734        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11735    }
11736
11737    /* Full DUPLEX */
11738    if (sc->link_vars.duplex == DUPLEX_FULL) {
11739        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11740    }
11741
11742    /* Rx Flow Control is ON */
11743    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11744        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11745    }
11746
11747    /* Tx Flow Control is ON */
11748    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11749        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11750    }
11751}
11752
11753/* report link status to OS, should be called under phy_lock */
11754static void
11755bxe_link_report_locked(struct bxe_softc *sc)
11756{
11757    struct bxe_link_report_data cur_data;
11758
11759    /* reread mf_cfg */
11760    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11761        bxe_read_mf_cfg(sc);
11762    }
11763
11764    /* Read the current link report info */
11765    bxe_fill_report_data(sc, &cur_data);
11766
11767    /* Don't report link down or exactly the same link status twice */
11768    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11769        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11770                      &sc->last_reported_link.link_report_flags) &&
11771         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11772                      &cur_data.link_report_flags))) {
11773        return;
11774    }
11775
11776    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11777                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11778    sc->link_cnt++;
11779
11780    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11781    /* report new link params and remember the state for the next time */
11782    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11783
11784    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11785                     &cur_data.link_report_flags)) {
11786        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11787    } else {
11788        const char *duplex;
11789        const char *flow;
11790
11791        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11792                                   &cur_data.link_report_flags)) {
11793            duplex = "full";
11794            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11795        } else {
11796            duplex = "half";
11797            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11798        }
11799
11800        /*
11801         * Handle the FC at the end so that only these flags would be
11802         * possibly set. This way we may easily check if there is no FC
11803         * enabled.
11804         */
11805        if (cur_data.link_report_flags) {
11806            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11807                             &cur_data.link_report_flags) &&
11808                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11809                             &cur_data.link_report_flags)) {
11810                flow = "ON - receive & transmit";
11811            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11812                                    &cur_data.link_report_flags) &&
11813                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11814                                     &cur_data.link_report_flags)) {
11815                flow = "ON - receive";
11816            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11817                                     &cur_data.link_report_flags) &&
11818                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11819                                    &cur_data.link_report_flags)) {
11820                flow = "ON - transmit";
11821            } else {
11822                flow = "none"; /* possible? */
11823            }
11824        } else {
11825            flow = "none";
11826        }
11827
11828        if_link_state_change(sc->ifp, LINK_STATE_UP);
11829        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11830              cur_data.line_speed, duplex, flow);
11831    }
11832}
11833
11834static void
11835bxe_link_report(struct bxe_softc *sc)
11836{
11837    bxe_acquire_phy_lock(sc);
11838    bxe_link_report_locked(sc);
11839    bxe_release_phy_lock(sc);
11840}
11841
11842static void
11843bxe_link_status_update(struct bxe_softc *sc)
11844{
11845    if (sc->state != BXE_STATE_OPEN) {
11846        return;
11847    }
11848
11849    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11850        elink_link_status_update(&sc->link_params, &sc->link_vars);
11851    } else {
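        /*
         * No PHY to query here (VF, or emulation/FPGA platform): report a
         * fixed set of supported modes and force the link up.
         */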
11852        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11853                                  ELINK_SUPPORTED_10baseT_Full |
11854                                  ELINK_SUPPORTED_100baseT_Half |
11855                                  ELINK_SUPPORTED_100baseT_Full |
11856                                  ELINK_SUPPORTED_1000baseT_Full |
11857                                  ELINK_SUPPORTED_2500baseX_Full |
11858                                  ELINK_SUPPORTED_10000baseT_Full |
11859                                  ELINK_SUPPORTED_TP |
11860                                  ELINK_SUPPORTED_FIBRE |
11861                                  ELINK_SUPPORTED_Autoneg |
11862                                  ELINK_SUPPORTED_Pause |
11863                                  ELINK_SUPPORTED_Asym_Pause);
11864        sc->port.advertising[0] = sc->port.supported[0];
11865
11866        sc->link_params.sc                = sc;
11867        sc->link_params.port              = SC_PORT(sc);
11868        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11869        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11870        sc->link_params.req_line_speed[0] = SPEED_10000;
11871        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11872        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11873
11874        if (CHIP_REV_IS_FPGA(sc)) {
11875            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11876            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11877            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11878                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11879        } else {
11880            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11881            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11882            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11883                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11884        }
11885
11886        sc->link_vars.link_up = 1;
11887
11888        sc->link_vars.duplex    = DUPLEX_FULL;
11889        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11890
11891        if (IS_PF(sc)) {
11892            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11893            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11894            bxe_link_report(sc);
11895        }
11896    }
11897
11898    if (IS_PF(sc)) {
11899        if (sc->link_vars.link_up) {
11900            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11901        } else {
11902            bxe_stats_handle(sc, STATS_EVENT_STOP);
11903        }
11904        bxe_link_report(sc);
11905    } else {
11906        bxe_link_report(sc);
11907        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11908    }
11909}
11910
11911static int
11912bxe_initial_phy_init(struct bxe_softc *sc,
11913                     int              load_mode)
11914{
11915    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11916    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11917    struct elink_params *lp = &sc->link_params;
11918
11919    bxe_set_requested_fc(sc);
11920
11921    if (CHIP_REV_IS_SLOW(sc)) {
11922        uint32_t bond = CHIP_BOND_ID(sc);
11923        uint32_t feat = 0;
11924
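        /*
         * Emulation/FPGA only: the chip bond ID bits select which MAC
         * blocks elink should treat as disabled (the
         * ELINK_FEATURE_CONFIG_EMUL_DISABLE_* flags set below).
         */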
11925        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11926            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11927        } else if (bond & 0x4) {
11928            if (CHIP_IS_E3(sc)) {
11929                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11930            } else {
11931                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11932            }
11933        } else if (bond & 0x8) {
11934            if (CHIP_IS_E3(sc)) {
11935                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11936            } else {
11937                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11938            }
11939        }
11940
11941        /* disable EMAC for E3 and above */
11942        if (bond & 0x2) {
11943            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11944        }
11945
11946        sc->link_params.feature_config_flags |= feat;
11947    }
11948
11949    bxe_acquire_phy_lock(sc);
11950
11951    if (load_mode == LOAD_DIAG) {
11952        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11953        /* Prefer doing PHY loopback at 10G speed, if possible */
11954        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11955            if (lp->speed_cap_mask[cfg_idx] &
11956                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11957                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11958            } else {
11959                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11960            }
11961        }
11962    }
11963
11964    if (load_mode == LOAD_LOOPBACK_EXT) {
11965        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11966    }
11967
11968    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11969
11970    bxe_release_phy_lock(sc);
11971
11972    bxe_calc_fc_adv(sc);
11973
11974    if (sc->link_vars.link_up) {
11975        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11976        bxe_link_report(sc);
11977    }
11978
11979    if (!CHIP_REV_IS_SLOW(sc)) {
11980        bxe_periodic_start(sc);
11981    }
11982
11983    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11984    return (rc);
11985}
11986
11987/* must be called under IF_ADDR_LOCK */
11988
11989static int
11990bxe_set_mc_list(struct bxe_softc *sc)
11991{
11992    struct ecore_mcast_ramrod_params rparam = { NULL };
11993    int rc = 0;
11994    int mc_count = 0;
11995    int mcnt, i;
11996    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
11997    unsigned char *mta;
11998    if_t ifp = sc->ifp;
11999
12000    mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */
12001    if (!mc_count)
12002        return (0);
12003
12004    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12005            mc_count, M_DEVBUF, M_NOWAIT);
12006
12007    if(mta == NULL) {
12008        BLOGE(sc, "Failed to allocate temp mcast list\n");
12009        return (-1);
12010    }
12011    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
12012
12013    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
12014    mc_mac_start = mc_mac;
12015
12016    if (!mc_mac) {
12017        free(mta, M_DEVBUF);
12018        BLOGE(sc, "Failed to allocate temp mcast list\n");
12019        return (-1);
12020    }
12021    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12022
12023    /* mta and mcnt are not expected to differ */
12024    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
12025
12027    rparam.mcast_obj = &sc->mcast_obj;
12028    ECORE_LIST_INIT(&rparam.mcast_list);
12029
12030    for (i = 0; i < mcnt; i++) {
12031
12032        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
12033        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
12034
12035        BLOGD(sc, DBG_LOAD,
12036              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12037              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12038              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12039
12040        mc_mac++;
12041    }
12042    rparam.mcast_list_len = mc_count;
12043
12044    BXE_MCAST_LOCK(sc);
12045
12046    /* first, clear all configured multicast MACs */
12047    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12048    if (rc < 0) {
12049        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12050        BXE_MCAST_UNLOCK(sc);
12051        free(mc_mac_start, M_DEVBUF);
12052        free(mta, M_DEVBUF);
12053        return (rc);
12054    }
12055
12056    /* Now add the new MACs */
12057    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12058    if (rc < 0) {
12059        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12060    }
12061
12062    BXE_MCAST_UNLOCK(sc);
12063
12064    free(mc_mac_start, M_DEVBUF);
12065    free(mta, M_DEVBUF);
12066
12067    return (rc);
12068}
12069
12070static int
12071bxe_set_uc_list(struct bxe_softc *sc)
12072{
12073    if_t ifp = sc->ifp;
12074    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12075    struct ifaddr *ifa;
12076    unsigned long ramrod_flags = 0;
12077    int rc;
12078
12079#if __FreeBSD_version < 800000
12080    IF_ADDR_LOCK(ifp);
12081#else
12082    if_addr_rlock(ifp);
12083#endif
12084
12085    /* first schedule a cleanup of the old configuration */
12086    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12087    if (rc < 0) {
12088        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12089#if __FreeBSD_version < 800000
12090        IF_ADDR_UNLOCK(ifp);
12091#else
12092        if_addr_runlock(ifp);
12093#endif
12094        return (rc);
12095    }
12096
12097    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12098    while (ifa) {
12099        if (ifa->ifa_addr->sa_family != AF_LINK) {
12100            ifa = TAILQ_NEXT(ifa, ifa_link);
12101            continue;
12102        }
12103
12104        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12105                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12106        if (rc == -EEXIST) {
12107            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12108            /* do not treat adding same MAC as an error */
12109            rc = 0;
12110        } else if (rc < 0) {
12111            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12112#if __FreeBSD_version < 800000
12113            IF_ADDR_UNLOCK(ifp);
12114#else
12115            if_addr_runlock(ifp);
12116#endif
12117            return (rc);
12118        }
12119
12120        ifa = TAILQ_NEXT(ifa, ifa_link);
12121    }
12122
12123#if __FreeBSD_version < 800000
12124    IF_ADDR_UNLOCK(ifp);
12125#else
12126    if_addr_runlock(ifp);
12127#endif
12128
12129    /* Execute the pending commands */
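    /*
     * With RAMROD_CONT set, bxe_set_mac_one() skips building a new user
     * request and ecore simply continues executing the ADD/DEL commands
     * scheduled above.
     */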
12130    bit_set(&ramrod_flags, RAMROD_CONT);
12131    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12132                            ECORE_UC_LIST_MAC, &ramrod_flags));
12133}
12134
12135static void
12136bxe_set_rx_mode(struct bxe_softc *sc)
12137{
12138    if_t ifp = sc->ifp;
12139    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12140
12141    if (sc->state != BXE_STATE_OPEN) {
12142        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12143        return;
12144    }
12145
12146    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12147
12148    if (if_getflags(ifp) & IFF_PROMISC) {
12149        rx_mode = BXE_RX_MODE_PROMISC;
12150    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12151               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12152                CHIP_IS_E1(sc))) {
12153        rx_mode = BXE_RX_MODE_ALLMULTI;
12154    } else {
12155        if (IS_PF(sc)) {
12156            /* some multicasts */
12157            if (bxe_set_mc_list(sc) < 0) {
12158                rx_mode = BXE_RX_MODE_ALLMULTI;
12159            }
12160            if (bxe_set_uc_list(sc) < 0) {
12161                rx_mode = BXE_RX_MODE_PROMISC;
12162            }
12163        }
12164    }
12165
12166    sc->rx_mode = rx_mode;
12167
12168    /* schedule the rx_mode command */
12169    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12170        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12171        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12172        return;
12173    }
12174
12175    if (IS_PF(sc)) {
12176        bxe_set_storm_rx_mode(sc);
12177    }
12178}
12179
12180
12181/* update flags in shmem */
12182static void
12183bxe_update_drv_flags(struct bxe_softc *sc,
12184                     uint32_t         flags,
12185                     uint32_t         set)
12186{
12187    uint32_t drv_flags;
12188
12189    if (SHMEM2_HAS(sc, drv_flags)) {
12190        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12191        drv_flags = SHMEM2_RD(sc, drv_flags);
12192
12193        if (set) {
12194            SET_FLAGS(drv_flags, flags);
12195        } else {
12196            RESET_FLAGS(drv_flags, flags);
12197        }
12198
12199        SHMEM2_WR(sc, drv_flags, drv_flags);
12200        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12201
12202        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12203    }
12204}
12205
12206/* periodic timer callout routine, only runs when the interface is up */
12207
12208static void
12209bxe_periodic_callout_func(void *xsc)
12210{
12211    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12212    int i;
12213
12214    if (!BXE_CORE_TRYLOCK(sc)) {
12215        /* just bail and try again next time */
12216
12217        if ((sc->state == BXE_STATE_OPEN) &&
12218            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12219            /* schedule the next periodic callout */
12220            callout_reset(&sc->periodic_callout, hz,
12221                          bxe_periodic_callout_func, sc);
12222        }
12223
12224        return;
12225    }
12226
12227    if ((sc->state != BXE_STATE_OPEN) ||
12228        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12229        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12230        BXE_CORE_UNLOCK(sc);
12231        return;
12232    }
12233
12235    /* Check for TX timeouts on any fastpath. */
12236    FOR_EACH_QUEUE(sc, i) {
12237        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12238            /* Ruh-Roh, chip was reset! */
12239            break;
12240        }
12241    }
12242
12243    if (!CHIP_REV_IS_SLOW(sc)) {
12244        /*
12245         * This barrier is needed to ensure the ordering between the writing
12246         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12247         * the reading here.
12248         */
12249        mb();
12250        if (sc->port.pmf) {
12251            bxe_acquire_phy_lock(sc);
12252            elink_period_func(&sc->link_params, &sc->link_vars);
12253            bxe_release_phy_lock(sc);
12254        }
12255    }
12256
12257    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12258        int mb_idx = SC_FW_MB_IDX(sc);
12259        uint32_t drv_pulse;
12260        uint32_t mcp_pulse;
12261
12262        ++sc->fw_drv_pulse_wr_seq;
12263        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12264
12265        drv_pulse = sc->fw_drv_pulse_wr_seq;
12266        bxe_drv_pulse(sc);
12267
12268        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12269                     MCP_PULSE_SEQ_MASK);
12270
12271        /*
12272         * The delta between driver pulse and mcp response should
12273         * be 1 (before mcp response) or 0 (after mcp response).
12274         */
12275        if ((drv_pulse != mcp_pulse) &&
12276            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12277            /* someone lost a heartbeat... */
12278            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12279                  drv_pulse, mcp_pulse);
12280        }
12281    }
12282
12283    /* state is BXE_STATE_OPEN */
12284    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12285
12286    BXE_CORE_UNLOCK(sc);
12287
12288    if ((sc->state == BXE_STATE_OPEN) &&
12289        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12290        /* schedule the next periodic callout */
12291        callout_reset(&sc->periodic_callout, hz,
12292                      bxe_periodic_callout_func, sc);
12293    }
12294}
12295
12296static void
12297bxe_periodic_start(struct bxe_softc *sc)
12298{
12299    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12300    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12301}
12302
12303static void
12304bxe_periodic_stop(struct bxe_softc *sc)
12305{
12306    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12307    callout_drain(&sc->periodic_callout);
12308}
12309
12310/* start the controller */
12311static __noinline int
12312bxe_nic_load(struct bxe_softc *sc,
12313             int              load_mode)
12314{
12315    uint32_t val;
12316    int load_code = 0;
12317    int i, rc = 0;
12318
12319    BXE_CORE_LOCK_ASSERT(sc);
12320
12321    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12322
12323    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12324
12325    if (IS_PF(sc)) {
12326        /* must be called before memory allocation and HW init */
12327        bxe_ilt_set_info(sc);
12328    }
12329
12330    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12331
12332    bxe_set_fp_rx_buf_size(sc);
12333
12334    if (bxe_alloc_fp_buffers(sc) != 0) {
12335        BLOGE(sc, "Failed to allocate fastpath memory\n");
12336        sc->state = BXE_STATE_CLOSED;
12337        rc = ENOMEM;
12338        goto bxe_nic_load_error0;
12339    }
12340
12341    if (bxe_alloc_mem(sc) != 0) {
12342        sc->state = BXE_STATE_CLOSED;
12343        rc = ENOMEM;
12344        goto bxe_nic_load_error0;
12345    }
12346
12347    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12348        sc->state = BXE_STATE_CLOSED;
12349        rc = ENOMEM;
12350        goto bxe_nic_load_error0;
12351    }
12352
12353    if (IS_PF(sc)) {
12354        /* set pf load just before approaching the MCP */
12355        bxe_set_pf_load(sc);
12356
12357        /* if MCP exists send load request and analyze response */
12358        if (!BXE_NOMCP(sc)) {
12359            /* attempt to load pf */
12360            if (bxe_nic_load_request(sc, &load_code) != 0) {
12361                sc->state = BXE_STATE_CLOSED;
12362                rc = ENXIO;
12363                goto bxe_nic_load_error1;
12364            }
12365
12366            /* what did the MCP say? */
12367            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12368                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12369                sc->state = BXE_STATE_CLOSED;
12370                rc = ENXIO;
12371                goto bxe_nic_load_error2;
12372            }
12373        } else {
12374            BLOGI(sc, "Device has no MCP!\n");
12375            load_code = bxe_nic_load_no_mcp(sc);
12376        }
12377
12378        /* mark PMF if applicable */
12379        bxe_nic_load_pmf(sc, load_code);
12380
12381        /* Init Function state controlling object */
12382        bxe_init_func_obj(sc);
12383
12384        /* Initialize HW */
12385        if (bxe_init_hw(sc, load_code) != 0) {
12386            BLOGE(sc, "HW init failed\n");
12387            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12388            sc->state = BXE_STATE_CLOSED;
12389            rc = ENXIO;
12390            goto bxe_nic_load_error2;
12391        }
12392    }
12393
12394    /* set ALWAYS_ALIVE bit in shmem */
12395    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12396    bxe_drv_pulse(sc);
12397    sc->flags |= BXE_NO_PULSE;
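    /*
     * With ALWAYS_ALIVE advertised, BXE_NO_PULSE keeps the periodic callout
     * from sending driver heartbeat pulses to the MCP.
     */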
12398
12399    /* attach interrupts */
12400    if (bxe_interrupt_attach(sc) != 0) {
12401        sc->state = BXE_STATE_CLOSED;
12402        rc = ENXIO;
12403        goto bxe_nic_load_error2;
12404    }
12405
12406    bxe_nic_init(sc, load_code);
12407
12408    /* Init per-function objects */
12409    if (IS_PF(sc)) {
12410        bxe_init_objs(sc);
12411        // XXX bxe_iov_nic_init(sc);
12412
12413        /* set AFEX default VLAN tag to an invalid value */
12414        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12415        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12416
12417        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12418        rc = bxe_func_start(sc);
12419        if (rc) {
12420            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12421            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12422            sc->state = BXE_STATE_ERROR;
12423            goto bxe_nic_load_error3;
12424        }
12425
12426        /* send LOAD_DONE command to MCP */
12427        if (!BXE_NOMCP(sc)) {
12428            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12429            if (!load_code) {
12430                BLOGE(sc, "MCP response failure, aborting\n");
12431                sc->state = BXE_STATE_ERROR;
12432                rc = ENXIO;
12433                goto bxe_nic_load_error3;
12434            }
12435        }
12436
12437        rc = bxe_setup_leading(sc);
12438        if (rc) {
12439            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12440            sc->state = BXE_STATE_ERROR;
12441            goto bxe_nic_load_error3;
12442        }
12443
12444        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12445            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12446            if (rc) {
12447                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12448                sc->state = BXE_STATE_ERROR;
12449                goto bxe_nic_load_error3;
12450            }
12451        }
12452
12453        rc = bxe_init_rss_pf(sc);
12454        if (rc) {
12455            BLOGE(sc, "PF RSS init failed\n");
12456            sc->state = BXE_STATE_ERROR;
12457            goto bxe_nic_load_error3;
12458        }
12459    }
12460    /* XXX VF */
12461
12462    /* now when Clients are configured we are ready to work */
12463    sc->state = BXE_STATE_OPEN;
12464
12465    /* Configure a ucast MAC */
12466    if (IS_PF(sc)) {
12467        rc = bxe_set_eth_mac(sc, TRUE);
12468    }
12469    if (rc) {
12470        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12471        sc->state = BXE_STATE_ERROR;
12472        goto bxe_nic_load_error3;
12473    }
12474
12475    if (sc->port.pmf) {
12476        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12477        if (rc) {
12478            sc->state = BXE_STATE_ERROR;
12479            goto bxe_nic_load_error3;
12480        }
12481    }
12482
12483    sc->link_params.feature_config_flags &=
12484        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12485
12486    /* start fast path */
12487
12488    /* Initialize Rx filter */
12489    bxe_set_rx_mode(sc);
12490
12491    /* start the Tx */
12492    switch (/* XXX load_mode */LOAD_OPEN) {
12493    case LOAD_NORMAL:
12494    case LOAD_OPEN:
12495        break;
12496
12497    case LOAD_DIAG:
12498    case LOAD_LOOPBACK_EXT:
12499        sc->state = BXE_STATE_DIAG;
12500        break;
12501
12502    default:
12503        break;
12504    }
12505
12506    if (sc->port.pmf) {
12507        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12508    } else {
12509        bxe_link_status_update(sc);
12510    }
12511
12512    /* start the periodic timer callout */
12513    bxe_periodic_start(sc);
12514
12515    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12516        /* mark driver is loaded in shmem2 */
12517        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12518        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12519                  (val |
12520                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12521                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12522    }
12523
12524    /* wait for all pending SP commands to complete */
12525    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12526        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12527        bxe_periodic_stop(sc);
12528        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12529        return (ENXIO);
12530    }
12531
12532    /* Tell the stack the driver is running! */
12533    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12534
12535    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12536
12537    return (0);
12538
12539bxe_nic_load_error3:
12540
12541    if (IS_PF(sc)) {
12542        bxe_int_disable_sync(sc, 1);
12543
12544        /* clean out queued objects */
12545        bxe_squeeze_objects(sc);
12546    }
12547
12548    bxe_interrupt_detach(sc);
12549
12550bxe_nic_load_error2:
12551
12552    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12553        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12554        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12555    }
12556
12557    sc->port.pmf = 0;
12558
12559bxe_nic_load_error1:
12560
12561    /* clear pf_load status, as it was already set */
12562    if (IS_PF(sc)) {
12563        bxe_clear_pf_load(sc);
12564    }
12565
12566bxe_nic_load_error0:
12567
12568    bxe_free_fw_stats_mem(sc);
12569    bxe_free_fp_buffers(sc);
12570    bxe_free_mem(sc);
12571
12572    return (rc);
12573}
12574
12575static int
12576bxe_init_locked(struct bxe_softc *sc)
12577{
12578    int other_engine = SC_PATH(sc) ? 0 : 1;
12579    uint8_t other_load_status, load_status;
12580    uint8_t global = FALSE;
12581    int rc;
12582
12583    BXE_CORE_LOCK_ASSERT(sc);
12584
12585    /* check if the driver is already running */
12586    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12587        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12588        return (0);
12589    }
12590
12591    bxe_set_power_state(sc, PCI_PM_D0);
12592
12593    /*
12594     * If parity occurred during the unload, then attentions and/or
12595     * RECOVERY_IN_PROGRESS may still be set. If so, we want the first
12596     * function loaded on the current engine to complete the recovery.
12597     * Parity recovery is only relevant for the PF driver.
12598     */
12599    if (IS_PF(sc)) {
12600        other_load_status = bxe_get_load_status(sc, other_engine);
12601        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12602
12603        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12604            bxe_chk_parity_attn(sc, &global, TRUE)) {
12605            do {
12606                /*
12607                 * If there are attentions and they are in global blocks, set
12608                 * the GLOBAL_RESET bit regardless whether it will be this
12609                 * function that will complete the recovery or not.
12610                 */
12611                if (global) {
12612                    bxe_set_reset_global(sc);
12613                }
12614
12615                /*
12616                 * Only the first function on the current engine should try
12617                 * to recover in open. In case of attentions in global blocks
12618                 * only the first in the chip should try to recover.
12619                 */
12620                if ((!load_status && (!global || !other_load_status)) &&
12621                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12622                    BLOGI(sc, "Recovered during init\n");
12623                    break;
12624                }
12625
12626                /* recovery has failed... */
12627                bxe_set_power_state(sc, PCI_PM_D3hot);
12628                sc->recovery_state = BXE_RECOVERY_FAILED;
12629
12630                BLOGE(sc, "Recovery flow hasn't properly "
12631                          "completed yet, try again later. "
12632                          "If you still see this message after a "
12633                          "few retries then a power cycle is required.\n");
12634
12635                rc = ENXIO;
12636                goto bxe_init_locked_done;
12637            } while (0);
12638        }
12639    }
12640
12641    sc->recovery_state = BXE_RECOVERY_DONE;
12642
12643    rc = bxe_nic_load(sc, LOAD_OPEN);
12644
12645bxe_init_locked_done:
12646
12647    if (rc) {
12648        /* Tell the stack the driver is NOT running! */
12649        BLOGE(sc, "Initialization failed, "
12650                  "stack notified driver is NOT running!\n");
12651        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12652    }
12653
12654    return (rc);
12655}
12656
12657static int
12658bxe_stop_locked(struct bxe_softc *sc)
12659{
12660    BXE_CORE_LOCK_ASSERT(sc);
12661    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12662}
12663
12664/*
12665 * Handles controller initialization when called from an unlocked routine.
12666 * ifconfig calls this function.
12667 *
12668 * Returns:
12669 *   void
12670 */
12671static void
12672bxe_init(void *xsc)
12673{
12674    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12675
12676    BXE_CORE_LOCK(sc);
12677    bxe_init_locked(sc);
12678    BXE_CORE_UNLOCK(sc);
12679}
12680
12681static int
12682bxe_init_ifnet(struct bxe_softc *sc)
12683{
12684    if_t ifp;
12685    int capabilities;
12686
12687    /* ifconfig entrypoint for media type/status reporting */
12688    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12689                 bxe_ifmedia_update,
12690                 bxe_ifmedia_status);
12691
12692    /* set the default interface values */
12693    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12694    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12695    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12696
12697    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12698    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12699
12700    /* allocate the ifnet structure */
12701    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12702        BLOGE(sc, "Interface allocation failed!\n");
12703        return (ENXIO);
12704    }
12705
12706    if_setsoftc(ifp, sc);
12707    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12708    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12709    if_setioctlfn(ifp, bxe_ioctl);
12710    if_setstartfn(ifp, bxe_tx_start);
12711    if_setgetcounterfn(ifp, bxe_get_counter);
12712#if __FreeBSD_version >= 901504
12713    if_settransmitfn(ifp, bxe_tx_mq_start);
12714    if_setqflushfn(ifp, bxe_mq_flush);
12715#endif
12716#ifdef FreeBSD8_0
12717    if_settimer(ifp, 0);
12718#endif
12719    if_setinitfn(ifp, bxe_init);
12720    if_setmtu(ifp, sc->mtu);
12721    if_sethwassist(ifp, (CSUM_IP      |
12722                        CSUM_TCP      |
12723                        CSUM_UDP      |
12724                        CSUM_TSO      |
12725                        CSUM_TCP_IPV6 |
12726                        CSUM_UDP_IPV6));
12727
12728    capabilities =
12729#if __FreeBSD_version < 700000
12730        (IFCAP_VLAN_MTU       |
12731         IFCAP_VLAN_HWTAGGING |
12732         IFCAP_HWCSUM         |
12733         IFCAP_JUMBO_MTU      |
12734         IFCAP_LRO);
12735#else
12736        (IFCAP_VLAN_MTU       |
12737         IFCAP_VLAN_HWTAGGING |
12738         IFCAP_VLAN_HWTSO     |
12739         IFCAP_VLAN_HWFILTER  |
12740         IFCAP_VLAN_HWCSUM    |
12741         IFCAP_HWCSUM         |
12742         IFCAP_JUMBO_MTU      |
12743         IFCAP_LRO            |
12744         IFCAP_TSO4           |
12745         IFCAP_TSO6           |
12746         IFCAP_WOL_MAGIC);
12747#endif
12748    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12749    if_setbaudrate(ifp, IF_Gbps(10));
12750/* XXX */
12751    if_setsendqlen(ifp, sc->tx_ring_size);
12752    if_setsendqready(ifp);
12753/* XXX */
12754
12755    sc->ifp = ifp;
12756
12757    /* attach to the Ethernet interface list */
12758    ether_ifattach(ifp, sc->link_params.mac_addr);
12759
12760    return (0);
12761}
12762
12763static void
12764bxe_deallocate_bars(struct bxe_softc *sc)
12765{
12766    int i;
12767
12768    for (i = 0; i < MAX_BARS; i++) {
12769        if (sc->bar[i].resource != NULL) {
12770            bus_release_resource(sc->dev,
12771                                 SYS_RES_MEMORY,
12772                                 sc->bar[i].rid,
12773                                 sc->bar[i].resource);
12774            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12775                  i, PCIR_BAR(i));
12776        }
12777    }
12778}
12779
12780static int
12781bxe_allocate_bars(struct bxe_softc *sc)
12782{
12783    u_int flags;
12784    int i;
12785
12786    memset(sc->bar, 0, sizeof(sc->bar));
12787
12788    for (i = 0; i < MAX_BARS; i++) {
12789
12790        /* memory resources reside at BARs 0, 2, 4 */
12791        /* Run `pciconf -lb` to see mappings */
12792        if ((i != 0) && (i != 2) && (i != 4)) {
12793            continue;
12794        }
12795
12796        sc->bar[i].rid = PCIR_BAR(i);
12797
12798        flags = RF_ACTIVE;
12799        if (i == 0) {
12800            flags |= RF_SHAREABLE;
12801        }
12802
12803        if ((sc->bar[i].resource =
12804             bus_alloc_resource_any(sc->dev,
12805                                    SYS_RES_MEMORY,
12806                                    &sc->bar[i].rid,
12807                                    flags)) == NULL) {
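            /* XXX allocation failure still returns 0; the BAR is left NULL */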
12808            return (0);
12809        }
12810
12811        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12812        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12813        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12814
12815        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12816              i, PCIR_BAR(i),
12817              (void *)rman_get_start(sc->bar[i].resource),
12818              (void *)rman_get_end(sc->bar[i].resource),
12819              rman_get_size(sc->bar[i].resource),
12820              (void *)sc->bar[i].kva);
12821    }
12822
12823    return (0);
12824}
12825
12826static void
12827bxe_get_function_num(struct bxe_softc *sc)
12828{
12829    uint32_t val = 0;
12830
12831    /*
12832     * Read the ME register to get the function number. The ME register
12833     * holds the relative-function number and absolute-function number. The
12834     * absolute-function number appears only in E2 and above. Before that
12835     * these bits always contained zero, therefore we cannot blindly use them.
12836     */
12837
12838    val = REG_RD(sc, BAR_ME_REGISTER);
12839
12840    sc->pfunc_rel =
12841        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12842    sc->path_id =
12843        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12844
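    /*
     * Compose the absolute function number from the relative function number
     * and the path ID; the bit layout differs between 2-port and 4-port modes.
     */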
12845    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12846        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12847    } else {
12848        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12849    }
12850
12851    BLOGD(sc, DBG_LOAD,
12852          "Relative function %d, Absolute function %d, Path %d\n",
12853          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12854}
12855
12856static uint32_t
12857bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12858{
12859    uint32_t shmem2_size;
12860    uint32_t offset;
12861    uint32_t mf_cfg_offset_value;
12862
12863    /* Non 57712 */
12864    offset = (SHMEM_RD(sc, func_mb) +
12865              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12866
12867    /* 57712 plus */
12868    if (sc->devinfo.shmem2_base != 0) {
12869        shmem2_size = SHMEM2_RD(sc, size);
12870        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12871            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12872            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12873                offset = mf_cfg_offset_value;
12874            }
12875        }
12876    }
12877
12878    return (offset);
12879}
12880
12881static uint32_t
12882bxe_pcie_capability_read(struct bxe_softc *sc,
12883                         int    reg,
12884                         int    width)
12885{
12886    int pcie_reg;
12887
12888    /* ensure PCIe capability is enabled */
12889    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12890        if (pcie_reg != 0) {
12891            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12892            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12893        }
12894    }
12895
12896    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12897
12898    return (0);
12899}
12900
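/*
 * Returns non-zero if the PCIe device status register reports a pending
 * transaction, i.e. the function still has outstanding PCIe requests.
 */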
12901static uint8_t
12902bxe_is_pcie_pending(struct bxe_softc *sc)
12903{
12904    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12905            PCIM_EXP_STA_TRANSACTION_PND);
12906}
12907
12908/*
12909 * Walk the PCI capabilities list for the device to find what features are
12910 * supported. These capabilities may be enabled/disabled by firmware so it's
12911 * best to walk the list rather than make assumptions.
12912 */
12913static void
12914bxe_probe_pci_caps(struct bxe_softc *sc)
12915{
12916    uint16_t link_status;
12917    int reg;
12918
12919    /* check if PCI Power Management is enabled */
12920    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12921        if (reg != 0) {
12922            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12923
12924            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12925            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12926        }
12927    }
12928
12929    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12930
12931    /* handle PCIe 2.0 workarounds for 57710 */
12932    if (CHIP_IS_E1(sc)) {
12933        /* workaround for 57710 errata E4_57710_27462 */
12934        sc->devinfo.pcie_link_speed =
12935            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12936
12937        /* workaround for 57710 errata E4_57710_27488 */
12938        sc->devinfo.pcie_link_width =
12939            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12940        if (sc->devinfo.pcie_link_speed > 1) {
12941            sc->devinfo.pcie_link_width =
12942                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12943        }
12944    } else {
12945        sc->devinfo.pcie_link_speed =
12946            (link_status & PCIM_LINK_STA_SPEED);
12947        sc->devinfo.pcie_link_width =
12948            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12949    }
12950
12951    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12952          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12953
12954    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12955    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
12956
12957    /* check if MSI capability is enabled */
12958    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12959        if (reg != 0) {
12960            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12961
12962            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12963            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12964        }
12965    }
12966
12967    /* check if MSI-X capability is enabled */
12968    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12969        if (reg != 0) {
12970            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12971
12972            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12973            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12974        }
12975    }
12976}
12977
12978static int
12979bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12980{
12981    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12982    uint32_t val;
12983
12984    /* get the outer vlan if we're in switch-dependent mode */
12985
12986    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12987    mf_info->ext_id = (uint16_t)val;
12988
12989    mf_info->multi_vnics_mode = 1;
12990
12991    if (!VALID_OVLAN(mf_info->ext_id)) {
12992        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
12993        return (1);
12994    }
12995
12996    /* get the capabilities */
12997    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12998        FUNC_MF_CFG_PROTOCOL_ISCSI) {
12999        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13000    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13001               FUNC_MF_CFG_PROTOCOL_FCOE) {
13002        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13003    } else {
13004        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13005    }
13006
13007    mf_info->vnics_per_port =
13008        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13009
13010    return (0);
13011}
13012
13013static uint32_t
13014bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13015{
13016    uint32_t retval = 0;
13017    uint32_t val;
13018
13019    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13020
13021    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13022        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13023            retval |= MF_PROTO_SUPPORT_ETHERNET;
13024        }
13025        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13026            retval |= MF_PROTO_SUPPORT_ISCSI;
13027        }
13028        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13029            retval |= MF_PROTO_SUPPORT_FCOE;
13030        }
13031    }
13032
13033    return (retval);
13034}
13035
13036static int
13037bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13038{
13039    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13040    uint32_t val;
13041
13042    /*
13043     * There is no outer vlan if we're in switch-independent mode.
13044     * If the mac is valid then assume multi-function.
13045     */
13046
13047    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13048
13049    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13050
13051    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13052
13053    mf_info->vnics_per_port =
13054        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13055
13056    return (0);
13057}
13058
13059static int
13060bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13061{
13062    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13063    uint32_t e1hov_tag;
13064    uint32_t func_config;
13065    uint32_t niv_config;
13066
13067    mf_info->multi_vnics_mode = 1;
13068
13069    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13070    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13071    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13072
13073    mf_info->ext_id =
13074        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13075                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13076
13077    mf_info->default_vlan =
13078        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13079                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13080
13081    mf_info->niv_allowed_priorities =
13082        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13083                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13084
13085    mf_info->niv_default_cos =
13086        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13087                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13088
13089    mf_info->afex_vlan_mode =
13090        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13091         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13092
13093    mf_info->niv_mba_enabled =
13094        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13095         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13096
13097    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13098
13099    mf_info->vnics_per_port =
13100        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13101
13102    return (0);
13103}
13104
13105static int
13106bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13107{
13108    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13109    uint32_t mf_cfg1;
13110    uint32_t mf_cfg2;
13111    uint32_t ovlan1;
13112    uint32_t ovlan2;
13113    uint8_t i, j;
13114
13115    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13116          SC_PORT(sc));
13117    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13118          mf_info->mf_config[SC_VN(sc)]);
13119    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13120          mf_info->multi_vnics_mode);
13121    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13122          mf_info->vnics_per_port);
13123    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13124          mf_info->ext_id);
13125    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13126          mf_info->min_bw[0], mf_info->min_bw[1],
13127          mf_info->min_bw[2], mf_info->min_bw[3]);
13128    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13129          mf_info->max_bw[0], mf_info->max_bw[1],
13130          mf_info->max_bw[2], mf_info->max_bw[3]);
13131    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13132          sc->mac_addr_str);
13133
13134    /* various MF mode sanity checks... */
13135
13136    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13137        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13138              SC_PORT(sc));
13139        return (1);
13140    }
13141
13142    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13143        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13144              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13145        return (1);
13146    }
13147
13148    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13149        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13150        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13151            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13152                  SC_VN(sc), OVLAN(sc));
13153            return (1);
13154        }
13155
13156        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13157            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13158                  mf_info->multi_vnics_mode, OVLAN(sc));
13159            return (1);
13160        }
13161
13162        /*
13163         * Verify all functions are either MF or SF mode. If MF, make sure
13164         * that all non-hidden functions have a valid ovlan. If SF,
13165         * make sure that all non-hidden functions have an invalid ovlan.
13166         */
13167        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13168            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13169            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13170            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13171                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13172                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13173                BLOGE(sc, "mf_mode=SD function %d MF config "
13174                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13175                      i, mf_info->multi_vnics_mode, ovlan1);
13176                return (1);
13177            }
13178        }
13179
13180        /* Verify all funcs on the same port each have a different ovlan. */
13181        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13182            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13183            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13184            /* iterate from the next function on the port to the max func */
13185            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13186                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13187                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13188                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13189                    VALID_OVLAN(ovlan1) &&
13190                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13191                    VALID_OVLAN(ovlan2) &&
13192                    (ovlan1 == ovlan2)) {
13193                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13194                              "have the same ovlan (%d)\n",
13195                          i, j, ovlan1);
13196                    return (1);
13197                }
13198            }
13199        }
13200    } /* MULTI_FUNCTION_SD */
13201
13202    return (0);
13203}
13204
13205static int
13206bxe_get_mf_cfg_info(struct bxe_softc *sc)
13207{
13208    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13209    uint32_t val, mac_upper;
13210    uint8_t i, vnic;
13211
13212    /* initialize mf_info defaults */
13213    mf_info->vnics_per_port   = 1;
13214    mf_info->multi_vnics_mode = FALSE;
13215    mf_info->path_has_ovlan   = FALSE;
13216    mf_info->mf_mode          = SINGLE_FUNCTION;
13217
13218    if (!CHIP_IS_MF_CAP(sc)) {
13219        return (0);
13220    }
13221
13222    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13223        BLOGE(sc, "Invalid mf_cfg_base!\n");
13224        return (1);
13225    }
13226
13227    /* get the MF mode (switch dependent / independent / single-function) */
13228
13229    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13230
13231    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13232    {
13233    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13234
13235        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13236
13237        /* check for legal upper mac bytes */
13238        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13239            mf_info->mf_mode = MULTI_FUNCTION_SI;
13240        } else {
13241            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13242        }
13243
13244        break;
13245
13246    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13247    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13248
13249        /* get outer vlan configuration */
13250        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13251
13252        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13253            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13254            mf_info->mf_mode = MULTI_FUNCTION_SD;
13255        } else {
13256            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13257        }
13258
13259        break;
13260
13261    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13262
13263        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13264        return (0);
13265
13266    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13267
13268        /*
13269         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13270         * and the MAC address is valid.
13271         */
13272        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13273
13274        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13275            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13276            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13277        } else {
13278            BLOGE(sc, "Invalid config for AFEX mode\n");
13279        }
13280
13281        break;
13282
13283    default:
13284
13285        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13286              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13287
13288        return (1);
13289    }
13290
13291    /* set path mf_mode (which could be different than function mf_mode) */
13292    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13293        mf_info->path_has_ovlan = TRUE;
13294    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13295        /*
13296         * Decide on the path multi-vnics mode. If we're not in MF mode and are
13297         * in 4-port mode, it is enough to check vnic-0 of the other port on the
13298         * same path.
13299         */
13300        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13301            uint8_t other_port = !(PORT_ID(sc) & 1);
13302            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13303
13304            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13305
13306            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13307        }
13308    }
13309
13310    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13311        /* invalid MF config */
13312        if (SC_VN(sc) >= 1) {
13313            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13314            return (1);
13315        }
13316
13317        return (0);
13318    }
13319
13320    /* get the MF configuration */
13321    mf_info->mf_config[SC_VN(sc)] =
13322        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13323
13324    switch(mf_info->mf_mode)
13325    {
13326    case MULTI_FUNCTION_SD:
13327
13328        bxe_get_shmem_mf_cfg_info_sd(sc);
13329        break;
13330
13331    case MULTI_FUNCTION_SI:
13332
13333        bxe_get_shmem_mf_cfg_info_si(sc);
13334        break;
13335
13336    case MULTI_FUNCTION_AFEX:
13337
13338        bxe_get_shmem_mf_cfg_info_niv(sc);
13339        break;
13340
13341    default:
13342
13343        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13344              mf_info->mf_mode);
13345        return (1);
13346    }
13347
13348    /* get the congestion management parameters */
13349
13350    vnic = 0;
13351    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13352        /* get min/max bw */
13353        val = MFCFG_RD(sc, func_mf_config[i].config);
13354        mf_info->min_bw[vnic] =
13355            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13356        mf_info->max_bw[vnic] =
13357            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13358        vnic++;
13359    }
13360
13361    return (bxe_check_valid_mf_cfg(sc));
13362}
13363
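/*
 * Read the per-port hardware and feature configuration from shared memory
 * (shmem), including the link parameters, the multi-function configuration,
 * and the MAC address.
 */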
13364static int
13365bxe_get_shmem_info(struct bxe_softc *sc)
13366{
13367    int port;
13368    uint32_t mac_hi, mac_lo, val;
13369
13370    port = SC_PORT(sc);
13371    mac_hi = mac_lo = 0;
13372
13373    sc->link_params.sc   = sc;
13374    sc->link_params.port = port;
13375
13376    /* get the hardware config info */
13377    sc->devinfo.hw_config =
13378        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13379    sc->devinfo.hw_config2 =
13380        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13381
13382    sc->link_params.hw_led_mode =
13383        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13384         SHARED_HW_CFG_LED_MODE_SHIFT);
13385
13386    /* get the port feature config */
13387    sc->port.config =
13388        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13389
13390    /* get the link params */
13391    sc->link_params.speed_cap_mask[0] =
13392        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13393    sc->link_params.speed_cap_mask[1] =
13394        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13395
13396    /* get the lane config */
13397    sc->link_params.lane_config =
13398        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13399
13400    /* get the link config */
13401    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13402    sc->port.link_config[ELINK_INT_PHY] = val;
13403    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13404    sc->port.link_config[ELINK_EXT_PHY1] =
13405        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13406
13407    /* get the override preemphasis flag and enable it or turn it off */
13408    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13409    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13410        sc->link_params.feature_config_flags |=
13411            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13412    } else {
13413        sc->link_params.feature_config_flags &=
13414            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13415    }
13416
13417    /* get the initial value of the link params */
13418    sc->link_params.multi_phy_config =
13419        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13420
13421    /* get external phy info */
13422    sc->port.ext_phy_config =
13423        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13424
13425    /* get the multifunction configuration */
13426    bxe_get_mf_cfg_info(sc);
13427
13428    /* get the mac address */
13429    if (IS_MF(sc)) {
13430        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13431        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13432    } else {
13433        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13434        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13435    }
13436
13437    if ((mac_lo == 0) && (mac_hi == 0)) {
13438        *sc->mac_addr_str = 0;
13439        BLOGE(sc, "No Ethernet address programmed!\n");
13440    } else {
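        /* mac_upper holds MAC bytes 0-1 and mac_lower bytes 2-5 (big-endian) */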
13441        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13442        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13443        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13444        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13445        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13446        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13447        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13448                 "%02x:%02x:%02x:%02x:%02x:%02x",
13449                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13450                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13451                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13452        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13453    }
13454
13455    return (0);
13456}
13457
13458static void
13459bxe_get_tunable_params(struct bxe_softc *sc)
13460{
13461    /* sanity checks */
13462
13463    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13464        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13465        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13466        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13467        bxe_interrupt_mode = INTR_MODE_MSIX;
13468    }
13469
13470    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13471        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13472        bxe_queue_count = 0;
13473    }
13474
13475    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13476        if (bxe_max_rx_bufs == 0) {
13477            bxe_max_rx_bufs = RX_BD_USABLE;
13478        } else {
13479            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13480            bxe_max_rx_bufs = 2048;
13481        }
13482    }
13483
13484    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13485        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13486        bxe_hc_rx_ticks = 25;
13487    }
13488
13489    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13490        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13491        bxe_hc_tx_ticks = 50;
13492    }
13493
13494    if (bxe_max_aggregation_size == 0) {
13495        bxe_max_aggregation_size = TPA_AGG_SIZE;
13496    }
13497
13498    if (bxe_max_aggregation_size > 0xffff) {
13499        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13500              bxe_max_aggregation_size);
13501        bxe_max_aggregation_size = TPA_AGG_SIZE;
13502    }
13503
13504    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13505        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13506        bxe_mrrs = -1;
13507    }
13508
13509    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13510        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13511        bxe_autogreeen = 0;
13512    }
13513
13514    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13515        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13516        bxe_udp_rss = 0;
13517    }
13518
13519    /* pull in user settings */
13520
13521    sc->interrupt_mode       = bxe_interrupt_mode;
13522    sc->max_rx_bufs          = bxe_max_rx_bufs;
13523    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13524    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13525    sc->max_aggregation_size = bxe_max_aggregation_size;
13526    sc->mrrs                 = bxe_mrrs;
13527    sc->autogreeen           = bxe_autogreeen;
13528    sc->udp_rss              = bxe_udp_rss;
13529
13530    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13531        sc->num_queues = 1;
13532    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
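        /* default to one queue per CPU, capped by the tunable and MAX_RSS_CHAINS */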
13533        sc->num_queues =
13534            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13535                MAX_RSS_CHAINS);
13536        if (sc->num_queues > mp_ncpus) {
13537            sc->num_queues = mp_ncpus;
13538        }
13539    }
13540
13541    BLOGD(sc, DBG_LOAD,
13542          "User Config: "
13543          "debug=0x%lx "
13544          "interrupt_mode=%d "
13545          "queue_count=%d "
13546          "hc_rx_ticks=%d "
13547          "hc_tx_ticks=%d "
13548          "rx_budget=%d "
13549          "max_aggregation_size=%d "
13550          "mrrs=%d "
13551          "autogreeen=%d "
13552          "udp_rss=%d\n",
13553          bxe_debug,
13554          sc->interrupt_mode,
13555          sc->num_queues,
13556          sc->hc_rx_ticks,
13557          sc->hc_tx_ticks,
13558          bxe_rx_budget,
13559          sc->max_aggregation_size,
13560          sc->mrrs,
13561          sc->autogreeen,
13562          sc->udp_rss);
13563}
13564
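/*
 * Detect the media type of the active PHY, set sc->media accordingly, and
 * return the corresponding port type.
 */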
13565static int
13566bxe_media_detect(struct bxe_softc *sc)
13567{
13568    int port_type;
13569    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13570
13571    switch (sc->link_params.phy[phy_idx].media_type) {
13572    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13573    case ELINK_ETH_PHY_XFP_FIBER:
13574        BLOGI(sc, "Found 10Gb Fiber media.\n");
13575        sc->media = IFM_10G_SR;
13576        port_type = PORT_FIBRE;
13577        break;
13578    case ELINK_ETH_PHY_SFP_1G_FIBER:
13579        BLOGI(sc, "Found 1Gb Fiber media.\n");
13580        sc->media = IFM_1000_SX;
13581        port_type = PORT_FIBRE;
13582        break;
13583    case ELINK_ETH_PHY_KR:
13584    case ELINK_ETH_PHY_CX4:
13585        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13586        sc->media = IFM_10G_CX4;
13587        port_type = PORT_FIBRE;
13588        break;
13589    case ELINK_ETH_PHY_DA_TWINAX:
13590        BLOGI(sc, "Found 10Gb Twinax media.\n");
13591        sc->media = IFM_10G_TWINAX;
13592        port_type = PORT_DA;
13593        break;
13594    case ELINK_ETH_PHY_BASE_T:
13595        if (sc->link_params.speed_cap_mask[0] &
13596            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13597            BLOGI(sc, "Found 10GBase-T media.\n");
13598            sc->media = IFM_10G_T;
13599            port_type = PORT_TP;
13600        } else {
13601            BLOGI(sc, "Found 1000Base-T media.\n");
13602            sc->media = IFM_1000_T;
13603            port_type = PORT_TP;
13604        }
13605        break;
13606    case ELINK_ETH_PHY_NOT_PRESENT:
13607        BLOGI(sc, "Media not present.\n");
13608        sc->media = 0;
13609        port_type = PORT_OTHER;
13610        break;
13611    case ELINK_ETH_PHY_UNSPECIFIED:
13612    default:
13613        BLOGI(sc, "Unknown media!\n");
13614        sc->media = 0;
13615        port_type = PORT_OTHER;
13616        break;
13617    }
13618    return (port_type);
13619}
13620
13621#define GET_FIELD(value, fname)                     \
13622    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13623#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13624#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
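/* IGU_FID/IGU_VEC extract the function ID and vector number from an IGU CAM entry */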
13625
13626static int
13627bxe_get_igu_cam_info(struct bxe_softc *sc)
13628{
13629    int pfid = SC_FUNC(sc);
13630    int igu_sb_id;
13631    uint32_t val;
13632    uint8_t fid, igu_sb_cnt = 0;
13633
13634    sc->igu_base_sb = 0xff;
13635
13636    if (CHIP_INT_MODE_IS_BC(sc)) {
13637        int vn = SC_VN(sc);
13638        igu_sb_cnt = sc->igu_sb_cnt;
13639        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13640                           FP_SB_MAX_E1x);
13641        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13642                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13643        return (0);
13644    }
13645
13646    /* IGU in normal mode - read CAM */
13647    for (igu_sb_id = 0;
13648         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13649         igu_sb_id++) {
13650        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13651        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13652            continue;
13653        }
13654        fid = IGU_FID(val);
13655        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13656            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13657                continue;
13658            }
13659            if (IGU_VEC(val) == 0) {
13660                /* default status block */
13661                sc->igu_dsb_id = igu_sb_id;
13662            } else {
13663                if (sc->igu_base_sb == 0xff) {
13664                    sc->igu_base_sb = igu_sb_id;
13665                }
13666                igu_sb_cnt++;
13667            }
13668        }
13669    }
13670
13671    /*
13672     * Due to the new PF resource allocation by MFW T7.4 and above, the number
13673     * of CAM entries may not match the value advertised in PCI config space.
13674     * The driver should use the minimum of the two as the actual status
13675     * block count.
13676     */
13677    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13678
13679    if (igu_sb_cnt == 0) {
13680        BLOGE(sc, "CAM configuration error\n");
13681        return (-1);
13682    }
13683
13684    return (0);
13685}
13686
13687/*
13688 * Gather various information from the device config space, the device itself,
13689 * shmem, and the user input.
13690 */
13691static int
13692bxe_get_device_info(struct bxe_softc *sc)
13693{
13694    uint32_t val;
13695    int rc;
13696
13697    /* Get the data for the device */
13698    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13699    sc->devinfo.device_id    = pci_get_device(sc->dev);
13700    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13701    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13702
13703    /* get the chip revision (chip metal comes from pci config space) */
13704    sc->devinfo.chip_id     =
13705    sc->link_params.chip_id =
13706        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13707         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13708         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13709         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
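    /* chip_id layout: [31:16] chip num, [15:12] rev, [11:4] metal, [3:0] bond id */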
13710
13711    /* force 57811 according to MISC register */
13712    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13713        if (CHIP_IS_57810(sc)) {
13714            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13715                                   (sc->devinfo.chip_id & 0x0000ffff));
13716        } else if (CHIP_IS_57810_MF(sc)) {
13717            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13718                                   (sc->devinfo.chip_id & 0x0000ffff));
13719        }
13720        sc->devinfo.chip_id |= 0x1;
13721    }
13722
13723    BLOGD(sc, DBG_LOAD,
13724          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13725          sc->devinfo.chip_id,
13726          ((sc->devinfo.chip_id >> 16) & 0xffff),
13727          ((sc->devinfo.chip_id >> 12) & 0xf),
13728          ((sc->devinfo.chip_id >>  4) & 0xff),
13729          ((sc->devinfo.chip_id >>  0) & 0xf));
13730
13731    val = (REG_RD(sc, 0x2874) & 0x55);
13732    if ((sc->devinfo.chip_id & 0x1) ||
13733        (CHIP_IS_E1(sc) && val) ||
13734        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13735        sc->flags |= BXE_ONE_PORT_FLAG;
13736        BLOGD(sc, DBG_LOAD, "single port device\n");
13737    }
13738
13739    /* set the doorbell size */
13740    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13741
13742    /* determine whether the device is in 2 port or 4 port mode */
13743    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13744    if (CHIP_IS_E2E3(sc)) {
13745        /*
13746         * Read port4mode_en_ovwr[0]:
13747         *   If 1, four port mode is in port4mode_en_ovwr[1].
13748         *   If 0, four port mode is in port4mode_en[0].
13749         */
13750        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13751        if (val & 1) {
13752            val = ((val >> 1) & 1);
13753        } else {
13754            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13755        }
13756
13757        sc->devinfo.chip_port_mode =
13758            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13759
13760        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13761    }
13762
13763    /* get the function and path info for the device */
13764    bxe_get_function_num(sc);
13765
13766    /* get the shared memory base address */
13767    sc->devinfo.shmem_base     =
13768    sc->link_params.shmem_base =
13769        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13770    sc->devinfo.shmem2_base =
13771        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13772                                  MISC_REG_GENERIC_CR_0));
13773
13774    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13775          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13776
13777    if (!sc->devinfo.shmem_base) {
13778        /* this should ONLY prevent upcoming shmem reads */
13779        BLOGI(sc, "MCP not active\n");
13780        sc->flags |= BXE_NO_MCP_FLAG;
13781        return (0);
13782    }
13783
13784    /* make sure the shared memory contents are valid */
13785    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13786    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13787        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13788        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13789        return (0);
13790    }
13791    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13792
13793    /* get the bootcode version */
13794    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13795    snprintf(sc->devinfo.bc_ver_str,
13796             sizeof(sc->devinfo.bc_ver_str),
13797             "%d.%d.%d",
13798             ((sc->devinfo.bc_ver >> 24) & 0xff),
13799             ((sc->devinfo.bc_ver >> 16) & 0xff),
13800             ((sc->devinfo.bc_ver >>  8) & 0xff));
13801    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13802
13803    /* get the bootcode shmem address */
13804    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13805    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13806
13807    /* clean indirect addresses as they're not used */
13808    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13809    if (IS_PF(sc)) {
13810        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13811        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13812        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13813        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13814        if (CHIP_IS_E1x(sc)) {
13815            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13816            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13817            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13818            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13819        }
13820
13821        /*
13822         * Enable internal target-read (in case we are probed after PF
13823         * FLR). Must be done prior to any BAR read access. Only for
13824         * 57712 and up
13825         */
13826        if (!CHIP_IS_E1x(sc)) {
13827            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13828        }
13829    }
13830
13831    /* get the nvram size */
13832    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13833    sc->devinfo.flash_size =
13834        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13835    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13836
13837    /* get PCI capabilities */
13838    bxe_probe_pci_caps(sc);
13839
13840    bxe_set_power_state(sc, PCI_PM_D0);
13841
13842    /* get various configuration parameters from shmem */
13843    bxe_get_shmem_info(sc);
13844
13845    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13846        val = pci_read_config(sc->dev,
13847                              (sc->devinfo.pcie_msix_cap_reg +
13848                               PCIR_MSIX_CTRL),
13849                              2);
13850        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13851    } else {
13852        sc->igu_sb_cnt = 1;
13853    }
13854
13855    sc->igu_base_addr = BAR_IGU_INTMEM;
13856
13857    /* initialize IGU parameters */
13858    if (CHIP_IS_E1x(sc)) {
13859        sc->devinfo.int_block = INT_BLOCK_HC;
13860        sc->igu_dsb_id = DEF_SB_IGU_ID;
13861        sc->igu_base_sb = 0;
13862    } else {
13863        sc->devinfo.int_block = INT_BLOCK_IGU;
13864
13865        /* do not allow device reset during IGU info processing */
13866        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13867
13868        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13869
13870        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13871            int tout = 5000;
13872
13873            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13874
13875            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13876            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13877            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13878
13879            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13880                tout--;
13881                DELAY(1000);
13882            }
13883
13884            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13885                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13886                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13887                return (-1);
13888            }
13889        }
13890
13891        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13892            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13893            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13894        } else {
13895            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13896        }
13897
13898        rc = bxe_get_igu_cam_info(sc);
13899
13900        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13901
13902        if (rc) {
13903            return (rc);
13904        }
13905    }
13906
13907    /*
13908     * Get base FW non-default (fast path) status block ID. This value is
13909     * used to initialize the fw_sb_id saved on the fp/queue structure to
13910     * determine the id used by the FW.
13911     */
13912    if (CHIP_IS_E1x(sc)) {
13913        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13914    } else {
13915        /*
13916         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13917         * the same queue are indicated on the same IGU SB). So we prefer
13918         * FW and IGU SBs to be the same value.
13919         */
13920        sc->base_fw_ndsb = sc->igu_base_sb;
13921    }
13922
13923    BLOGD(sc, DBG_LOAD,
13924          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13925          sc->igu_dsb_id, sc->igu_base_sb,
13926          sc->igu_sb_cnt, sc->base_fw_ndsb);
13927
13928    elink_phy_probe(&sc->link_params);
13929
13930    return (0);
13931}
13932
13933static void
13934bxe_link_settings_supported(struct bxe_softc *sc,
13935                            uint32_t         switch_cfg)
13936{
13937    uint32_t cfg_size = 0;
13938    uint32_t idx;
13939    uint8_t port = SC_PORT(sc);
13940
13941    /* aggregation of supported attributes of all external phys */
13942    sc->port.supported[0] = 0;
13943    sc->port.supported[1] = 0;
13944
13945    switch (sc->link_params.num_phys) {
13946    case 1:
13947        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13948        cfg_size = 1;
13949        break;
13950    case 2:
13951        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13952        cfg_size = 1;
13953        break;
13954    case 3:
13955        if (sc->link_params.multi_phy_config &
13956            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13957            sc->port.supported[1] =
13958                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13959            sc->port.supported[0] =
13960                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13961        } else {
13962            sc->port.supported[0] =
13963                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13964            sc->port.supported[1] =
13965                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13966        }
13967        cfg_size = 2;
13968        break;
13969    }
13970
13971    if (!(sc->port.supported[0] || sc->port.supported[1])) {
13972        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13973              SHMEM_RD(sc,
13974                       dev_info.port_hw_config[port].external_phy_config),
13975              SHMEM_RD(sc,
13976                       dev_info.port_hw_config[port].external_phy_config2));
13977        return;
13978    }
13979
13980    if (CHIP_IS_E3(sc))
13981        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13982    else {
13983        switch (switch_cfg) {
13984        case ELINK_SWITCH_CFG_1G:
13985            sc->port.phy_addr =
13986                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13987            break;
13988        case ELINK_SWITCH_CFG_10G:
13989            sc->port.phy_addr =
13990                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13991            break;
13992        default:
13993            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
13994                  sc->port.link_config[0]);
13995            return;
13996        }
13997    }
13998
13999    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14000
14001    /* mask what we support according to speed_cap_mask per configuration */
14002    for (idx = 0; idx < cfg_size; idx++) {
14003        if (!(sc->link_params.speed_cap_mask[idx] &
14004              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14005            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14006        }
14007
14008        if (!(sc->link_params.speed_cap_mask[idx] &
14009              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14010            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14011        }
14012
14013        if (!(sc->link_params.speed_cap_mask[idx] &
14014              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14015            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14016        }
14017
14018        if (!(sc->link_params.speed_cap_mask[idx] &
14019              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14020            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14021        }
14022
14023        if (!(sc->link_params.speed_cap_mask[idx] &
14024              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14025            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14026        }
14027
14028        if (!(sc->link_params.speed_cap_mask[idx] &
14029              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14030            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14031        }
14032
14033        if (!(sc->link_params.speed_cap_mask[idx] &
14034              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14035            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14036        }
14037
14038        if (!(sc->link_params.speed_cap_mask[idx] &
14039              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14040            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14041        }
14042    }
14043
14044    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14045          sc->port.supported[0], sc->port.supported[1]);
14046	ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14047					sc->port.supported[0], sc->port.supported[1]);
14048}
14049
14050static void
14051bxe_link_settings_requested(struct bxe_softc *sc)
14052{
14053    uint32_t link_config;
14054    uint32_t idx;
14055    uint32_t cfg_size = 0;
14056
14057    sc->port.advertising[0] = 0;
14058    sc->port.advertising[1] = 0;
14059
14060    switch (sc->link_params.num_phys) {
14061    case 1:
14062    case 2:
14063        cfg_size = 1;
14064        break;
14065    case 3:
14066        cfg_size = 2;
14067        break;
14068    }
14069
14070    for (idx = 0; idx < cfg_size; idx++) {
14071        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14072        link_config = sc->port.link_config[idx];
14073
14074        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14075        case PORT_FEATURE_LINK_SPEED_AUTO:
14076            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14077                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14078                sc->port.advertising[idx] |= sc->port.supported[idx];
14079                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14080                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14081                    sc->port.advertising[idx] |=
14082                        (ELINK_SUPPORTED_100baseT_Half |
14083                         ELINK_SUPPORTED_100baseT_Full);
14084            } else {
14085                /* force 10G, no AN */
14086                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14087                sc->port.advertising[idx] |=
14088                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14089                continue;
14090            }
14091            break;
14092
14093        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14094            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14095                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14096                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14097                                              ADVERTISED_TP);
14098            } else {
14099                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14100                          "speed_cap_mask=0x%08x\n",
14101                      link_config, sc->link_params.speed_cap_mask[idx]);
14102                return;
14103            }
14104            break;
14105
14106        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14107            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14108                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14109                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14110                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14111                                              ADVERTISED_TP);
14112				ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14113								sc->link_params.req_duplex[idx]);
14114            } else {
14115                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14116                          "speed_cap_mask=0x%08x\n",
14117                      link_config, sc->link_params.speed_cap_mask[idx]);
14118                return;
14119            }
14120            break;
14121
14122        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14123            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14124                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14125                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14126                                              ADVERTISED_TP);
14127            } else {
14128                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14129                          "speed_cap_mask=0x%08x\n",
14130                      link_config, sc->link_params.speed_cap_mask[idx]);
14131                return;
14132            }
14133            break;
14134
14135        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14136            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14137                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14138                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14139                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14140                                              ADVERTISED_TP);
14141            } else {
14142                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14143                          "speed_cap_mask=0x%08x\n",
14144                      link_config, sc->link_params.speed_cap_mask[idx]);
14145                return;
14146            }
14147            break;
14148
14149        case PORT_FEATURE_LINK_SPEED_1G:
14150            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14151                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14152                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14153                                              ADVERTISED_TP);
14154            } else {
14155                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14156                          "speed_cap_mask=0x%08x\n",
14157                      link_config, sc->link_params.speed_cap_mask[idx]);
14158                return;
14159            }
14160            break;
14161
14162        case PORT_FEATURE_LINK_SPEED_2_5G:
14163            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14164                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14165                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14166                                              ADVERTISED_TP);
14167            } else {
14168                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14169                          "speed_cap_mask=0x%08x\n",
14170                      link_config, sc->link_params.speed_cap_mask[idx]);
14171                return;
14172            }
14173            break;
14174
14175        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14176            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14177                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14178                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14179                                              ADVERTISED_FIBRE);
14180            } else {
14181                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14182                          "speed_cap_mask=0x%08x\n",
14183                      link_config, sc->link_params.speed_cap_mask[idx]);
14184                return;
14185            }
14186            break;
14187
14188        case PORT_FEATURE_LINK_SPEED_20G:
14189            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14190            break;
14191
14192        default:
14193            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14194                      "speed_cap_mask=0x%08x\n",
14195                  link_config, sc->link_params.speed_cap_mask[idx]);
14196            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14197            sc->port.advertising[idx] = sc->port.supported[idx];
14198            break;
14199        }
14200
14201        sc->link_params.req_flow_ctrl[idx] =
14202            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14203
14204        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14205            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14206                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14207            } else {
14208                bxe_set_requested_fc(sc);
14209            }
14210        }
14211
14212        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14213                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14214              sc->link_params.req_line_speed[idx],
14215              sc->link_params.req_duplex[idx],
14216              sc->link_params.req_flow_ctrl[idx],
14217              sc->port.advertising[idx]);
14218		ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14219						"advertising=0x%x\n",
14220						sc->link_params.req_line_speed[idx],
14221						sc->link_params.req_duplex[idx],
14222						sc->port.advertising[idx]);
14223    }
14224}
14225
14226static void
14227bxe_get_phy_info(struct bxe_softc *sc)
14228{
14229    uint8_t port = SC_PORT(sc);
14230    uint32_t config = sc->port.config;
14231    uint32_t eee_mode;
14232
14233    /* shmem data already read in bxe_get_shmem_info() */
14234
14235    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14236                        "link_config0=0x%08x\n",
14237               sc->link_params.lane_config,
14238               sc->link_params.speed_cap_mask[0],
14239               sc->port.link_config[0]);
14240
14241
14242    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14243    bxe_link_settings_requested(sc);
14244
14245    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14246        sc->link_params.feature_config_flags |=
14247            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14248    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14249        sc->link_params.feature_config_flags &=
14250            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14251    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14252        sc->link_params.feature_config_flags |=
14253            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14254    }
14255
14256    /* configure link feature according to nvram value */
14257    eee_mode =
14258        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14259          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14260         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14261    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14262        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14263                                    ELINK_EEE_MODE_ENABLE_LPI |
14264                                    ELINK_EEE_MODE_OUTPUT_TIME);
14265    } else {
14266        sc->link_params.eee_mode = 0;
14267    }
14268
14269    /* get the media type */
14270    bxe_media_detect(sc);
14271	ELINK_DEBUG_P1(sc, "detected media type %d\n", sc->media);
14272}
14273
14274static void
14275bxe_get_params(struct bxe_softc *sc)
14276{
14277    /* get user tunable params */
14278    bxe_get_tunable_params(sc);
14279
14280    /* select the RX and TX ring sizes */
14281    sc->tx_ring_size = TX_BD_USABLE;
14282    sc->rx_ring_size = RX_BD_USABLE;
14283
14284    /* XXX disable WoL */
14285    sc->wol = 0;
14286}
14287
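/*
 * Build the init-mode flags bitmap (ASIC/FPGA/emulation, 2/4-port mode, chip
 * family, MF mode, endianness) consumed by the common initialization code.
 */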
14288static void
14289bxe_set_modes_bitmap(struct bxe_softc *sc)
14290{
14291    uint32_t flags = 0;
14292
14293    if (CHIP_REV_IS_FPGA(sc)) {
14294        SET_FLAGS(flags, MODE_FPGA);
14295    } else if (CHIP_REV_IS_EMUL(sc)) {
14296        SET_FLAGS(flags, MODE_EMUL);
14297    } else {
14298        SET_FLAGS(flags, MODE_ASIC);
14299    }
14300
14301    if (CHIP_IS_MODE_4_PORT(sc)) {
14302        SET_FLAGS(flags, MODE_PORT4);
14303    } else {
14304        SET_FLAGS(flags, MODE_PORT2);
14305    }
14306
14307    if (CHIP_IS_E2(sc)) {
14308        SET_FLAGS(flags, MODE_E2);
14309    } else if (CHIP_IS_E3(sc)) {
14310        SET_FLAGS(flags, MODE_E3);
14311        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14312            SET_FLAGS(flags, MODE_E3_A0);
14313        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14314            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14315        }
14316    }
14317
14318    if (IS_MF(sc)) {
14319        SET_FLAGS(flags, MODE_MF);
14320        switch (sc->devinfo.mf_info.mf_mode) {
14321        case MULTI_FUNCTION_SD:
14322            SET_FLAGS(flags, MODE_MF_SD);
14323            break;
14324        case MULTI_FUNCTION_SI:
14325            SET_FLAGS(flags, MODE_MF_SI);
14326            break;
14327        case MULTI_FUNCTION_AFEX:
14328            SET_FLAGS(flags, MODE_MF_AFEX);
14329            break;
14330        }
14331    } else {
14332        SET_FLAGS(flags, MODE_SF);
14333    }
14334
14335#if defined(__LITTLE_ENDIAN)
14336    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14337#else /* __BIG_ENDIAN */
14338    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14339#endif
14340
14341    INIT_MODE_FLAGS(sc) = flags;
14342}
14343
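/*
 * Allocate the host software/hardware interface (HSI) DMA memory: the parent
 * DMA tag, default status block, event queue, slow path buffers, firmware
 * decompression buffer, and the per-fastpath chains and mbuf DMA maps.
 */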
14344static int
14345bxe_alloc_hsi_mem(struct bxe_softc *sc)
14346{
14347    struct bxe_fastpath *fp;
14348    bus_addr_t busaddr;
14349    int max_agg_queues;
14350    int max_segments;
14351    bus_size_t max_size;
14352    bus_size_t max_seg_size;
14353    char buf[32];
14354    int rc;
14355    int i, j;
14356
14357    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14358
14359    /* allocate the parent bus DMA tag */
14360    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14361                            1,                        /* alignment */
14362                            0,                        /* boundary limit */
14363                            BUS_SPACE_MAXADDR,        /* restricted low */
14364                            BUS_SPACE_MAXADDR,        /* restricted hi */
14365                            NULL,                     /* addr filter() */
14366                            NULL,                     /* addr filter() arg */
14367                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14368                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14369                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14370                            0,                        /* flags */
14371                            NULL,                     /* lock() */
14372                            NULL,                     /* lock() arg */
14373                            &sc->parent_dma_tag);     /* returned dma tag */
14374    if (rc != 0) {
14375        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14376        return (1);
14377    }
14378
14379    /************************/
14380    /* DEFAULT STATUS BLOCK */
14381    /************************/
14382
14383    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14384                      &sc->def_sb_dma, "default status block") != 0) {
14385        /* XXX */
14386        bus_dma_tag_destroy(sc->parent_dma_tag);
14387        return (1);
14388    }
14389
14390    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14391
14392    /***************/
14393    /* EVENT QUEUE */
14394    /***************/
14395
14396    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14397                      &sc->eq_dma, "event queue") != 0) {
14398        /* XXX */
14399        bxe_dma_free(sc, &sc->def_sb_dma);
14400        sc->def_sb = NULL;
14401        bus_dma_tag_destroy(sc->parent_dma_tag);
14402        return (1);
14403    }
14404
14405    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14406
14407    /*************/
14408    /* SLOW PATH */
14409    /*************/
14410
14411    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14412                      &sc->sp_dma, "slow path") != 0) {
14413        /* XXX */
14414        bxe_dma_free(sc, &sc->eq_dma);
14415        sc->eq = NULL;
14416        bxe_dma_free(sc, &sc->def_sb_dma);
14417        sc->def_sb = NULL;
14418        bus_dma_tag_destroy(sc->parent_dma_tag);
14419        return (1);
14420    }
14421
14422    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14423
14424    /*******************/
14425    /* SLOW PATH QUEUE */
14426    /*******************/
14427
14428    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14429                      &sc->spq_dma, "slow path queue") != 0) {
14430        /* XXX */
14431        bxe_dma_free(sc, &sc->sp_dma);
14432        sc->sp = NULL;
14433        bxe_dma_free(sc, &sc->eq_dma);
14434        sc->eq = NULL;
14435        bxe_dma_free(sc, &sc->def_sb_dma);
14436        sc->def_sb = NULL;
14437        bus_dma_tag_destroy(sc->parent_dma_tag);
14438        return (1);
14439    }
14440
14441    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14442
14443    /***************************/
14444    /* FW DECOMPRESSION BUFFER */
14445    /***************************/
14446
14447    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14448                      "fw decompression buffer") != 0) {
14449        /* XXX */
14450        bxe_dma_free(sc, &sc->spq_dma);
14451        sc->spq = NULL;
14452        bxe_dma_free(sc, &sc->sp_dma);
14453        sc->sp = NULL;
14454        bxe_dma_free(sc, &sc->eq_dma);
14455        sc->eq = NULL;
14456        bxe_dma_free(sc, &sc->def_sb_dma);
14457        sc->def_sb = NULL;
14458        bus_dma_tag_destroy(sc->parent_dma_tag);
14459        return (1);
14460    }
14461
14462    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14463
14464    if ((sc->gz_strm =
14465         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14466        /* XXX */
14467        bxe_dma_free(sc, &sc->gz_buf_dma);
14468        sc->gz_buf = NULL;
14469        bxe_dma_free(sc, &sc->spq_dma);
14470        sc->spq = NULL;
14471        bxe_dma_free(sc, &sc->sp_dma);
14472        sc->sp = NULL;
14473        bxe_dma_free(sc, &sc->eq_dma);
14474        sc->eq = NULL;
14475        bxe_dma_free(sc, &sc->def_sb_dma);
14476        sc->def_sb = NULL;
14477        bus_dma_tag_destroy(sc->parent_dma_tag);
14478        return (1);
14479    }
14480
14481    /*************/
14482    /* FASTPATHS */
14483    /*************/
14484
14485    /* allocate DMA memory for each fastpath structure */
14486    for (i = 0; i < sc->num_queues; i++) {
14487        fp = &sc->fp[i];
14488        fp->sc    = sc;
14489        fp->index = i;
14490
14491        /*******************/
14492        /* FP STATUS BLOCK */
14493        /*******************/
14494
14495        snprintf(buf, sizeof(buf), "fp %d status block", i);
14496        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14497                          &fp->sb_dma, buf) != 0) {
14498            /* XXX unwind and free previous fastpath allocations */
14499            BLOGE(sc, "Failed to alloc %s\n", buf);
14500            return (1);
14501        } else {
14502            if (CHIP_IS_E2E3(sc)) {
14503                fp->status_block.e2_sb =
14504                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14505            } else {
14506                fp->status_block.e1x_sb =
14507                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14508            }
14509        }
14510
14511        /******************/
14512        /* FP TX BD CHAIN */
14513        /******************/
14514
14515        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14516        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14517                          &fp->tx_dma, buf) != 0) {
14518            /* XXX unwind and free previous fastpath allocations */
14519            BLOGE(sc, "Failed to alloc %s\n", buf);
14520            return (1);
14521        } else {
14522            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14523        }
14524
14525        /* link together the tx bd chain pages */
14526        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14527            /* index into the tx bd chain array to last entry per page */
14528            struct eth_tx_next_bd *tx_next_bd =
14529                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14530            /* point to the next page and wrap from last page */
14531            busaddr = (fp->tx_dma.paddr +
14532                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14533            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14534            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14535        }
14536
14537        /******************/
14538        /* FP RX BD CHAIN */
14539        /******************/
14540
14541        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14542        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14543                          &fp->rx_dma, buf) != 0) {
14544            /* XXX unwind and free previous fastpath allocations */
14545            BLOGE(sc, "Failed to alloc %s\n", buf);
14546            return (1);
14547        } else {
14548            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14549        }
14550
14551        /* link together the rx bd chain pages */
14552        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14553            /* index into the rx bd chain array to last entry per page */
14554            struct eth_rx_bd *rx_bd =
14555                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14556            /* point to the next page and wrap from last page */
14557            busaddr = (fp->rx_dma.paddr +
14558                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14559            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14560            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14561        }
14562
14563        /*******************/
14564        /* FP RX RCQ CHAIN */
14565        /*******************/
14566
14567        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14568        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14569                          &fp->rcq_dma, buf) != 0) {
14570            /* XXX unwind and free previous fastpath allocations */
14571            BLOGE(sc, "Failed to alloc %s\n", buf);
14572            return (1);
14573        } else {
14574            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14575        }
14576
14577        /* link together the rcq chain pages */
14578        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14579            /* index into the rcq chain array to last entry per page */
14580            struct eth_rx_cqe_next_page *rx_cqe_next =
14581                (struct eth_rx_cqe_next_page *)
14582                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14583            /* point to the next page and wrap from last page */
14584            busaddr = (fp->rcq_dma.paddr +
14585                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14586            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14587            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14588        }
14589
14590        /*******************/
14591        /* FP RX SGE CHAIN */
14592        /*******************/
14593
14594        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14595        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14596                          &fp->rx_sge_dma, buf) != 0) {
14597            /* XXX unwind and free previous fastpath allocations */
14598            BLOGE(sc, "Failed to alloc %s\n", buf);
14599            return (1);
14600        } else {
14601            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14602        }
14603
14604        /* link together the sge chain pages */
14605        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14606            /* index into the rx sge chain array to last entry per page */
14607            struct eth_rx_sge *rx_sge =
14608                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14609            /* point to the next page and wrap from last page */
14610            busaddr = (fp->rx_sge_dma.paddr +
14611                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14612            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14613            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14614        }
14615
14616        /***********************/
14617        /* FP TX MBUF DMA MAPS */
14618        /***********************/
14619
14620        /* set required sizes before mapping to conserve resources */
14621        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14622            max_size     = BXE_TSO_MAX_SIZE;
14623            max_segments = BXE_TSO_MAX_SEGMENTS;
14624            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14625        } else {
14626            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14627            max_segments = BXE_MAX_SEGMENTS;
14628            max_seg_size = MCLBYTES;
14629        }
14630
14631        /* create a dma tag for the tx mbufs */
14632        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14633                                1,                  /* alignment */
14634                                0,                  /* boundary limit */
14635                                BUS_SPACE_MAXADDR,  /* restricted low */
14636                                BUS_SPACE_MAXADDR,  /* restricted hi */
14637                                NULL,               /* addr filter() */
14638                                NULL,               /* addr filter() arg */
14639                                max_size,           /* max map size */
14640                                max_segments,       /* num discontinuous */
14641                                max_seg_size,       /* max seg size */
14642                                0,                  /* flags */
14643                                NULL,               /* lock() */
14644                                NULL,               /* lock() arg */
14645                                &fp->tx_mbuf_tag);  /* returned dma tag */
14646        if (rc != 0) {
14647            /* XXX unwind and free previous fastpath allocations */
14648            BLOGE(sc, "Failed to create dma tag for "
14649                      "'fp %d tx mbufs' (%d)\n", i, rc);
14650            return (1);
14651        }
14652
14653        /* create dma maps for each of the tx mbuf clusters */
14654        for (j = 0; j < TX_BD_TOTAL; j++) {
14655            if (bus_dmamap_create(fp->tx_mbuf_tag,
14656                                  BUS_DMA_NOWAIT,
14657                                  &fp->tx_mbuf_chain[j].m_map)) {
14658                /* XXX unwind and free previous fastpath allocations */
14659                BLOGE(sc, "Failed to create dma map for "
14660                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14661                return (1);
14662            }
14663        }
14664
14665        /***********************/
14666        /* FP RX MBUF DMA MAPS */
14667        /***********************/
14668
14669        /* create a dma tag for the rx mbufs */
14670        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14671                                1,                  /* alignment */
14672                                0,                  /* boundary limit */
14673                                BUS_SPACE_MAXADDR,  /* restricted low */
14674                                BUS_SPACE_MAXADDR,  /* restricted hi */
14675                                NULL,               /* addr filter() */
14676                                NULL,               /* addr filter() arg */
14677                                MJUM9BYTES,         /* max map size */
14678                                1,                  /* num discontinuous */
14679                                MJUM9BYTES,         /* max seg size */
14680                                0,                  /* flags */
14681                                NULL,               /* lock() */
14682                                NULL,               /* lock() arg */
14683                                &fp->rx_mbuf_tag);  /* returned dma tag */
14684        if (rc != 0) {
14685            /* XXX unwind and free previous fastpath allocations */
14686            BLOGE(sc, "Failed to create dma tag for "
14687                      "'fp %d rx mbufs' (%d)\n", i, rc);
14688            return (1);
14689        }
14690
14691        /* create dma maps for each of the rx mbuf clusters */
14692        for (j = 0; j < RX_BD_TOTAL; j++) {
14693            if (bus_dmamap_create(fp->rx_mbuf_tag,
14694                                  BUS_DMA_NOWAIT,
14695                                  &fp->rx_mbuf_chain[j].m_map)) {
14696                /* XXX unwind and free previous fastpath allocations */
14697                BLOGE(sc, "Failed to create dma map for "
14698                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14699                return (1);
14700            }
14701        }
14702
14703        /* create dma map for the spare rx mbuf cluster */
14704        if (bus_dmamap_create(fp->rx_mbuf_tag,
14705                              BUS_DMA_NOWAIT,
14706                              &fp->rx_mbuf_spare_map)) {
14707            /* XXX unwind and free previous fastpath allocations */
14708            BLOGE(sc, "Failed to create dma map for "
14709                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14710            return (1);
14711        }
14712
14713        /***************************/
14714        /* FP RX SGE MBUF DMA MAPS */
14715        /***************************/
14716
14717        /* create a dma tag for the rx sge mbufs */
14718        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14719                                1,                  /* alignment */
14720                                0,                  /* boundary limit */
14721                                BUS_SPACE_MAXADDR,  /* restricted low */
14722                                BUS_SPACE_MAXADDR,  /* restricted hi */
14723                                NULL,               /* addr filter() */
14724                                NULL,               /* addr filter() arg */
14725                                BCM_PAGE_SIZE,      /* max map size */
14726                                1,                  /* num discontinuous */
14727                                BCM_PAGE_SIZE,      /* max seg size */
14728                                0,                  /* flags */
14729                                NULL,               /* lock() */
14730                                NULL,               /* lock() arg */
14731                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14732        if (rc != 0) {
14733            /* XXX unwind and free previous fastpath allocations */
14734            BLOGE(sc, "Failed to create dma tag for "
14735                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14736            return (1);
14737        }
14738
14739        /* create dma maps for the rx sge mbuf clusters */
14740        for (j = 0; j < RX_SGE_TOTAL; j++) {
14741            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14742                                  BUS_DMA_NOWAIT,
14743                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14744                /* XXX unwind and free previous fastpath allocations */
14745                BLOGE(sc, "Failed to create dma map for "
14746                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14747                return (1);
14748            }
14749        }
14750
14751        /* create dma map for the spare rx sge mbuf cluster */
14752        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14753                              BUS_DMA_NOWAIT,
14754                              &fp->rx_sge_mbuf_spare_map)) {
14755            /* XXX unwind and free previous fastpath allocations */
14756            BLOGE(sc, "Failed to create dma map for "
14757                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14758            return (1);
14759        }
14760
14761        /***************************/
14762        /* FP RX TPA MBUF DMA MAPS */
14763        /***************************/
14764
14765        /* create dma maps for the rx tpa mbuf clusters */
14766        max_agg_queues = MAX_AGG_QS(sc);
14767
14768        for (j = 0; j < max_agg_queues; j++) {
14769            if (bus_dmamap_create(fp->rx_mbuf_tag,
14770                                  BUS_DMA_NOWAIT,
14771                                  &fp->rx_tpa_info[j].bd.m_map)) {
14772                /* XXX unwind and free previous fastpath allocations */
14773                BLOGE(sc, "Failed to create dma map for "
14774                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14775                return (1);
14776            }
14777        }
14778
14779        /* create dma map for the spare rx tpa mbuf cluster */
14780        if (bus_dmamap_create(fp->rx_mbuf_tag,
14781                              BUS_DMA_NOWAIT,
14782                              &fp->rx_tpa_info_mbuf_spare_map)) {
14783            /* XXX unwind and free previous fastpath allocations */
14784            BLOGE(sc, "Failed to create dma map for "
14785                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14786            return (1);
14787        }
14788
14789        bxe_init_sge_ring_bit_mask(fp);
14790    }
14791
14792    return (0);
14793}
14794
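/*
 * Release everything allocated by bxe_alloc_hsi_mem(): the per-fastpath
 * chains and mbuf DMA maps first, then the shared buffers, and finally the
 * parent DMA tag.
 */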
14795static void
14796bxe_free_hsi_mem(struct bxe_softc *sc)
14797{
14798    struct bxe_fastpath *fp;
14799    int max_agg_queues;
14800    int i, j;
14801
14802    if (sc->parent_dma_tag == NULL) {
14803        return; /* assume nothing was allocated */
14804    }
14805
14806    for (i = 0; i < sc->num_queues; i++) {
14807        fp = &sc->fp[i];
14808
14809        /*******************/
14810        /* FP STATUS BLOCK */
14811        /*******************/
14812
14813        bxe_dma_free(sc, &fp->sb_dma);
14814        memset(&fp->status_block, 0, sizeof(fp->status_block));
14815
14816        /******************/
14817        /* FP TX BD CHAIN */
14818        /******************/
14819
14820        bxe_dma_free(sc, &fp->tx_dma);
14821        fp->tx_chain = NULL;
14822
14823        /******************/
14824        /* FP RX BD CHAIN */
14825        /******************/
14826
14827        bxe_dma_free(sc, &fp->rx_dma);
14828        fp->rx_chain = NULL;
14829
14830        /*******************/
14831        /* FP RX RCQ CHAIN */
14832        /*******************/
14833
14834        bxe_dma_free(sc, &fp->rcq_dma);
14835        fp->rcq_chain = NULL;
14836
14837        /*******************/
14838        /* FP RX SGE CHAIN */
14839        /*******************/
14840
14841        bxe_dma_free(sc, &fp->rx_sge_dma);
14842        fp->rx_sge_chain = NULL;
14843
14844        /***********************/
14845        /* FP TX MBUF DMA MAPS */
14846        /***********************/
14847
14848        if (fp->tx_mbuf_tag != NULL) {
14849            for (j = 0; j < TX_BD_TOTAL; j++) {
14850                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14851                    bus_dmamap_unload(fp->tx_mbuf_tag,
14852                                      fp->tx_mbuf_chain[j].m_map);
14853                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14854                                       fp->tx_mbuf_chain[j].m_map);
14855                }
14856            }
14857
14858            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14859            fp->tx_mbuf_tag = NULL;
14860        }
14861
14862        /***********************/
14863        /* FP RX MBUF DMA MAPS */
14864        /***********************/
14865
14866        if (fp->rx_mbuf_tag != NULL) {
14867            for (j = 0; j < RX_BD_TOTAL; j++) {
14868                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14869                    bus_dmamap_unload(fp->rx_mbuf_tag,
14870                                      fp->rx_mbuf_chain[j].m_map);
14871                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14872                                       fp->rx_mbuf_chain[j].m_map);
14873                }
14874            }
14875
14876            if (fp->rx_mbuf_spare_map != NULL) {
14877                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14878                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14879            }
14880
14881            /***************************/
14882            /* FP RX TPA MBUF DMA MAPS */
14883            /***************************/
14884
14885            max_agg_queues = MAX_AGG_QS(sc);
14886
14887            for (j = 0; j < max_agg_queues; j++) {
14888                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14889                    bus_dmamap_unload(fp->rx_mbuf_tag,
14890                                      fp->rx_tpa_info[j].bd.m_map);
14891                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14892                                       fp->rx_tpa_info[j].bd.m_map);
14893                }
14894            }
14895
14896            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14897                bus_dmamap_unload(fp->rx_mbuf_tag,
14898                                  fp->rx_tpa_info_mbuf_spare_map);
14899                bus_dmamap_destroy(fp->rx_mbuf_tag,
14900                                   fp->rx_tpa_info_mbuf_spare_map);
14901            }
14902
14903            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14904            fp->rx_mbuf_tag = NULL;
14905        }
14906
14907        /***************************/
14908        /* FP RX SGE MBUF DMA MAPS */
14909        /***************************/
14910
14911        if (fp->rx_sge_mbuf_tag != NULL) {
14912            for (j = 0; j < RX_SGE_TOTAL; j++) {
14913                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14914                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14915                                      fp->rx_sge_mbuf_chain[j].m_map);
14916                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14917                                       fp->rx_sge_mbuf_chain[j].m_map);
14918                }
14919            }
14920
14921            if (fp->rx_sge_mbuf_spare_map != NULL) {
14922                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14923                                  fp->rx_sge_mbuf_spare_map);
14924                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14925                                   fp->rx_sge_mbuf_spare_map);
14926            }
14927
14928            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14929            fp->rx_sge_mbuf_tag = NULL;
14930        }
14931    }
14932
14933    /***************************/
14934    /* FW DECOMPRESSION BUFFER */
14935    /***************************/
14936
14937    bxe_dma_free(sc, &sc->gz_buf_dma);
14938    sc->gz_buf = NULL;
14939    free(sc->gz_strm, M_DEVBUF);
14940    sc->gz_strm = NULL;
14941
14942    /*******************/
14943    /* SLOW PATH QUEUE */
14944    /*******************/
14945
14946    bxe_dma_free(sc, &sc->spq_dma);
14947    sc->spq = NULL;
14948
14949    /*************/
14950    /* SLOW PATH */
14951    /*************/
14952
14953    bxe_dma_free(sc, &sc->sp_dma);
14954    sc->sp = NULL;
14955
14956    /***************/
14957    /* EVENT QUEUE */
14958    /***************/
14959
14960    bxe_dma_free(sc, &sc->eq_dma);
14961    sc->eq = NULL;
14962
14963    /************************/
14964    /* DEFAULT STATUS BLOCK */
14965    /************************/
14966
14967    bxe_dma_free(sc, &sc->def_sb_dma);
14968    sc->def_sb = NULL;
14969
14970    bus_dma_tag_destroy(sc->parent_dma_tag);
14971    sc->parent_dma_tag = NULL;
14972}
14973
14974/*
14975 * A DMAE transaction from a previous driver may have been in flight when the
14976 * pre-boot stage ended and boot began. This would invalidate the addresses of
14977 * the transaction, setting the was-error bit in the PGLUE block and causing
14978 * all hw-to-host PCIe transactions to time out. If this happened, clear the
14979 * interrupt that detected it in the pglueb along with the was-error bit.
14980 */
14981static void
14982bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14983{
14984    uint32_t val;
14985
14986    if (!CHIP_IS_E1x(sc)) {
14987        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14988        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14989            BLOGD(sc, DBG_LOAD,
14990                  "Clearing 'was-error' bit that was set in pglueb\n");
14991            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14992        }
14993    }
14994}
14995
14996static int
14997bxe_prev_mcp_done(struct bxe_softc *sc)
14998{
14999    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15000                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15001    if (!rc) {
15002        BLOGE(sc, "MCP response failure, aborting\n");
15003        return (-1);
15004    }
15005
15006    return (0);
15007}
15008
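/*
 * Look up the bxe_prev_list entry matching this device's PCIe bus, slot and
 * path. The caller must hold bxe_prev_mtx.
 */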
15009static struct bxe_prev_list_node *
15010bxe_prev_path_get_entry(struct bxe_softc *sc)
15011{
15012    struct bxe_prev_list_node *tmp;
15013
15014    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15015        if ((sc->pcie_bus == tmp->bus) &&
15016            (sc->pcie_device == tmp->slot) &&
15017            (SC_PATH(sc) == tmp->path)) {
15018            return (tmp);
15019        }
15020    }
15021
15022    return (NULL);
15023}
15024
15025static uint8_t
15026bxe_prev_is_path_marked(struct bxe_softc *sc)
15027{
15028    struct bxe_prev_list_node *tmp;
15029    int rc = FALSE;
15030
15031    mtx_lock(&bxe_prev_mtx);
15032
15033    tmp = bxe_prev_path_get_entry(sc);
15034    if (tmp) {
15035        if (tmp->aer) {
15036            BLOGD(sc, DBG_LOAD,
15037                  "Path %d/%d/%d was marked by AER\n",
15038                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15039        } else {
15040            rc = TRUE;
15041            BLOGD(sc, DBG_LOAD,
15042                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15043                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15044        }
15045    }
15046
15047    mtx_unlock(&bxe_prev_mtx);
15048
15049    return (rc);
15050}
15051
15052static int
15053bxe_prev_mark_path(struct bxe_softc *sc,
15054                   uint8_t          after_undi)
15055{
15056    struct bxe_prev_list_node *tmp;
15057
15058    mtx_lock(&bxe_prev_mtx);
15059
15060    /* Check whether the entry for this path already exists */
15061    tmp = bxe_prev_path_get_entry(sc);
15062    if (tmp) {
15063        if (!tmp->aer) {
15064            BLOGD(sc, DBG_LOAD,
15065                  "Re-marking AER in path %d/%d/%d\n",
15066                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15067        } else {
15068            BLOGD(sc, DBG_LOAD,
15069                  "Removing AER indication from path %d/%d/%d\n",
15070                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15071            tmp->aer = 0;
15072        }
15073
15074        mtx_unlock(&bxe_prev_mtx);
15075        return (0);
15076    }
15077
15078    mtx_unlock(&bxe_prev_mtx);
15079
15080    /* Create an entry for this path and add it */
15081    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15082                 (M_NOWAIT | M_ZERO));
15083    if (!tmp) {
15084        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15085        return (-1);
15086    }
15087
15088    tmp->bus  = sc->pcie_bus;
15089    tmp->slot = sc->pcie_device;
15090    tmp->path = SC_PATH(sc);
15091    tmp->aer  = 0;
15092    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15093
15094    mtx_lock(&bxe_prev_mtx);
15095
15096    BLOGD(sc, DBG_LOAD,
15097          "Marked path %d/%d/%d - finished previous unload\n",
15098          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15099    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15100
15101    mtx_unlock(&bxe_prev_mtx);
15102
15103    return (0);
15104}
15105
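/*
 * Ask the MCP to perform a Function Level Reset for this function. Only E2
 * and newer chips, with bootcode REQ_BC_VER_4_INITIATE_FLR or later,
 * support this.
 */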
15106static int
15107bxe_do_flr(struct bxe_softc *sc)
15108{
15109    int i;
15110
15111    /* only E2 and onwards support FLR */
15112    if (CHIP_IS_E1x(sc)) {
15113        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15114        return (-1);
15115    }
15116
15117    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15118    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15119        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15120              sc->devinfo.bc_ver);
15121        return (-1);
15122    }
15123
15124    /* Wait for Transaction Pending bit clean */
15125    for (i = 0; i < 4; i++) {
15126        if (i) {
15127            DELAY(((1 << (i - 1)) * 100) * 1000);
15128        }
15129
15130        if (!bxe_is_pcie_pending(sc)) {
15131            goto clear;
15132        }
15133    }
15134
15135    BLOGE(sc, "PCIE transaction is not cleared, "
15136              "proceeding with reset anyway\n");
15137
15138clear:
15139
15140    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15141    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15142
15143    return (0);
15144}
15145
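/*
 * Saved MAC register addresses and values so that the original settings can
 * be restored after the common reset in bxe_prev_unload_common().
 */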
15146struct bxe_mac_vals {
15147    uint32_t xmac_addr;
15148    uint32_t xmac_val;
15149    uint32_t emac_addr;
15150    uint32_t emac_val;
15151    uint32_t umac_addr;
15152    uint32_t umac_val;
15153    uint32_t bmac_addr;
15154    uint32_t bmac_val[2];
15155};
15156
15157static void
15158bxe_prev_unload_close_mac(struct bxe_softc *sc,
15159                          struct bxe_mac_vals *vals)
15160{
15161    uint32_t val, base_addr, offset, mask, reset_reg;
15162    uint8_t mac_stopped = FALSE;
15163    uint8_t port = SC_PORT(sc);
15164    uint32_t wb_data[2];
15165
15166    /* reset addresses as they also mark which values were changed */
15167    vals->bmac_addr = 0;
15168    vals->umac_addr = 0;
15169    vals->xmac_addr = 0;
15170    vals->emac_addr = 0;
15171
15172    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15173
15174    if (!CHIP_IS_E3(sc)) {
15175        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15176        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15177        if ((mask & reset_reg) && val) {
15178            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15179            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15180                                    : NIG_REG_INGRESS_BMAC0_MEM;
15181            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15182                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15183
15184            /*
15185             * use rd/wr since we cannot use dmae. This is safe
15186             * since MCP won't access the bus due to the request
15187             * to unload, and no function on the path can be
15188             * loaded at this time.
15189             */
15190            wb_data[0] = REG_RD(sc, base_addr + offset);
15191            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15192            vals->bmac_addr = base_addr + offset;
15193            vals->bmac_val[0] = wb_data[0];
15194            vals->bmac_val[1] = wb_data[1];
15195            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15196            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15197            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15198        }
15199
15200        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15201        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15202        vals->emac_val = REG_RD(sc, vals->emac_addr);
15203        REG_WR(sc, vals->emac_addr, 0);
15204        mac_stopped = TRUE;
15205    } else {
15206        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15207            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15208            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15209            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15210            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15211            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15212            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15213            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15214            REG_WR(sc, vals->xmac_addr, 0);
15215            mac_stopped = TRUE;
15216        }
15217
15218        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15219        if (mask & reset_reg) {
15220            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15221            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15222            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15223            vals->umac_val = REG_RD(sc, vals->umac_addr);
15224            REG_WR(sc, vals->umac_addr, 0);
15225            mac_stopped = TRUE;
15226        }
15227    }
15228
15229    if (mac_stopped) {
15230        DELAY(20000);
15231    }
15232}
15233
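/*
 * The UNDI producer for each port lives in TSTORM internal memory with the
 * RCQ producer in the low 16 bits and the BD producer in the high 16 bits.
 * These macros locate, unpack, and repack that value.
 */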
15234#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15235#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15236#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15237#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15238
15239static void
15240bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15241                         uint8_t          port,
15242                         uint8_t          inc)
15243{
15244    uint16_t rcq, bd;
15245    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15246
15247    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15248    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15249
15250    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15251    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15252
15253    BLOGD(sc, DBG_LOAD,
15254          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15255          port, bd, rcq);
15256}
15257
15258static int
15259bxe_prev_unload_common(struct bxe_softc *sc)
15260{
15261    uint32_t reset_reg, tmp_reg = 0, rc;
15262    uint8_t prev_undi = FALSE;
15263    struct bxe_mac_vals mac_vals;
15264    uint32_t timer_count = 1000;
15265    uint32_t prev_brb;
15266
15267    /*
15268     * It is possible that a previous function received the 'common' answer
15269     * but has not loaded yet, creating a scenario where multiple functions
15270     * receive 'common' on the same path.
15271     */
15272    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15273
15274    memset(&mac_vals, 0, sizeof(mac_vals));
15275
15276    if (bxe_prev_is_path_marked(sc)) {
15277        return (bxe_prev_mcp_done(sc));
15278    }
15279
15280    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15281
15282    /* Reset should be performed after BRB is emptied */
15283    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15284        /* Close the MAC Rx to prevent BRB from filling up */
15285        bxe_prev_unload_close_mac(sc, &mac_vals);
15286
15287        /* close LLH filters towards the BRB */
15288        elink_set_rx_filter(&sc->link_params, 0);
15289
15290        /*
15291         * Check if the UNDI driver was previously loaded.
15292         * UNDI driver initializes CID offset for normal bell to 0x7
15293         */
15294        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15295            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15296            if (tmp_reg == 0x7) {
15297                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15298                prev_undi = TRUE;
15299                /* clear the UNDI indication */
15300                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15301                /* clear possible idle check errors */
15302                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15303            }
15304        }
15305
15306        /* wait until BRB is empty */
15307        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15308        while (timer_count) {
15309            prev_brb = tmp_reg;
15310
15311            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15312            if (!tmp_reg) {
15313                break;
15314            }
15315
15316            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15317
15318            /* reset timer as long as BRB actually gets emptied */
15319            if (prev_brb > tmp_reg) {
15320                timer_count = 1000;
15321            } else {
15322                timer_count--;
15323            }
15324
15325            /* If UNDI resides in memory, manually increment it */
15326            if (prev_undi) {
15327                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15328            }
15329
15330            DELAY(10);
15331        }
15332
15333        if (!timer_count) {
15334            BLOGE(sc, "Failed to empty BRB\n");
15335        }
15336    }
15337
15338    /* No packets are in the pipeline, path is ready for reset */
15339    bxe_reset_common(sc);
15340
15341    if (mac_vals.xmac_addr) {
15342        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15343    }
15344    if (mac_vals.umac_addr) {
15345        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15346    }
15347    if (mac_vals.emac_addr) {
15348        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15349    }
15350    if (mac_vals.bmac_addr) {
15351        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15352        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15353    }
15354
15355    rc = bxe_prev_mark_path(sc, prev_undi);
15356    if (rc) {
15357        bxe_prev_mcp_done(sc);
15358        return (rc);
15359    }
15360
15361    return (bxe_prev_mcp_done(sc));
15362}
15363
15364static int
15365bxe_prev_unload_uncommon(struct bxe_softc *sc)
15366{
15367    int rc;
15368
15369    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15370
15371    /* Test if previous unload process was already finished for this path */
15372    if (bxe_prev_is_path_marked(sc)) {
15373        return (bxe_prev_mcp_done(sc));
15374    }
15375
15376    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15377
15378    /*
15379     * If function has FLR capabilities, and existing FW version matches
15380     * the one required, then FLR will be sufficient to clean any residue
15381     * left by previous driver
15382     */
15383    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15384    if (!rc) {
15385        /* fw version is good */
15386        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15387        rc = bxe_do_flr(sc);
15388    }
15389
15390    if (!rc) {
15391        /* FLR was performed */
15392        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15393        return (0);
15394    }
15395
15396    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15397
15398    /* Close the MCP request, return failure */
15399    rc = bxe_prev_mcp_done(sc);
15400    if (!rc) {
15401        rc = BXE_PREV_WAIT_NEEDED;
15402    }
15403
15404    return (rc);
15405}
15406
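/*
 * Clean up any state left behind by a previous driver (e.g. a pre-boot UNDI
 * driver): release stale HW/NVRAM locks, then loop on MCP unload requests,
 * running either the common or the uncommon (FLR-based) unload flow as
 * directed by the MCP response.
 */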
15407static int
15408bxe_prev_unload(struct bxe_softc *sc)
15409{
15410    int time_counter = 10;
15411    uint32_t fw, hw_lock_reg, hw_lock_val;
15412    uint32_t rc = 0;
15413
15414    /*
15415     * Clear HW from errors which may have resulted from an interrupted
15416     * DMAE transaction.
15417     */
15418    bxe_prev_interrupted_dmae(sc);
15419
15420    /* Release previously held locks */
15421    hw_lock_reg =
15422        (SC_FUNC(sc) <= 5) ?
15423            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15424            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15425
15426    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15427    if (hw_lock_val) {
15428        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15429            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15430            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15431                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15432        }
15433        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15434        REG_WR(sc, hw_lock_reg, 0xffffffff);
15435    } else {
15436        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15437    }
15438
15439    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15440        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15441        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15442    }
15443
15444    do {
15445        /* Lock MCP using an unload request */
15446        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15447        if (!fw) {
15448            BLOGE(sc, "MCP response failure, aborting\n");
15449            rc = -1;
15450            break;
15451        }
15452
15453        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15454            rc = bxe_prev_unload_common(sc);
15455            break;
15456        }
15457
15458        /* non-common reply from MCP might require looping */
15459        rc = bxe_prev_unload_uncommon(sc);
15460        if (rc != BXE_PREV_WAIT_NEEDED) {
15461            break;
15462        }
15463
15464        DELAY(20000);
15465    } while (--time_counter);
15466
15467    if (!time_counter || rc) {
15468        BLOGE(sc, "Failed to unload previous driver!"
15469            " time_counter %d rc %d\n", time_counter, rc);
15470        rc = -1;
15471    }
15472
15473    return (rc);
15474}
15475
15476void
15477bxe_dcbx_set_state(struct bxe_softc *sc,
15478                   uint8_t          dcb_on,
15479                   uint32_t         dcbx_enabled)
15480{
15481    if (!CHIP_IS_E1x(sc)) {
15482        sc->dcb_state = dcb_on;
15483        sc->dcbx_enabled = dcbx_enabled;
15484    } else {
15485        sc->dcb_state = FALSE;
15486        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15487    }
15488    BLOGD(sc, DBG_LOAD,
15489          "DCB state [%s:%s]\n",
15490          dcb_on ? "ON" : "OFF",
15491          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15492          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15493          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15494          "on-chip with negotiation" : "invalid");
15495}
15496
15497/* must be called after sriov-enable */
15498static int
15499bxe_set_qm_cid_count(struct bxe_softc *sc)
15500{
15501    int cid_count = BXE_L2_MAX_CID(sc);
15502
15503    if (IS_SRIOV(sc)) {
15504        cid_count += BXE_VF_CIDS;
15505    }
15506
15507    if (CNIC_SUPPORT(sc)) {
15508        cid_count += CNIC_CID_MAX;
15509    }
15510
15511    return (roundup(cid_count, QM_CID_ROUND));
15512}
15513
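/*
 * Build the priority-to-CoS mapping. Each 4-bit nibble of pri_map selects
 * the CoS for the corresponding priority; values outside the valid CoS
 * range fall back to CoS 0.
 */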
15514static void
15515bxe_init_multi_cos(struct bxe_softc *sc)
15516{
15517    int pri, cos;
15518
15519    uint32_t pri_map = 0; /* XXX change to user config */
15520
15521    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15522        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15523        if (cos < sc->max_cos) {
15524            sc->prio_to_cos[pri] = cos;
15525        } else {
15526            BLOGW(sc, "Invalid COS %d for priority %d "
15527                      "(max COS is %d), setting to 0\n",
15528                  cos, pri, (sc->max_cos - 1));
15529            sc->prio_to_cos[pri] = 0;
15530        }
15531    }
15532}
15533
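/*
 * Sysctl handler for dev.bxe.N.state. Writing 1 logs a driver state dump,
 * currently the device temperature read from shared memory.
 */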
15534static int
15535bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15536{
15537    struct bxe_softc *sc;
15538    int error, result;
15539
15540    result = 0;
15541    error = sysctl_handle_int(oidp, &result, 0, req);
15542
15543    if (error || !req->newptr) {
15544        return (error);
15545    }
15546
15547    if (result == 1) {
15548        uint32_t  temp;
15549        sc = (struct bxe_softc *)arg1;
15550
15551        BLOGI(sc, "... dumping driver state ...\n");
15552        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15553        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15554    }
15555
15556    return (error);
15557}
15558
15559static int
15560bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15561{
15562    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15563    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15564    uint32_t *offset;
15565    uint64_t value = 0;
15566    int index = (int)arg2;
15567
15568    if (index >= BXE_NUM_ETH_STATS) {
15569        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15570        return (-1);
15571    }
15572
15573    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15574
15575    switch (bxe_eth_stats_arr[index].size) {
15576    case 4:
15577        value = (uint64_t)*offset;
15578        break;
15579    case 8:
15580        value = HILO_U64(*offset, *(offset + 1));
15581        break;
15582    default:
15583        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15584              index, bxe_eth_stats_arr[index].size);
15585        return (-1);
15586    }
15587
15588    return (sysctl_handle_64(oidp, &value, 0, req));
15589}
15590
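/*
 * Per-queue statistics sysctl handler. arg2 encodes the fastpath index in
 * the upper 16 bits and the statistic index in the lower 16 bits.
 */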
15591static int
15592bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15593{
15594    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15595    uint32_t *eth_stats;
15596    uint32_t *offset;
15597    uint64_t value = 0;
15598    uint32_t q_stat = (uint32_t)arg2;
15599    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15600    uint32_t index = (q_stat & 0xffff);
15601
15602    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15603
15604    if (index >= BXE_NUM_ETH_Q_STATS) {
15605        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15606        return (-1);
15607    }
15608
15609    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15610
15611    switch (bxe_eth_q_stats_arr[index].size) {
15612    case 4:
15613        value = (uint64_t)*offset;
15614        break;
15615    case 8:
15616        value = HILO_U64(*offset, *(offset + 1));
15617        break;
15618    default:
15619        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15620              index, bxe_eth_q_stats_arr[index].size);
15621        return (-1);
15622    }
15623
15624    return (sysctl_handle_64(oidp, &value, 0, req));
15625}
15626
15627static void bxe_force_link_reset(struct bxe_softc *sc)
15628{
15629
15630        bxe_acquire_phy_lock(sc);
15631        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15632        bxe_release_phy_lock(sc);
15633}
15634
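/*
 * Sysctl handler for dev.bxe.N.pause_param. Translates the user-supplied
 * value (0-8, see the sysctl description string) into the elink requested
 * flow control settings; on a PF with the interface running it then forces
 * a link reset and re-initializes the PHY so the new settings take effect.
 */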
15635static int
15636bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15637{
15638        struct bxe_softc *sc = (struct bxe_softc *)arg1;
15639        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15640        int rc = 0;
15641        int error;
15642        int result;
15643
15644
15645        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15646
15647        if (error || !req->newptr) {
15648                return (error);
15649        }
15650        if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15651                BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n", sc->bxe_pause_param);
15652                sc->bxe_pause_param = 8;
15653        }
15654
15655        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15656
15657
15658        if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15659                BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15660                return (-EINVAL);
15661        }
15662
15663        if (IS_MF(sc))
15664                return (0);
15665        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15666        if (result & ELINK_FLOW_CTRL_RX)
15667                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15668
15669        if (result & ELINK_FLOW_CTRL_TX)
15670                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15671        if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15672                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15673
15674        if (result & 0x400) {
15675                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15676                        sc->link_params.req_flow_ctrl[cfg_idx] =
15677                                ELINK_FLOW_CTRL_AUTO;
15678                }
15679                sc->link_params.req_fc_auto_adv = 0;
15680                if (result & ELINK_FLOW_CTRL_RX)
15681                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15682
15683                if (result & ELINK_FLOW_CTRL_TX)
15684                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15685                if (!sc->link_params.req_fc_auto_adv)
15686                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15687        }
15688        if (IS_PF(sc)) {
15689                if (sc->link_vars.link_up) {
15690                        bxe_stats_handle(sc, STATS_EVENT_STOP);
15691                }
15692                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15693                        bxe_force_link_reset(sc);
15694                        bxe_acquire_phy_lock(sc);
15695
15696                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15697
15698                        bxe_release_phy_lock(sc);
15699
15700                        bxe_calc_fc_adv(sc);
15701                }
15702        }
15703        return (rc);
15704}
15705
15706
15707static void
15708bxe_add_sysctls(struct bxe_softc *sc)
15709{
15710    struct sysctl_ctx_list *ctx;
15711    struct sysctl_oid_list *children;
15712    struct sysctl_oid *queue_top, *queue;
15713    struct sysctl_oid_list *queue_top_children, *queue_children;
15714    char queue_num_buf[32];
15715    uint32_t q_stat;
15716    int i, j;
15717
15718    ctx = device_get_sysctl_ctx(sc->dev);
15719    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15720
15721    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15722                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15723                      "version");
15724
15725    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15726             BCM_5710_FW_MAJOR_VERSION,
15727             BCM_5710_FW_MINOR_VERSION,
15728             BCM_5710_FW_REVISION_VERSION,
15729             BCM_5710_FW_ENGINEERING_VERSION);
15730
15731    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15732        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15733         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15734         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15735         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15736                                                                "Unknown"));
15737    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15738                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15739                    "multifunction vnics per port");
15740
15741    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15742        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15743         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15744         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15745                                              "???GT/s"),
15746        sc->devinfo.pcie_link_width);
15747
15748    sc->debug = bxe_debug;
15749
15750#if __FreeBSD_version >= 900000
15751    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15752                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15753                      "bootcode version");
15754    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15755                      CTLFLAG_RD, sc->fw_ver_str, 0,
15756                      "firmware version");
15757    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15758                      CTLFLAG_RD, sc->mf_mode_str, 0,
15759                      "multifunction mode");
15760    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15761                      CTLFLAG_RD, sc->mac_addr_str, 0,
15762                      "mac address");
15763    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15764                      CTLFLAG_RD, sc->pci_link_str, 0,
15765                      "pci link status");
15766    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15767                    CTLFLAG_RW, &sc->debug,
15768                    "debug logging mode");
15769#else
15770    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15771                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15772                      "bootcode version");
15773    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15774                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15775                      "firmware version");
15776    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15777                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15778                      "multifunction mode");
15779    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15780                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15781                      "mac address");
15782    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15783                      CTLFLAG_RD, &sc->pci_link_str, 0,
15784                      "pci link status");
15785    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15786                    CTLFLAG_RW, &sc->debug, 0,
15787                    "debug logging mode");
15788#endif /* #if __FreeBSD_version >= 900000 */
15789
15790    sc->trigger_grcdump = 0;
15791    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15792                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15793                   "set to trigger a grcdump before"
15794                   " collecting the grcdump data");
15795
15796    sc->grcdump_started = 0;
15797    sc->grcdump_done = 0;
15798    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15799                   CTLFLAG_RD, &sc->grcdump_done, 0,
15800                   "set by driver when grcdump is done");
15801
15802    sc->rx_budget = bxe_rx_budget;
15803    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15804                    CTLFLAG_RW, &sc->rx_budget, 0,
15805                    "rx processing budget");
15806
15807    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
15808                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15809                    bxe_sysctl_pauseparam, "IU",
15810                    "need pause frames - DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
15811
15812
15813    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15814                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15815                    bxe_sysctl_state, "IU", "dump driver state");
15816
15817    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15818        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15819                        bxe_eth_stats_arr[i].string,
15820                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15821                        bxe_sysctl_eth_stat, "LU",
15822                        bxe_eth_stats_arr[i].string);
15823    }
15824
15825    /* add a new parent node for all queues "dev.bxe.#.queue" */
15826    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15827                                CTLFLAG_RD, NULL, "queue");
15828    queue_top_children = SYSCTL_CHILDREN(queue_top);
15829
15830    for (i = 0; i < sc->num_queues; i++) {
15831        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15832        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15833        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15834                                queue_num_buf, CTLFLAG_RD, NULL,
15835                                "single queue");
15836        queue_children = SYSCTL_CHILDREN(queue);
15837
15838        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
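            /* encode the queue index in the upper 16 bits and the stat index in the lower 16 bits */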
15839            q_stat = ((i << 16) | j);
15840            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15841                            bxe_eth_q_stats_arr[j].string,
15842                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15843                            bxe_sysctl_eth_q_stat, "LU",
15844                            bxe_eth_q_stats_arr[j].string);
15845        }
15846    }
15847}
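/*
 * A minimal userland sketch (not part of the driver) of how these nodes can
 * be read, assuming device unit 0; the OID names follow the strings added
 * above under the device's sysctl tree ("dev.bxe.0.*"):
 *
 *   #include <sys/types.h>
 *   #include <sys/sysctl.h>
 *   #include <stdio.h>
 *
 *   int
 *   main(void)
 *   {
 *       uint32_t done = 0;
 *       size_t len = sizeof(done);
 *
 *       if (sysctlbyname("dev.bxe.0.grcdump_done", &done, &len, NULL, 0) == 0)
 *           printf("grcdump_done: %u\n", done);
 *       return (0);
 *   }
 */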
15848
15849static int
15850bxe_alloc_buf_rings(struct bxe_softc *sc)
15851{
15852#if __FreeBSD_version >= 901504
15853
15854    int i;
15855    struct bxe_fastpath *fp;
15856
15857    for (i = 0; i < sc->num_queues; i++) {
15858
15859        fp = &sc->fp[i];
15860
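        /* allocate the per-queue transmit buf_ring, associated with this queue's tx mutex */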
15861        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15862                                   M_NOWAIT, &fp->tx_mtx);
15863        if (fp->tx_br == NULL)
15864            return (-1);
15865    }
15866#endif
15867    return (0);
15868}
15869
15870static void
15871bxe_free_buf_rings(struct bxe_softc *sc)
15872{
15873#if __FreeBSD_version >= 901504
15874
15875    int i;
15876    struct bxe_fastpath *fp;
15877
15878    for (i = 0; i < sc->num_queues; i++) {
15879
15880        fp = &sc->fp[i];
15881
15882        if (fp->tx_br) {
15883            buf_ring_free(fp->tx_br, M_DEVBUF);
15884            fp->tx_br = NULL;
15885        }
15886    }
15887
15888#endif
15889}
15890
15891static void
15892bxe_init_fp_mutexs(struct bxe_softc *sc)
15893{
15894    int i;
15895    struct bxe_fastpath *fp;
15896
15897    for (i = 0; i < sc->num_queues; i++) {
15898
15899        fp = &sc->fp[i];
15900
15901        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15902            "bxe%d_fp%d_tx_lock", sc->unit, i);
15903        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15904
15905        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15906            "bxe%d_fp%d_rx_lock", sc->unit, i);
15907        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15908    }
15909}
15910
15911static void
15912bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15913{
15914    int i;
15915    struct bxe_fastpath *fp;
15916
15917    for (i = 0; i < sc->num_queues; i++) {
15918
15919        fp = &sc->fp[i];
15920
15921        if (mtx_initialized(&fp->tx_mtx)) {
15922            mtx_destroy(&fp->tx_mtx);
15923        }
15924
15925        if (mtx_initialized(&fp->rx_mtx)) {
15926            mtx_destroy(&fp->rx_mtx);
15927        }
15928    }
15929}
15930
15931
15932/*
15933 * Device attach function.
15934 *
15935 * Allocates device resources, performs secondary chip identification, and
15936 * initializes driver instance variables. This function is called from driver
15937 * load after a successful probe.
15938 *
15939 * Returns:
15940 *   0 = Success, >0 = Failure
15941 */
15942static int
15943bxe_attach(device_t dev)
15944{
15945    struct bxe_softc *sc;
15946
15947    sc = device_get_softc(dev);
15948
15949    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15950
15951    sc->state = BXE_STATE_CLOSED;
15952
15953    sc->dev  = dev;
15954    sc->unit = device_get_unit(dev);
15955
15956    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15957
15958    sc->pcie_bus    = pci_get_bus(dev);
15959    sc->pcie_device = pci_get_slot(dev);
15960    sc->pcie_func   = pci_get_function(dev);
15961
15962    /* enable bus master capability */
15963    pci_enable_busmaster(dev);
15964
15965    /* get the BARs */
15966    if (bxe_allocate_bars(sc) != 0) {
15967        return (ENXIO);
15968    }
15969
15970    /* initialize the mutexes */
15971    bxe_init_mutexes(sc);
15972
15973    /* prepare the periodic callout */
15974    callout_init(&sc->periodic_callout, 0);
15975
15976    /* prepare the chip taskqueue */
15977    sc->chip_tq_flags = CHIP_TQ_NONE;
15978    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15979             "bxe%d_chip_tq", sc->unit);
15980    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15981    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15982                                   taskqueue_thread_enqueue,
15983                                   &sc->chip_tq);
15984    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15985                            "%s", sc->chip_tq_name);
15986
15987    /* get device info and set params */
15988    if (bxe_get_device_info(sc) != 0) {
15989        BLOGE(sc, "failed to get device info\n");
15990        bxe_deallocate_bars(sc);
15991        pci_disable_busmaster(dev);
15992        return (ENXIO);
15993    }
15994
15995    /* get final misc params */
15996    bxe_get_params(sc);
15997
15998    /* set the default MTU (changed via ifconfig) */
15999    sc->mtu = ETHERMTU;
16000
16001    bxe_set_modes_bitmap(sc);
16002
16003    /* XXX
16004     * If in AFEX mode and the function is configured for FCoE
16005     * then bail... no L2 allowed.
16006     */
16007
16008    /* get phy settings from shmem and 'and' against admin settings */
16009    bxe_get_phy_info(sc);
16010
16011    /* initialize the FreeBSD ifnet interface */
16012    if (bxe_init_ifnet(sc) != 0) {
16013        bxe_release_mutexes(sc);
16014        bxe_deallocate_bars(sc);
16015        pci_disable_busmaster(dev);
16016        return (ENXIO);
16017    }
16018
16019    if (bxe_add_cdev(sc) != 0) {
16020        if (sc->ifp != NULL) {
16021            ether_ifdetach(sc->ifp);
16022        }
16023        ifmedia_removeall(&sc->ifmedia);
16024        bxe_release_mutexes(sc);
16025        bxe_deallocate_bars(sc);
16026        pci_disable_busmaster(dev);
16027        return (ENXIO);
16028    }
16029
16030    /* allocate device interrupts */
16031    if (bxe_interrupt_alloc(sc) != 0) {
16032        bxe_del_cdev(sc);
16033        if (sc->ifp != NULL) {
16034            ether_ifdetach(sc->ifp);
16035        }
16036        ifmedia_removeall(&sc->ifmedia);
16037        bxe_release_mutexes(sc);
16038        bxe_deallocate_bars(sc);
16039        pci_disable_busmaster(dev);
16040        return (ENXIO);
16041    }
16042
16043    bxe_init_fp_mutexs(sc);
16044
16045    if (bxe_alloc_buf_rings(sc) != 0) {
16046        bxe_free_buf_rings(sc);
16047        bxe_interrupt_free(sc);
16048        bxe_del_cdev(sc);
16049        if (sc->ifp != NULL) {
16050            ether_ifdetach(sc->ifp);
16051        }
16052        ifmedia_removeall(&sc->ifmedia);
16053        bxe_release_mutexes(sc);
16054        bxe_deallocate_bars(sc);
16055        pci_disable_busmaster(dev);
16056        return (ENXIO);
16057    }
16058
16059    /* allocate ilt */
16060    if (bxe_alloc_ilt_mem(sc) != 0) {
16061        bxe_free_buf_rings(sc);
16062        bxe_interrupt_free(sc);
16063        bxe_del_cdev(sc);
16064        if (sc->ifp != NULL) {
16065            ether_ifdetach(sc->ifp);
16066        }
16067        ifmedia_removeall(&sc->ifmedia);
16068        bxe_release_mutexes(sc);
16069        bxe_deallocate_bars(sc);
16070        pci_disable_busmaster(dev);
16071        return (ENXIO);
16072    }
16073
16074    /* allocate the host hardware/software hsi structures */
16075    if (bxe_alloc_hsi_mem(sc) != 0) {
16076        bxe_free_ilt_mem(sc);
16077        bxe_free_buf_rings(sc);
16078        bxe_interrupt_free(sc);
16079        bxe_del_cdev(sc);
16080        if (sc->ifp != NULL) {
16081            ether_ifdetach(sc->ifp);
16082        }
16083        ifmedia_removeall(&sc->ifmedia);
16084        bxe_release_mutexes(sc);
16085        bxe_deallocate_bars(sc);
16086        pci_disable_busmaster(dev);
16087        return (ENXIO);
16088    }
16089
16090    /* need to reset chip if UNDI was active */
16091    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16092        /* init fw_seq */
16093        sc->fw_seq =
16094            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16095             DRV_MSG_SEQ_NUMBER_MASK);
16096        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16097        bxe_prev_unload(sc);
16098    }
16099
16100#if 1
16101    /* XXX */
16102    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16103#else
16104    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16105        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16106        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16107        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16108        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16109        bxe_dcbx_init_params(sc);
16110    } else {
16111        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16112    }
16113#endif
16114
16115    /* calculate qm_cid_count */
16116    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16117    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16118
16119    sc->max_cos = 1;
16120    bxe_init_multi_cos(sc);
16121
16122    bxe_add_sysctls(sc);
16123
16124    return (0);
16125}
16126
16127/*
16128 * Device detach function.
16129 *
16130 * Stops the controller, resets the controller, and releases resources.
16131 *
16132 * Returns:
16133 *   0 = Success, >0 = Failure
16134 */
16135static int
16136bxe_detach(device_t dev)
16137{
16138    struct bxe_softc *sc;
16139    if_t ifp;
16140
16141    sc = device_get_softc(dev);
16142
16143    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16144
16145    ifp = sc->ifp;
16146    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16147        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16148        return(EBUSY);
16149    }
16150
16151    bxe_del_cdev(sc);
16152
16153    /* stop the periodic callout */
16154    bxe_periodic_stop(sc);
16155
16156    /* stop the chip taskqueue */
16157    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16158    if (sc->chip_tq) {
16159        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16160        taskqueue_free(sc->chip_tq);
16161        sc->chip_tq = NULL;
16162    }
16163
16164    /* stop and reset the controller if it was open */
16165    if (sc->state != BXE_STATE_CLOSED) {
16166        BXE_CORE_LOCK(sc);
16167        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16168        sc->state = BXE_STATE_DISABLED;
16169        BXE_CORE_UNLOCK(sc);
16170    }
16171
16172    /* release the network interface */
16173    if (ifp != NULL) {
16174        ether_ifdetach(ifp);
16175    }
16176    ifmedia_removeall(&sc->ifmedia);
16177
16178    /* XXX do the following based on driver state... */
16179
16180    /* free the host hardware/software hsi structures */
16181    bxe_free_hsi_mem(sc);
16182
16183    /* free ilt */
16184    bxe_free_ilt_mem(sc);
16185
16186    bxe_free_buf_rings(sc);
16187
16188    /* release the interrupts */
16189    bxe_interrupt_free(sc);
16190
16191    /* Release the mutexes */
16192    bxe_destroy_fp_mutexs(sc);
16193    bxe_release_mutexes(sc);
16194
16195
16196    /* Release the PCIe BAR mapped memory */
16197    bxe_deallocate_bars(sc);
16198
16199    /* Release the FreeBSD interface. */
16200    if (sc->ifp != NULL) {
16201        if_free(sc->ifp);
16202    }
16203
16204    pci_disable_busmaster(dev);
16205
16206    return (0);
16207}
16208
16209/*
16210 * Device shutdown function.
16211 *
16212 * Stops and resets the controller.
16213 *
16214 * Returns:
16215 *   0 = Success
16216 */
16217static int
16218bxe_shutdown(device_t dev)
16219{
16220    struct bxe_softc *sc;
16221
16222    sc = device_get_softc(dev);
16223
16224    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16225
16226    /* stop the periodic callout */
16227    bxe_periodic_stop(sc);
16228
16229    BXE_CORE_LOCK(sc);
16230    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16231    BXE_CORE_UNLOCK(sc);
16232
16233    return (0);
16234}
16235
16236void
16237bxe_igu_ack_sb(struct bxe_softc *sc,
16238               uint8_t          igu_sb_id,
16239               uint8_t          segment,
16240               uint16_t         index,
16241               uint8_t          op,
16242               uint8_t          update)
16243{
16244    uint32_t igu_addr = sc->igu_base_addr;
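    /* each IGU command occupies 8 bytes; compute the byte offset of this SB's INT_ACK command */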
16245    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16246    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16247}
16248
16249static void
16250bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16251                     uint8_t          func,
16252                     uint8_t          idu_sb_id,
16253                     uint8_t          is_pf)
16254{
16255    uint32_t data, ctl, cnt = 100;
16256    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16257    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
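    /* each cleanup-ack register covers 32 status blocks; pick the dword and bit for this SB */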
16258    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16259    uint32_t sb_bit =  1 << (idu_sb_id%32);
16260    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16261    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16262
16263    /* Not supported in BC mode */
16264    if (CHIP_INT_MODE_IS_BC(sc)) {
16265        return;
16266    }
16267
16268    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16269             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16270            IGU_REGULAR_CLEANUP_SET |
16271            IGU_REGULAR_BCLEANUP);
16272
16273    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16274           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16275           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16276
16277    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16278            data, igu_addr_data);
16279    REG_WR(sc, igu_addr_data, data);
16280
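    /* make sure the data write is posted to the IGU before issuing the control command */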
16281    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16282                      BUS_SPACE_BARRIER_WRITE);
16283    mb();
16284
16285    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16286            ctl, igu_addr_ctl);
16287    REG_WR(sc, igu_addr_ctl, ctl);
16288
16289    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16290                      BUS_SPACE_BARRIER_WRITE);
16291    mb();
16292
16293    /* wait for clean up to finish */
16294    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16295        DELAY(20000);
16296    }
16297
16298    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16299        BLOGD(sc, DBG_LOAD,
16300              "Unable to finish IGU cleanup: "
16301              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16302              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16303    }
16304}
16305
16306static void
16307bxe_igu_clear_sb(struct bxe_softc *sc,
16308                 uint8_t          idu_sb_id)
16309{
16310    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16311}
16312
16313
16314
16315
16316
16317
16318
16319/*******************/
16320/* ECORE CALLBACKS */
16321/*******************/
16322
16323static void
16324bxe_reset_common(struct bxe_softc *sc)
16325{
16326    uint32_t val = 0x1400;
16327
16328    /* reset_common */
16329    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16330
16331    if (CHIP_IS_E3(sc)) {
16332        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16333        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16334    }
16335
16336    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16337}
16338
16339static void
16340bxe_common_init_phy(struct bxe_softc *sc)
16341{
16342    uint32_t shmem_base[2];
16343    uint32_t shmem2_base[2];
16344
16345    /* Avoid common init in case MFW supports LFA */
16346    if (SHMEM2_RD(sc, size) >
16347        (uint32_t)offsetof(struct shmem2_region,
16348                           lfa_host_addr[SC_PORT(sc)])) {
16349        return;
16350    }
16351
16352    shmem_base[0]  = sc->devinfo.shmem_base;
16353    shmem2_base[0] = sc->devinfo.shmem2_base;
16354
16355    if (!CHIP_IS_E1x(sc)) {
16356        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16357        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16358    }
16359
16360    bxe_acquire_phy_lock(sc);
16361    elink_common_init_phy(sc, shmem_base, shmem2_base,
16362                          sc->devinfo.chip_id, 0);
16363    bxe_release_phy_lock(sc);
16364}
16365
16366static void
16367bxe_pf_disable(struct bxe_softc *sc)
16368{
16369    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16370
16371    val &= ~IGU_PF_CONF_FUNC_EN;
16372
16373    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16374    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16375    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16376}
16377
16378static void
16379bxe_init_pxp(struct bxe_softc *sc)
16380{
16381    uint16_t devctl;
16382    int r_order, w_order;
16383
16384    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16385
16386    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16387
16388    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16389
16390    if (sc->mrrs == -1) {
16391        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16392    } else {
16393        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16394        r_order = sc->mrrs;
16395    }
16396
16397    ecore_init_pxp_arb(sc, r_order, w_order);
16398}
16399
16400static uint32_t
16401bxe_get_pretend_reg(struct bxe_softc *sc)
16402{
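    /*
     * The pretend registers are laid out per absolute function at a fixed
     * stride starting from the F0 register.
     */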
16403    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16404    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16405    return (base + (SC_ABS_FUNC(sc)) * stride);
16406}
16407
16408/*
16409 * Called only on E1H or E2.
16410 * When pretending to be a PF, the pretend value is the function number 0..7.
16411 * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16412 * combination.
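 *
 * Typical usage (see bxe_init_hw_common() below): pretend to another absolute
 * function, issue register writes on its behalf, then restore the pretend
 * register via bxe_pretend_func(sc, SC_ABS_FUNC(sc)).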
16413 */
16414static int
16415bxe_pretend_func(struct bxe_softc *sc,
16416                 uint16_t         pretend_func_val)
16417{
16418    uint32_t pretend_reg;
16419
16420    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16421        return (-1);
16422    }
16423
16424    /* get my own pretend register */
16425    pretend_reg = bxe_get_pretend_reg(sc);
16426    REG_WR(sc, pretend_reg, pretend_func_val);
16427    REG_RD(sc, pretend_reg);
16428    return (0);
16429}
16430
16431static void
16432bxe_iov_init_dmae(struct bxe_softc *sc)
16433{
16434    return;
16435}
16436
16437static void
16438bxe_iov_init_dq(struct bxe_softc *sc)
16439{
16440    return;
16441}
16442
16443/* send a NIG loopback debug packet */
16444static void
16445bxe_lb_pckt(struct bxe_softc *sc)
16446{
16447    uint32_t wb_write[3];
16448
16449    /* Ethernet source and destination addresses */
16450    wb_write[0] = 0x55555555;
16451    wb_write[1] = 0x55555555;
16452    wb_write[2] = 0x20;     /* SOP */
16453    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16454
16455    /* NON-IP protocol */
16456    wb_write[0] = 0x09000000;
16457    wb_write[1] = 0x55555555;
16458    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16459    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16460}
16461
16462/*
16463 * Some of the internal memories are not directly readable from the driver.
16464 * To test them we send debug packets.
16465 */
16466static int
16467bxe_int_mem_test(struct bxe_softc *sc)
16468{
16469    int factor;
16470    int count, i;
16471    uint32_t val = 0;
16472
16473    if (CHIP_REV_IS_FPGA(sc)) {
16474        factor = 120;
16475    } else if (CHIP_REV_IS_EMUL(sc)) {
16476        factor = 200;
16477    } else {
16478        factor = 1;
16479    }
16480
16481    /* disable inputs of parser neighbor blocks */
16482    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16483    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16484    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16485    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16486
16487    /* write 0 to parser credits for CFC search request */
16488    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16489
16490    /* send Ethernet packet */
16491    bxe_lb_pckt(sc);
16492
16493    /* TODO do i reset NIG statistic? */
16494    /* Wait until NIG register shows 1 packet of size 0x10 */
16495    count = 1000 * factor;
16496    while (count) {
16497        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16498        val = *BXE_SP(sc, wb_data[0]);
16499        if (val == 0x10) {
16500            break;
16501        }
16502
16503        DELAY(10000);
16504        count--;
16505    }
16506
16507    if (val != 0x10) {
16508        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16509        return (-1);
16510    }
16511
16512    /* wait until PRS register shows 1 packet */
16513    count = (1000 * factor);
16514    while (count) {
16515        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16516        if (val == 1) {
16517            break;
16518        }
16519
16520        DELAY(10000);
16521        count--;
16522    }
16523
16524    if (val != 0x1) {
16525        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16526        return (-2);
16527    }
16528
16529    /* Reset and init BRB, PRS */
16530    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16531    DELAY(50000);
16532    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16533    DELAY(50000);
16534    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16535    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16536
16537    /* Disable inputs of parser neighbor blocks */
16538    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16539    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16540    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16541    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16542
16543    /* Write 0 to parser credits for CFC search request */
16544    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16545
16546    /* send 10 Ethernet packets */
16547    for (i = 0; i < 10; i++) {
16548        bxe_lb_pckt(sc);
16549    }
16550
16551    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16552    count = (1000 * factor);
16553    while (count) {
16554        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16555        val = *BXE_SP(sc, wb_data[0]);
16556        if (val == 0xb0) {
16557            break;
16558        }
16559
16560        DELAY(10000);
16561        count--;
16562    }
16563
16564    if (val != 0xb0) {
16565        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16566        return (-3);
16567    }
16568
16569    /* Wait until PRS register shows 2 packets */
16570    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16571    if (val != 2) {
16572        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16573    }
16574
16575    /* Write 1 to parser credits for CFC search request */
16576    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16577
16578    /* Wait until PRS register shows 3 packets */
16579    DELAY(10000 * factor);
16580
16581    /* check that the PRS register now shows 3 packets */
16582    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16583    if (val != 3) {
16584        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16585    }
16586
16587    /* clear NIG EOP FIFO */
16588    for (i = 0; i < 11; i++) {
16589        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16590    }
16591
16592    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16593    if (val != 1) {
16594        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16595        return (-4);
16596    }
16597
16598    /* Reset and init BRB, PRS, NIG */
16599    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16600    DELAY(50000);
16601    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16602    DELAY(50000);
16603    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16604    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16605    if (!CNIC_SUPPORT(sc)) {
16606        /* set NIC mode */
16607        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16608    }
16609
16610    /* Enable inputs of parser neighbor blocks */
16611    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16612    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16613    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16614    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16615
16616    return (0);
16617}
16618
16619static void
16620bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16621{
16622    int is_required;
16623    uint32_t val;
16624    int port;
16625
16626    is_required = 0;
16627    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16628           SHARED_HW_CFG_FAN_FAILURE_MASK);
16629
16630    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16631        is_required = 1;
16632    }
16633    /*
16634     * The fan failure mechanism is usually related to the PHY type since
16635     * the power consumption of the board is affected by the PHY. Currently,
16636     * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16637     */
16638    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16639        for (port = PORT_0; port < PORT_MAX; port++) {
16640            is_required |= elink_fan_failure_det_req(sc,
16641                                                     sc->devinfo.shmem_base,
16642                                                     sc->devinfo.shmem2_base,
16643                                                     port);
16644        }
16645    }
16646
16647    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16648
16649    if (is_required == 0) {
16650        return;
16651    }
16652
16653    /* Fan failure is indicated by SPIO 5 */
16654    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16655
16656    /* set to active low mode */
16657    val = REG_RD(sc, MISC_REG_SPIO_INT);
16658    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16659    REG_WR(sc, MISC_REG_SPIO_INT, val);
16660
16661    /* enable interrupt to signal the IGU */
16662    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16663    val |= MISC_SPIO_SPIO5;
16664    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16665}
16666
16667static void
16668bxe_enable_blocks_attention(struct bxe_softc *sc)
16669{
16670    uint32_t val;
16671
16672    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16673    if (!CHIP_IS_E1x(sc)) {
16674        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16675    } else {
16676        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16677    }
16678    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16679    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16680    /*
16681     * mask read-length error interrupts in the BRB for the parser
16682     * (parsing unit and 'checksum and crc' unit);
16683     * these errors are legal (the PU reads a fixed length and the CAC can
16684     * cause a read-length error on truncated packets)
16685     */
16686    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16687    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16688    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16689    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16690    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16691    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16692/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16693/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16694    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16695    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16696    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16697/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16698/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16699    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16700    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16701    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16702    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16703/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16704/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16705
16706    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16707           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16708           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16709    if (!CHIP_IS_E1x(sc)) {
16710        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16711                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16712    }
16713    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16714
16715    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16716    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16717    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16718/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16719
16720    if (!CHIP_IS_E1x(sc)) {
16721        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16722        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16723    }
16724
16725    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16726    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16727/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16728    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16729}
16730
16731/**
16732 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16733 *
16734 * @sc:     driver handle
16735 */
16736static int
16737bxe_init_hw_common(struct bxe_softc *sc)
16738{
16739    uint8_t abs_func_id;
16740    uint32_t val;
16741
16742    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16743          SC_ABS_FUNC(sc));
16744
16745    /*
16746     * take the RESET lock to protect undi_unload flow from accessing
16747     * registers while we are resetting the chip
16748     */
16749    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16750
16751    bxe_reset_common(sc);
16752
16753    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16754
16755    val = 0xfffc;
16756    if (CHIP_IS_E3(sc)) {
16757        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16758        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16759    }
16760
16761    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16762
16763    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16764
16765    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16766    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16767
16768    if (!CHIP_IS_E1x(sc)) {
16769        /*
16770         * In 4-port or 2-port mode we need to turn off master-enable for
16771         * everyone. After that we turn it back on for ourselves. So we
16772         * disregard multi-function and always disable all functions on the
16773         * given path: 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
16774         */
16775        for (abs_func_id = SC_PATH(sc);
16776             abs_func_id < (E2_FUNC_MAX * 2);
16777             abs_func_id += 2) {
16778            if (abs_func_id == SC_ABS_FUNC(sc)) {
16779                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16780                continue;
16781            }
16782
16783            bxe_pretend_func(sc, abs_func_id);
16784
16785            /* clear pf enable */
16786            bxe_pf_disable(sc);
16787
16788            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16789        }
16790    }
16791
16792    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16793
16794    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16795
16796    if (CHIP_IS_E1(sc)) {
16797        /*
16798         * enable HW interrupt from PXP on USDM overflow
16799         * bit 16 on INT_MASK_0
16800         */
16801        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16802    }
16803
16804    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16805    bxe_init_pxp(sc);
16806
16807#ifdef __BIG_ENDIAN
16808    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16809    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16810    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16811    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16812    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16813    /* make sure this value is 0 */
16814    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16815
16816    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16817    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16818    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16819    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16820    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16821#endif
16822
16823    ecore_ilt_init_page_size(sc, INITOP_SET);
16824
16825    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16826        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16827    }
16828
16829    /* let the HW do its magic... */
16830    DELAY(100000);
16831
16832    /* finish PXP init */
16833    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16834    if (val != 1) {
16835        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16836            val);
16837        return (-1);
16838    }
16839    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16840    if (val != 1) {
16841        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16842        return (-1);
16843    }
16844
16845    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16846
16847    /*
16848     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16849     * entries with value "0" and valid bit on. This needs to be done by the
16850     * first PF that is loaded in a path (i.e. common phase)
16851     */
16852    if (!CHIP_IS_E1x(sc)) {
16853/*
16854 * In E2 there is a bug in the timers block that can cause function 6 / 7
16855 * (i.e. vnic3) to start even if it is marked as "scan-off".
16856 * This occurs when a different function (func2,3) is being marked
16857 * as "scan-off". A real-life example: a driver being loaded and unloaded
16858 * while func6,7 are down. This will cause the timer to access
16859 * the ilt, translate to a logical address and send a request to read/write.
16860 * Since the ilt for the function that is down is not valid, this will cause
16861 * a translation error which is unrecoverable.
16862 * The Workaround is intended to make sure that when this happens nothing
16863 * The workaround is intended to make sure that when this happens nothing
16864 *  1.  First PF driver which loads on a path will:
16865 *      a.  After taking the chip out of reset, by using pretend,
16866 *          it will write "0" to the following registers of
16867 *          the other vnics.
16868 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16869 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16870 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16871 *          And for itself it will write '1' to
16872 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16873 *          dmae-operations (writing to pram for example.)
16874 *          note: can be done for only function 6,7 but cleaner this
16875 *            way.
16876 *      b.  Write zero+valid to the entire ILT.
16877 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16878 *          VNIC3 (of that port). The range allocated will be the
16879 *          entire ILT. This is needed to prevent an ILT range error.
16880 *  2.  Any PF driver load flow:
16881 *      a.  ILT update with the physical addresses of the allocated
16882 *          logical pages.
16883 *      b.  Wait 20msec. - note that this timeout is needed to make
16884 *          sure there are no requests in one of the PXP internal
16885 *          queues with "old" ILT addresses.
16886 *      c.  PF enable in the PGLC.
16887 *      d.  Clear the was_error of the PF in the PGLC. (could have
16888 *          occurred while driver was down)
16889 *      e.  PF enable in the CFC (WEAK + STRONG)
16890 *      f.  Timers scan enable
16891 *  3.  PF driver unload flow:
16892 *      a.  Clear the Timers scan_en.
16893 *      b.  Polling for scan_on=0 for that PF.
16894 *      c.  Clear the PF enable bit in the PXP.
16895 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16896 *      e.  Write zero+valid to all ILT entries (The valid bit must
16897 *          stay set)
16898 *      f.  If this is VNIC 3 of a port then also init
16899 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16900 *          to the last entry in the ILT.
16901 *
16902 *      Notes:
16903 *      Currently the PF error in the PGLC is non-recoverable.
16904 *      In the future there will be a recovery routine for this error.
16905 *      Currently attention is masked.
16906 *      Having an MCP lock on the load/unload process does not guarantee that
16907 *      there is no Timer disable during Func6/7 enable. This is because the
16908 *      Timers scan is currently being cleared by the MCP on FLR.
16909 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16910 *      there is an error before clearing it. But the flow above is simpler and
16911 *      more general.
16912 *      All ILT entries are written by zero+valid and not just PF6/7
16913 *      ILT entries since in the future the ILT entries allocation for
16914 *      PF-s might be dynamic.
16915 */
16916        struct ilt_client_info ilt_cli;
16917        struct ecore_ilt ilt;
16918
16919        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16920        memset(&ilt, 0, sizeof(struct ecore_ilt));
16921
16922        /* initialize dummy TM client */
16923        ilt_cli.start      = 0;
16924        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16925        ilt_cli.client_num = ILT_CLIENT_TM;
16926
16927        /*
16928         * Step 1: set zeroes to all ilt page entries with valid bit on
16929         * Step 2: set the timers first/last ilt entry to point
16930         * to the entire range to prevent ILT range error for 3rd/4th
16931         * vnic (this code assumes existence of the vnic)
16932         *
16933         * both steps performed by call to ecore_ilt_client_init_op()
16934         * with dummy TM client
16935         *
16936         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16937         * and his brother are split registers
16938         * and its counterpart are split registers
16939
16940        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16941        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16942        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16943
16944        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16945        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16946        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16947    }
16948
16949    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16950    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16951
16952    if (!CHIP_IS_E1x(sc)) {
16953        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16954                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16955
16956        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16957        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16958
16959        /* let the HW do its magic... */
16960        do {
16961            DELAY(200000);
16962            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16963        } while (factor-- && (val != 1));
16964
16965        if (val != 1) {
16966            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16967            return (-1);
16968        }
16969    }
16970
16971    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16972
16973    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16974
16975    bxe_iov_init_dmae(sc);
16976
16977    /* clean the DMAE memory */
16978    sc->dmae_ready = 1;
16979    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16980
16981    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16982
16983    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16984
16985    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16986
16987    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16988
16989    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16990    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16991    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16992    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16993
16994    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16995
16996    /* QM queues pointers table */
16997    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16998
16999    /* soft reset pulse */
17000    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17001    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17002
17003    if (CNIC_SUPPORT(sc))
17004        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17005
17006    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17007    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17008    if (!CHIP_REV_IS_SLOW(sc)) {
17009        /* enable hw interrupt from doorbell Q */
17010        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17011    }
17012
17013    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17014
17015    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17016    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17017
17018    if (!CHIP_IS_E1(sc)) {
17019        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17020    }
17021
17022    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17023        if (IS_MF_AFEX(sc)) {
17024            /*
17025             * configure that AFEX and VLAN headers must be
17026             * received in AFEX mode
17027             */
17028            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17029            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17030            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17031            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17032            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17033        } else {
17034            /*
17035             * Bit-map indicating which L2 hdrs may appear
17036             * after the basic Ethernet header
17037             */
17038            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17039                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17040        }
17041    }
17042
17043    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17044    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17045    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17046    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17047
17048    if (!CHIP_IS_E1x(sc)) {
17049        /* reset VFC memories */
17050        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17051               VFC_MEMORIES_RST_REG_CAM_RST |
17052               VFC_MEMORIES_RST_REG_RAM_RST);
17053        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17054               VFC_MEMORIES_RST_REG_CAM_RST |
17055               VFC_MEMORIES_RST_REG_RAM_RST);
17056
17057        DELAY(20000);
17058    }
17059
17060    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17061    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17062    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17063    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17064
17065    /* sync semi rtc */
17066    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17067           0x80000000);
17068    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17069           0x80000000);
17070
17071    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17072    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17073    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17074
17075    if (!CHIP_IS_E1x(sc)) {
17076        if (IS_MF_AFEX(sc)) {
17077            /*
17078             * configure that AFEX and VLAN headers must be
17079             * sent in AFEX mode
17080             */
17081            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17082            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17083            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17084            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17085            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17086        } else {
17087            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17088                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17089        }
17090    }
17091
17092    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17093
17094    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17095
17096    if (CNIC_SUPPORT(sc)) {
17097        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17098        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17099        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17100        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17101        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17102        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17103        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17104        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17105        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17106        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17107    }
17108    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17109
17110    if (sizeof(union cdu_context) != 1024) {
17111        /* we currently assume that a context is 1024 bytes */
17112        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17113              (long)sizeof(union cdu_context));
17114    }
17115
17116    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
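    /* program the CDU global params; 1024 is the assumed per-connection
     * context size checked just above */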
17117    val = (4 << 24) + (0 << 12) + 1024;
17118    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17119
17120    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17121
17122    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17123    /* enable context validation interrupt from CFC */
17124    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17125
17126    /* set the thresholds to prevent CFC/CDU race */
17127    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17128    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17129
17130    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17131        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17132    }
17133
17134    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17135    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17136
17137    /* Reset PCIE errors for debug */
17138    REG_WR(sc, 0x2814, 0xffffffff);
17139    REG_WR(sc, 0x3820, 0xffffffff);
17140
17141    if (!CHIP_IS_E1x(sc)) {
17142        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17143               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17144                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17145        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17146               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17147                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17148                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17149        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17150               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17151                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17152                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17153    }
17154
17155    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17156
17157    if (!CHIP_IS_E1(sc)) {
17158        /* in E3 this is done in the per-port section */
17159        if (!CHIP_IS_E3(sc))
17160            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17161    }
17162
17163    if (CHIP_IS_E1H(sc)) {
17164        /* not applicable for E2 (and above ...) */
17165        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17166    }
17167
17168    if (CHIP_REV_IS_SLOW(sc)) {
17169        DELAY(200000);
17170    }
17171
17172    /* finish CFC init */
17173    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17174    if (val != 1) {
17175        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17176        return (-1);
17177    }
17178    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17179    if (val != 1) {
17180        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17181        return (-1);
17182    }
17183    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17184    if (val != 1) {
17185        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17186        return (-1);
17187    }
17188    REG_WR(sc, CFC_REG_DEBUG0, 0);
17189
17190    if (CHIP_IS_E1(sc)) {
17191        /* read NIG statistic to see if this is our first up since powerup */
17192        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17193        val = *BXE_SP(sc, wb_data[0]);
17194
17195        /* do internal memory self test */
17196        if ((val == 0) && bxe_int_mem_test(sc)) {
17197            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17198            return (-1);
17199        }
17200    }
17201
17202    bxe_setup_fan_failure_detection(sc);
17203
17204    /* clear PXP2 attentions */
17205    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17206
17207    bxe_enable_blocks_attention(sc);
17208
17209    if (!CHIP_REV_IS_SLOW(sc)) {
17210        ecore_enable_blocks_parity(sc);
17211    }
17212
17213    if (!BXE_NOMCP(sc)) {
17214        if (CHIP_IS_E1x(sc)) {
17215            bxe_common_init_phy(sc);
17216        }
17217    }
17218
17219    return (0);
17220}
17221
17222/**
17223 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17224 *
17225 * @sc:     driver handle
17226 */
17227static int
17228bxe_init_hw_common_chip(struct bxe_softc *sc)
17229{
17230    int rc = bxe_init_hw_common(sc);
17231
17232    if (rc) {
17233        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17234        return (rc);
17235    }
17236
17237    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17238    if (!BXE_NOMCP(sc)) {
17239        bxe_common_init_phy(sc);
17240    }
17241
17242    return (0);
17243}
17244
17245static int
17246bxe_init_hw_port(struct bxe_softc *sc)
17247{
17248    int port = SC_PORT(sc);
17249    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17250    uint32_t low, high;
17251    uint32_t val;
17252
17253    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17254
17255    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17256
17257    ecore_init_block(sc, BLOCK_MISC, init_phase);
17258    ecore_init_block(sc, BLOCK_PXP, init_phase);
17259    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17260
17261    /*
17262     * Timers bug workaround: the pf_master bit in pglue is disabled at the
17263     * common phase, so we need to enable it here before any dmae accesses
17264     * are attempted. Therefore we manually add the enable-master to the
17265     * port phase (it also happens in the function phase).
17266     */
17267    if (!CHIP_IS_E1x(sc)) {
17268        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17269    }
17270
17271    ecore_init_block(sc, BLOCK_ATC, init_phase);
17272    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17273    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17274    ecore_init_block(sc, BLOCK_QM, init_phase);
17275
17276    ecore_init_block(sc, BLOCK_TCM, init_phase);
17277    ecore_init_block(sc, BLOCK_UCM, init_phase);
17278    ecore_init_block(sc, BLOCK_CCM, init_phase);
17279    ecore_init_block(sc, BLOCK_XCM, init_phase);
17280
17281    /* QM cid (connection) count */
17282    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17283
17284    if (CNIC_SUPPORT(sc)) {
17285        ecore_init_block(sc, BLOCK_TM, init_phase);
17286        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17287        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17288    }
17289
17290    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17291
17292    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17293
17294    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17295        if (IS_MF(sc)) {
17296            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17297        } else if (sc->mtu > 4096) {
17298            if (BXE_ONE_PORT(sc)) {
17299                low = 160;
17300            } else {
17301                val = sc->mtu;
17302                /* (24*1024 + val*4)/256 */
17303                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17304            }
17305        } else {
17306            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17307        }
17308        high = (low + 56); /* 14*1024/256 */
17309        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17310        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17311    }
17312
17313    if (CHIP_IS_MODE_4_PORT(sc)) {
17314        REG_WR(sc, SC_PORT(sc) ?
17315               BRB1_REG_MAC_GUARANTIED_1 :
17316               BRB1_REG_MAC_GUARANTIED_0, 40);
17317    }
17318
17319    ecore_init_block(sc, BLOCK_PRS, init_phase);
17320    if (CHIP_IS_E3B0(sc)) {
17321        if (IS_MF_AFEX(sc)) {
17322            /* configure headers for AFEX mode */
17323            REG_WR(sc, SC_PORT(sc) ?
17324                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17325                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17326            REG_WR(sc, SC_PORT(sc) ?
17327                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17328                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17329            REG_WR(sc, SC_PORT(sc) ?
17330                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17331                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17332        } else {
17333            /* Ovlan exists only if we are in multi-function +
17334             * switch-dependent mode, in switch-independent there
17335             * switch-dependent mode; in switch-independent mode there
17336             * are no ovlan headers
17337            REG_WR(sc, SC_PORT(sc) ?
17338                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17339                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17340                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17341        }
17342    }
17343
17344    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17345    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17346    ecore_init_block(sc, BLOCK_USDM, init_phase);
17347    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17348
17349    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17350    ecore_init_block(sc, BLOCK_USEM, init_phase);
17351    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17352    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17353
17354    ecore_init_block(sc, BLOCK_UPB, init_phase);
17355    ecore_init_block(sc, BLOCK_XPB, init_phase);
17356
17357    ecore_init_block(sc, BLOCK_PBF, init_phase);
17358
17359    if (CHIP_IS_E1x(sc)) {
17360        /* configure PBF to work without PAUSE mtu 9000 */
17361        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17362
17363        /* update threshold */
17364        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17365        /* update init credit */
17366        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17367
17368        /* probe changes */
17369        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17370        DELAY(50);
17371        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17372    }
17373
17374    if (CNIC_SUPPORT(sc)) {
17375        ecore_init_block(sc, BLOCK_SRC, init_phase);
17376    }
17377
17378    ecore_init_block(sc, BLOCK_CDU, init_phase);
17379    ecore_init_block(sc, BLOCK_CFC, init_phase);
17380
17381    if (CHIP_IS_E1(sc)) {
17382        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17383        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17384    }
17385    ecore_init_block(sc, BLOCK_HC, init_phase);
17386
17387    ecore_init_block(sc, BLOCK_IGU, init_phase);
17388
17389    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17390    /* init aeu_mask_attn_func_0/1:
17391     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17392     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17393     *             bits 4-7 are used for "per vn group attention" */
17394    val = IS_MF(sc) ? 0xF7 : 0x7;
17395    /* Enable DCBX attention for all but E1 */
17396    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17397    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17398
17399    ecore_init_block(sc, BLOCK_NIG, init_phase);
17400
17401    if (!CHIP_IS_E1x(sc)) {
17402        /* Bit-map indicating which L2 hdrs may appear after the
17403         * basic Ethernet header
17404         */
17405        if (IS_MF_AFEX(sc)) {
17406            REG_WR(sc, SC_PORT(sc) ?
17407                   NIG_REG_P1_HDRS_AFTER_BASIC :
17408                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17409        } else {
17410            REG_WR(sc, SC_PORT(sc) ?
17411                   NIG_REG_P1_HDRS_AFTER_BASIC :
17412                   NIG_REG_P0_HDRS_AFTER_BASIC,
17413                   IS_MF_SD(sc) ? 7 : 6);
17414        }
17415
17416        if (CHIP_IS_E3(sc)) {
17417            REG_WR(sc, SC_PORT(sc) ?
17418                   NIG_REG_LLH1_MF_MODE :
17419                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17420        }
17421    }
17422    if (!CHIP_IS_E3(sc)) {
17423        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17424    }
17425
17426    if (!CHIP_IS_E1(sc)) {
17427        /* 0x2 disable mf_ov, 0x1 enable */
17428        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17429               (IS_MF_SD(sc) ? 0x1 : 0x2));
17430
17431        if (!CHIP_IS_E1x(sc)) {
17432            val = 0;
17433            switch (sc->devinfo.mf_info.mf_mode) {
17434            case MULTI_FUNCTION_SD:
17435                val = 1;
17436                break;
17437            case MULTI_FUNCTION_SI:
17438            case MULTI_FUNCTION_AFEX:
17439                val = 2;
17440                break;
17441            }
17442
17443            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17444                        NIG_REG_LLH0_CLS_TYPE), val);
17445        }
17446        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17447        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17448        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17449    }
17450
17451    /* If SPIO5 is set to generate interrupts, enable it for this port */
17452    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17453    if (val & MISC_SPIO_SPIO5) {
17454        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17455                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17456        val = REG_RD(sc, reg_addr);
17457        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17458        REG_WR(sc, reg_addr, val);
17459    }
17460
17461    return (0);
17462}
17463
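/*
 * Poll 'reg' until it reads back 'expected' or until 'poll_count' iterations
 * of FLR_WAIT_INTERVAL usecs have elapsed; returns the last value read so the
 * caller can tell whether the expected value was reached.
 */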
17464static uint32_t
17465bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17466                       uint32_t         reg,
17467                       uint32_t         expected,
17468                       uint32_t         poll_count)
17469{
17470    uint32_t cur_cnt = poll_count;
17471    uint32_t val;
17472
17473    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17474        DELAY(FLR_WAIT_INTERVAL);
17475    }
17476
17477    return (val);
17478}
17479
17480static int
17481bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17482                              uint32_t         reg,
17483                              char             *msg,
17484                              uint32_t         poll_cnt)
17485{
17486    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17487
17488    if (val != 0) {
17489        BLOGE(sc, "%s usage count=%d\n", msg, val);
17490        return (1);
17491    }
17492
17493    return (0);
17494}
17495
17496/* Common routines with VF FLR cleanup */
17497static uint32_t
17498bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17499{
17500    /* adjust polling timeout */
17501    if (CHIP_REV_IS_EMUL(sc)) {
17502        return (FLR_POLL_CNT * 2000);
17503    }
17504
17505    if (CHIP_REV_IS_FPGA(sc)) {
17506        return (FLR_POLL_CNT * 120);
17507    }
17508
17509    return (FLR_POLL_CNT);
17510}
17511
17512static int
17513bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17514                           uint32_t         poll_cnt)
17515{
17516    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17517    if (bxe_flr_clnup_poll_hw_counter(sc,
17518                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17519                                      "CFC PF usage counter timed out",
17520                                      poll_cnt)) {
17521        return (1);
17522    }
17523
17524    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17525    if (bxe_flr_clnup_poll_hw_counter(sc,
17526                                      DORQ_REG_PF_USAGE_CNT,
17527                                      "DQ PF usage counter timed out",
17528                                      poll_cnt)) {
17529        return (1);
17530    }
17531
17532    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17533    if (bxe_flr_clnup_poll_hw_counter(sc,
17534                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17535                                      "QM PF usage counter timed out",
17536                                      poll_cnt)) {
17537        return (1);
17538    }
17539
17540    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17541    if (bxe_flr_clnup_poll_hw_counter(sc,
17542                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17543                                      "Timers VNIC usage counter timed out",
17544                                      poll_cnt)) {
17545        return (1);
17546    }
17547
17548    if (bxe_flr_clnup_poll_hw_counter(sc,
17549                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17550                                      "Timers NUM_SCANS usage counter timed out",
17551                                      poll_cnt)) {
17552        return (1);
17553    }
17554
17555    /* Wait for the DMAE PF usage counter to zero */
17556    if (bxe_flr_clnup_poll_hw_counter(sc,
17557                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17558                                      "DMAE command register timed out",
17559                                      poll_cnt)) {
17560        return (1);
17561    }
17562
17563    return (0);
17564}
17565
17566#define OP_GEN_PARAM(param)                                            \
17567    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17568#define OP_GEN_TYPE(type)                                           \
17569    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17570#define OP_GEN_AGG_VECT(index)                                             \
17571    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17572
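/*
 * The OP_GEN_* helpers above pack the completion parameter, completion type
 * and aggregated vector index into the XSDM operation-generator command that
 * bxe_send_final_clnup() writes to trigger the FW final cleanup.
 */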
17573static int
17574bxe_send_final_clnup(struct bxe_softc *sc,
17575                     uint8_t          clnup_func,
17576                     uint32_t         poll_cnt)
17577{
17578    uint32_t op_gen_command = 0;
17579    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17580                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17581    int ret = 0;
17582
17583    if (REG_RD(sc, comp_addr)) {
17584        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17585        return (1);
17586    }
17587
17588    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17589    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17590    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17591    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17592
17593    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17594    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17595
17596    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17597        BLOGE(sc, "FW final cleanup did not succeed\n");
17598        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17599              (REG_RD(sc, comp_addr)));
17600        bxe_panic(sc, ("FLR cleanup failed\n"));
17601        return (1);
17602    }
17603
17604    /* Zero the completion for the next FLR */
17605    REG_WR(sc, comp_addr, 0);
17606
17607    return (ret);
17608}
17609
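/*
 * Wait for the PBF to drain the tx buffer of port/queue pN: poll until the
 * credit count returns to its initial value, or the credits freed since the
 * start cover everything that was outstanding, or the poll count expires.
 */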
17610static void
17611bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17612                       struct pbf_pN_buf_regs *regs,
17613                       uint32_t               poll_count)
17614{
17615    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17616    uint32_t cur_cnt = poll_count;
17617
17618    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17619    crd = crd_start = REG_RD(sc, regs->crd);
17620    init_crd = REG_RD(sc, regs->init_crd);
17621
17622    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17623    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17624    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17625
17626    while ((crd != init_crd) &&
17627           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17628            (init_crd - crd_start))) {
17629        if (cur_cnt--) {
17630            DELAY(FLR_WAIT_INTERVAL);
17631            crd = REG_RD(sc, regs->crd);
17632            crd_freed = REG_RD(sc, regs->crd_freed);
17633        } else {
17634            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17635            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17636            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17637            break;
17638        }
17639    }
17640
17641    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17642          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17643}
17644
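/*
 * Wait for the PBF command queue of port/queue pN to drain: poll until the
 * occupancy drops to zero, or enough lines have been freed to cover what was
 * occupied at the start, or the poll count expires.
 */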
17645static void
17646bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17647                       struct pbf_pN_cmd_regs *regs,
17648                       uint32_t               poll_count)
17649{
17650    uint32_t occup, to_free, freed, freed_start;
17651    uint32_t cur_cnt = poll_count;
17652
17653    occup = to_free = REG_RD(sc, regs->lines_occup);
17654    freed = freed_start = REG_RD(sc, regs->lines_freed);
17655
17656    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17657    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17658
17659    while (occup &&
17660           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17661        if (cur_cnt--) {
17662            DELAY(FLR_WAIT_INTERVAL);
17663            occup = REG_RD(sc, regs->lines_occup);
17664            freed = REG_RD(sc, regs->lines_freed);
17665        } else {
17666            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17667            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17668            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17669            break;
17670        }
17671    }
17672
17673    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17674          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17675}
17676
17677static void
17678bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17679{
17680    struct pbf_pN_cmd_regs cmd_regs[] = {
17681        {0, (CHIP_IS_E3B0(sc)) ?
17682            PBF_REG_TQ_OCCUPANCY_Q0 :
17683            PBF_REG_P0_TQ_OCCUPANCY,
17684            (CHIP_IS_E3B0(sc)) ?
17685            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17686            PBF_REG_P0_TQ_LINES_FREED_CNT},
17687        {1, (CHIP_IS_E3B0(sc)) ?
17688            PBF_REG_TQ_OCCUPANCY_Q1 :
17689            PBF_REG_P1_TQ_OCCUPANCY,
17690            (CHIP_IS_E3B0(sc)) ?
17691            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17692            PBF_REG_P1_TQ_LINES_FREED_CNT},
17693        {4, (CHIP_IS_E3B0(sc)) ?
17694            PBF_REG_TQ_OCCUPANCY_LB_Q :
17695            PBF_REG_P4_TQ_OCCUPANCY,
17696            (CHIP_IS_E3B0(sc)) ?
17697            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17698            PBF_REG_P4_TQ_LINES_FREED_CNT}
17699    };
17700
17701    struct pbf_pN_buf_regs buf_regs[] = {
17702        {0, (CHIP_IS_E3B0(sc)) ?
17703            PBF_REG_INIT_CRD_Q0 :
17704            PBF_REG_P0_INIT_CRD ,
17705            (CHIP_IS_E3B0(sc)) ?
17706            PBF_REG_CREDIT_Q0 :
17707            PBF_REG_P0_CREDIT,
17708            (CHIP_IS_E3B0(sc)) ?
17709            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17710            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17711        {1, (CHIP_IS_E3B0(sc)) ?
17712            PBF_REG_INIT_CRD_Q1 :
17713            PBF_REG_P1_INIT_CRD,
17714            (CHIP_IS_E3B0(sc)) ?
17715            PBF_REG_CREDIT_Q1 :
17716            PBF_REG_P1_CREDIT,
17717            (CHIP_IS_E3B0(sc)) ?
17718            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17719            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17720        {4, (CHIP_IS_E3B0(sc)) ?
17721            PBF_REG_INIT_CRD_LB_Q :
17722            PBF_REG_P4_INIT_CRD,
17723            (CHIP_IS_E3B0(sc)) ?
17724            PBF_REG_CREDIT_LB_Q :
17725            PBF_REG_P4_CREDIT,
17726            (CHIP_IS_E3B0(sc)) ?
17727            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17728            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17729    };
17730
17731    int i;
17732
17733    /* Verify the command queues are flushed P0, P1, P4 */
17734    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17735        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17736    }
17737
17738    /* Verify the transmission buffers are flushed P0, P1, P4 */
17739    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17740        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17741    }
17742}
17743
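/*
 * Dump (at DBG_LOAD level) the per-PF enable/disable and FLR status
 * registers; used purely as a debug aid after the PF FLR cleanup sequence.
 */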
17744static void
17745bxe_hw_enable_status(struct bxe_softc *sc)
17746{
17747    uint32_t val;
17748
17749    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17750    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17751
17752    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17753    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17754
17755    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17756    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17757
17758    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17759    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17760
17761    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17762    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17763
17764    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17765    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17766
17767    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17768    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17769
17770    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17771    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17772}
17773
17774static int
17775bxe_pf_flr_clnup(struct bxe_softc *sc)
17776{
17777    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17778
17779    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17780
17781    /* Re-enable PF target read access */
17782    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17783
17784    /* Poll HW usage counters */
17785    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17786    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17787        return (-1);
17788    }
17789
17790    /* Zero the igu 'trailing edge' and 'leading edge' */
17791
17792    /* Send the FW cleanup command */
17793    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17794        return (-1);
17795    }
17796
17797    /* ATC cleanup */
17798
17799    /* Verify TX hw is flushed */
17800    bxe_tx_hw_flushed(sc, poll_cnt);
17801
17802    /* Wait 100ms (not adjusted according to platform) */
17803    DELAY(100000);
17804
17805    /* Verify no pending pci transactions */
17806    if (bxe_is_pcie_pending(sc)) {
17807        BLOGE(sc, "PCIE Transactions still pending\n");
17808    }
17809
17810    /* Debug */
17811    bxe_hw_enable_status(sc);
17812
17813    /*
17814     * Master enable - Due to WB DMAE writes performed before this
17815     * register is re-initialized as part of the regular function init
17816     */
17817    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17818
17819    return (0);
17820}
17821
17822static int
17823bxe_init_hw_func(struct bxe_softc *sc)
17824{
17825    int port = SC_PORT(sc);
17826    int func = SC_FUNC(sc);
17827    int init_phase = PHASE_PF0 + func;
17828    struct ecore_ilt *ilt = sc->ilt;
17829    uint16_t cdu_ilt_start;
17830    uint32_t addr, val;
17831    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17832    int i, main_mem_width, rc;
17833
17834    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17835
17836    /* FLR cleanup */
17837    if (!CHIP_IS_E1x(sc)) {
17838        rc = bxe_pf_flr_clnup(sc);
17839        if (rc) {
17840            BLOGE(sc, "FLR cleanup failed!\n");
17841            // XXX bxe_fw_dump(sc);
17842            // XXX bxe_idle_chk(sc);
17843            return (rc);
17844        }
17845    }
17846
17847    /* set MSI reconfigure capability */
17848    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17849        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17850        val = REG_RD(sc, addr);
17851        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17852        REG_WR(sc, addr, val);
17853    }
17854
17855    ecore_init_block(sc, BLOCK_PXP, init_phase);
17856    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17857
17858    ilt = sc->ilt;
17859    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17860
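    /* Point the CDU client's ILT lines at this function's L2 context pages */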
17861    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17862        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17863        ilt->lines[cdu_ilt_start + i].page_mapping =
17864            sc->context[i].vcxt_dma.paddr;
17865        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17866    }
17867    ecore_ilt_init_op(sc, INITOP_SET);
17868
17869    /* Set NIC mode */
17870    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17871    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17872
17873    if (!CHIP_IS_E1x(sc)) {
17874        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17875
17876        /* Turn on a single ISR mode in IGU if the driver is going to use
17877         * INT#x or MSI
17878         */
17879        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17880            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17881        }
17882
17883        /*
17884         * Timers bug workaround: function init part.
17885         * Wait 20 msec after initializing the ILT to make sure
17886         * there are no requests left in any of the PXP internal
17887         * queues with "old" ILT addresses.
17888         */
17889        DELAY(20000);
17890
17891        /*
17892         * Master enable - Due to WB DMAE writes performed before this
17893         * register is re-initialized as part of the regular function
17894         * init
17895         */
17896        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17897        /* Enable the function in IGU */
17898        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17899    }
17900
17901    sc->dmae_ready = 1;
17902
17903    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17904
17905    if (!CHIP_IS_E1x(sc))
17906        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17907
17908    ecore_init_block(sc, BLOCK_ATC, init_phase);
17909    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17910    ecore_init_block(sc, BLOCK_NIG, init_phase);
17911    ecore_init_block(sc, BLOCK_SRC, init_phase);
17912    ecore_init_block(sc, BLOCK_MISC, init_phase);
17913    ecore_init_block(sc, BLOCK_TCM, init_phase);
17914    ecore_init_block(sc, BLOCK_UCM, init_phase);
17915    ecore_init_block(sc, BLOCK_CCM, init_phase);
17916    ecore_init_block(sc, BLOCK_XCM, init_phase);
17917    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17918    ecore_init_block(sc, BLOCK_USEM, init_phase);
17919    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17920    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17921
17922    if (!CHIP_IS_E1x(sc))
17923        REG_WR(sc, QM_REG_PF_EN, 1);
17924
17925    if (!CHIP_IS_E1x(sc)) {
17926        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17927        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17928        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17929        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17930    }
17931    ecore_init_block(sc, BLOCK_QM, init_phase);
17932
17933    ecore_init_block(sc, BLOCK_TM, init_phase);
17934    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17935
17936    bxe_iov_init_dq(sc);
17937
17938    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17939    ecore_init_block(sc, BLOCK_PRS, init_phase);
17940    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17941    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17942    ecore_init_block(sc, BLOCK_USDM, init_phase);
17943    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17944    ecore_init_block(sc, BLOCK_UPB, init_phase);
17945    ecore_init_block(sc, BLOCK_XPB, init_phase);
17946    ecore_init_block(sc, BLOCK_PBF, init_phase);
17947    if (!CHIP_IS_E1x(sc))
17948        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17949
17950    ecore_init_block(sc, BLOCK_CDU, init_phase);
17951
17952    ecore_init_block(sc, BLOCK_CFC, init_phase);
17953
17954    if (!CHIP_IS_E1x(sc))
17955        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17956
17957    if (IS_MF(sc)) {
17958        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17959        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17960    }
17961
17962    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17963
17964    /* HC init per function */
17965    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17966        if (CHIP_IS_E1H(sc)) {
17967            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17968
17969            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17970            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17971        }
17972        ecore_init_block(sc, BLOCK_HC, init_phase);
17973
17974    } else {
17975        int num_segs, sb_idx, prod_offset;
17976
17977        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17978
17979        if (!CHIP_IS_E1x(sc)) {
17980            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17981            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17982        }
17983
17984        ecore_init_block(sc, BLOCK_IGU, init_phase);
17985
17986        if (!CHIP_IS_E1x(sc)) {
17987            int dsb_idx = 0;
17988            /**
17989             * Producer memory:
17990             * E2 mode: address 0-135 match to the mapping memory;
17991             * 136 - PF0 default prod; 137 - PF1 default prod;
17992             * 138 - PF2 default prod; 139 - PF3 default prod;
17993             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17994             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17995             * 144-147 reserved.
17996             *
17997             * E1.5 mode - in backward compatible mode:
17998             * for non-default SBs, each even line in the memory
17999             * holds the U producer and each odd line holds
18000             * the C producer. The first 128 producers are for
18001             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18002             * producers are for the DSB for each PF.
18003             * Each PF has five segments: (the order inside each
18004             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18005             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18006             * 144-147 attn prods;
18007             */
18008            /* non-default-status-blocks */
18009            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18010                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18011            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18012                prod_offset = (sc->igu_base_sb + sb_idx) *
18013                    num_segs;
18014
18015                for (i = 0; i < num_segs; i++) {
18016                    addr = IGU_REG_PROD_CONS_MEMORY +
18017                            (prod_offset + i) * 4;
18018                    REG_WR(sc, addr, 0);
18019                }
18020                /* send consumer update with value 0 */
18021                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18022                           USTORM_ID, 0, IGU_INT_NOP, 1);
18023                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18024            }
18025
18026            /* default-status-blocks */
18027            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18028                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18029
18030            if (CHIP_IS_MODE_4_PORT(sc))
18031                dsb_idx = SC_FUNC(sc);
18032            else
18033                dsb_idx = SC_VN(sc);
18034
18035            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18036                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18037                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18038
18039            /*
18040             * IGU producers come in chunks of E1HVN_MAX (4),
18041             * regardless of the current chip mode.
18042             */
18043            for (i = 0; i < (num_segs * E1HVN_MAX);
18044                 i += E1HVN_MAX) {
18045                addr = IGU_REG_PROD_CONS_MEMORY +
18046                            (prod_offset + i)*4;
18047                REG_WR(sc, addr, 0);
18048            }
18049            /* send consumer update with 0 */
18050            if (CHIP_INT_MODE_IS_BC(sc)) {
18051                bxe_ack_sb(sc, sc->igu_dsb_id,
18052                           USTORM_ID, 0, IGU_INT_NOP, 1);
18053                bxe_ack_sb(sc, sc->igu_dsb_id,
18054                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18055                bxe_ack_sb(sc, sc->igu_dsb_id,
18056                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18057                bxe_ack_sb(sc, sc->igu_dsb_id,
18058                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18059                bxe_ack_sb(sc, sc->igu_dsb_id,
18060                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18061            } else {
18062                bxe_ack_sb(sc, sc->igu_dsb_id,
18063                           USTORM_ID, 0, IGU_INT_NOP, 1);
18064                bxe_ack_sb(sc, sc->igu_dsb_id,
18065                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18066            }
18067            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18068
18069            /* !!! these should become driver const once
18070               rf-tool supports split-68 const */
18071            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18072            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18073            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18074            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18075            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18076            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18077        }
18078    }
18079
18080    /* Reset PCIE errors for debug */
18081    REG_WR(sc, 0x2114, 0xffffffff);
18082    REG_WR(sc, 0x2120, 0xffffffff);
18083
18084    if (CHIP_IS_E1x(sc)) {
18085        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18086        main_mem_base = HC_REG_MAIN_MEMORY +
18087                SC_PORT(sc) * (main_mem_size * 4);
18088        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18089        main_mem_width = 8;
18090
18091        val = REG_RD(sc, main_mem_prty_clr);
18092        if (val) {
18093            BLOGD(sc, DBG_LOAD,
18094                  "Parity errors in HC block during function init (0x%x)!\n",
18095                  val);
18096        }
18097
18098        /* Clear "false" parity errors in MSI-X table */
18099        for (i = main_mem_base;
18100             i < main_mem_base + main_mem_size * 4;
18101             i += main_mem_width) {
18102            bxe_read_dmae(sc, i, main_mem_width / 4);
18103            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18104                           i, main_mem_width / 4);
18105        }
18106        /* Clear HC parity attention */
18107        REG_RD(sc, main_mem_prty_clr);
18108    }
18109
18110#if 1
18111    /* Enable STORMs SP logging */
18112    REG_WR8(sc, BAR_USTRORM_INTMEM +
18113           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18114    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18115           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18116    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18117           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18118    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18119           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18120#endif
18121
18122    elink_phy_probe(&sc->link_params);
18123
18124    return (0);
18125}
18126
18127static void
18128bxe_link_reset(struct bxe_softc *sc)
18129{
18130    if (!BXE_NOMCP(sc)) {
18131        bxe_acquire_phy_lock(sc);
18132        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18133        bxe_release_phy_lock(sc);
18134    } else {
18135        if (!CHIP_REV_IS_SLOW(sc)) {
18136            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18137        }
18138    }
18139}
18140
18141static void
18142bxe_reset_port(struct bxe_softc *sc)
18143{
18144    int port = SC_PORT(sc);
18145    uint32_t val;
18146
18147	ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18148    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18149    bxe_link_reset(sc);
18150
18151    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18152
18153    /* Do not rcv packets to BRB */
18154    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18155    /* Do not direct rcv packets that are not for MCP to the BRB */
18156    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18157               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18158
18159    /* Configure AEU */
18160    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18161
18162    DELAY(100000);
18163
18164    /* Check for BRB port occupancy */
18165    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18166    if (val) {
18167        BLOGD(sc, DBG_LOAD,
18168              "BRB1 is not empty, %d blocks are occupied\n", val);
18169    }
18170
18171    /* TODO: Close Doorbell port? */
18172}
18173
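/*
 * Write a single ILT entry: the 64-bit on-chip address is split into two
 * 32-bit words and written to the PXP2 on-chip address table via DMAE.
 */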
18174static void
18175bxe_ilt_wr(struct bxe_softc *sc,
18176           uint32_t         index,
18177           bus_addr_t       addr)
18178{
18179    int reg;
18180    uint32_t wb_write[2];
18181
18182    if (CHIP_IS_E1(sc)) {
18183        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18184    } else {
18185        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18186    }
18187
18188    wb_write[0] = ONCHIP_ADDR1(addr);
18189    wb_write[1] = ONCHIP_ADDR2(addr);
18190    REG_WR_DMAE(sc, reg, wb_write, 2);
18191}
18192
18193static void
18194bxe_clear_func_ilt(struct bxe_softc *sc,
18195                   uint32_t         func)
18196{
18197    uint32_t i, base = FUNC_ILT_BASE(func);
18198    for (i = base; i < base + ILT_PER_FUNC; i++) {
18199        bxe_ilt_wr(sc, i, 0);
18200    }
18201}
18202
18203static void
18204bxe_reset_func(struct bxe_softc *sc)
18205{
18206    struct bxe_fastpath *fp;
18207    int port = SC_PORT(sc);
18208    int func = SC_FUNC(sc);
18209    int i;
18210
18211    /* Disable the function in the FW */
18212    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18213    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18214    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18215    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18216
18217    /* FP SBs */
18218    FOR_EACH_ETH_QUEUE(sc, i) {
18219        fp = &sc->fp[i];
18220        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18221                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18222                SB_DISABLED);
18223    }
18224
18225    /* SP SB */
18226    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18227            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18228            SB_DISABLED);
18229
18230    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18231        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18232    }
18233
18234    /* Configure IGU */
18235    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18236        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18237        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18238    } else {
18239        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18240        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18241    }
18242
18243    if (CNIC_LOADED(sc)) {
18244        /* Disable Timer scan */
18245        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18246        /*
18247         * Wait for at least 10ms and up to 2 second for the timers
18248         * Wait for at least 10ms and up to 2 seconds for the timers
18249         */
18250        for (i = 0; i < 200; i++) {
18251            DELAY(10000);
18252            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18253                break;
18254        }
18255    }
18256
18257    /* Clear ILT */
18258    bxe_clear_func_ilt(sc, func);
18259
18260    /*
18261     * Timers bug workaround for E2: if this is vnic-3,
18262     * we need to set the entire ILT range for the timers.
18263     */
18264    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18265        struct ilt_client_info ilt_cli;
18266        /* use dummy TM client */
18267        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18268        ilt_cli.start = 0;
18269        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18270        ilt_cli.client_num = ILT_CLIENT_TM;
18271
18272        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18273    }
18274
18275    /* this assumes that reset_port() was called before reset_func() */
18276    if (!CHIP_IS_E1x(sc)) {
18277        bxe_pf_disable(sc);
18278    }
18279
18280    sc->dmae_ready = 0;
18281}
18282
18283static int
18284bxe_gunzip_init(struct bxe_softc *sc)
18285{
18286    return (0);
18287}
18288
18289static void
18290bxe_gunzip_end(struct bxe_softc *sc)
18291{
18292    return;
18293}
18294
18295static int
18296bxe_init_firmware(struct bxe_softc *sc)
18297{
18298    if (CHIP_IS_E1(sc)) {
18299        ecore_init_e1_firmware(sc);
18300        sc->iro_array = e1_iro_arr;
18301    } else if (CHIP_IS_E1H(sc)) {
18302        ecore_init_e1h_firmware(sc);
18303        sc->iro_array = e1h_iro_arr;
18304    } else if (!CHIP_IS_E1x(sc)) {
18305        ecore_init_e2_firmware(sc);
18306        sc->iro_array = e2_iro_arr;
18307    } else {
18308        BLOGE(sc, "Unsupported chip revision\n");
18309        return (-1);
18310    }
18311
18312    return (0);
18313}
18314
18315static void
18316bxe_release_firmware(struct bxe_softc *sc)
18317{
18318    /* Do nothing */
18319    return;
18320}
18321
18322static int
18323ecore_gunzip(struct bxe_softc *sc,
18324             const uint8_t    *zbuf,
18325             int              len)
18326{
18327    /* XXX : Implement... */
18328    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18329    return (FALSE);
18330}
18331
18332static void
18333ecore_reg_wr_ind(struct bxe_softc *sc,
18334                 uint32_t         addr,
18335                 uint32_t         val)
18336{
18337    bxe_reg_wr_ind(sc, addr, val);
18338}
18339
18340static void
18341ecore_write_dmae_phys_len(struct bxe_softc *sc,
18342                          bus_addr_t       phys_addr,
18343                          uint32_t         addr,
18344                          uint32_t         len)
18345{
18346    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18347}
18348
18349void
18350ecore_storm_memset_struct(struct bxe_softc *sc,
18351                          uint32_t         addr,
18352                          size_t           size,
18353                          uint32_t         *data)
18354{
18355    uint8_t i;
18356    for (i = 0; i < size/4; i++) {
18357        REG_WR(sc, addr + (i * 4), data[i]);
18358    }
18359}
18360
18361
18362/*
18363 * character device - ioctl interface definitions
18364 */
18365
18366
18367#include "bxe_dump.h"
18368#include "bxe_ioctl.h"
18369#include <sys/conf.h>
18370
18371static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18372                struct thread *td);
18373
18374static struct cdevsw bxe_cdevsw = {
18375    .d_version = D_VERSION,
18376    .d_ioctl = bxe_eioctl,
18377    .d_name = "bxecnic",
18378};
18379
18380#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18381
18382
18383#define DUMP_ALL_PRESETS        0x1FFF
18384#define DUMP_MAX_PRESETS        13
18385#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18386#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18387#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18388#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18389#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18390
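/* Preset indices are 1-based, so IS_REG_IN_PRESET tests bit (idx - 1) of the presets bitmap. */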
18391#define IS_REG_IN_PRESET(presets, idx)  \
18392                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18393
18394
18395static int
18396bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18397{
18398    if (CHIP_IS_E1(sc))
18399        return dump_num_registers[0][preset-1];
18400    else if (CHIP_IS_E1H(sc))
18401        return dump_num_registers[1][preset-1];
18402    else if (CHIP_IS_E2(sc))
18403        return dump_num_registers[2][preset-1];
18404    else if (CHIP_IS_E3A0(sc))
18405        return dump_num_registers[3][preset-1];
18406    else if (CHIP_IS_E3B0(sc))
18407        return dump_num_registers[4][preset-1];
18408    else
18409        return 0;
18410}
18411
18412static int
18413bxe_get_total_regs_len32(struct bxe_softc *sc)
18414{
18415    uint32_t preset_idx;
18416    int regdump_len32 = 0;
18417
18418
18419    /* Calculate the total preset regs length */
18420    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18421        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18422    }
18423
18424    return regdump_len32;
18425}
18426
18427static const uint32_t *
18428__bxe_get_page_addr_ar(struct bxe_softc *sc)
18429{
18430    if (CHIP_IS_E2(sc))
18431        return page_vals_e2;
18432    else if (CHIP_IS_E3(sc))
18433        return page_vals_e3;
18434    else
18435        return NULL;
18436}
18437
18438static uint32_t
18439__bxe_get_page_reg_num(struct bxe_softc *sc)
18440{
18441    if (CHIP_IS_E2(sc))
18442        return PAGE_MODE_VALUES_E2;
18443    else if (CHIP_IS_E3(sc))
18444        return PAGE_MODE_VALUES_E3;
18445    else
18446        return 0;
18447}
18448
18449static const uint32_t *
18450__bxe_get_page_write_ar(struct bxe_softc *sc)
18451{
18452    if (CHIP_IS_E2(sc))
18453        return page_write_regs_e2;
18454    else if (CHIP_IS_E3(sc))
18455        return page_write_regs_e3;
18456    else
18457        return NULL;
18458}
18459
18460static uint32_t
18461__bxe_get_page_write_num(struct bxe_softc *sc)
18462{
18463    if (CHIP_IS_E2(sc))
18464        return PAGE_WRITE_REGS_E2;
18465    else if (CHIP_IS_E3(sc))
18466        return PAGE_WRITE_REGS_E3;
18467    else
18468        return 0;
18469}
18470
18471static const struct reg_addr *
18472__bxe_get_page_read_ar(struct bxe_softc *sc)
18473{
18474    if (CHIP_IS_E2(sc))
18475        return page_read_regs_e2;
18476    else if (CHIP_IS_E3(sc))
18477        return page_read_regs_e3;
18478    else
18479        return NULL;
18480}
18481
18482static uint32_t
18483__bxe_get_page_read_num(struct bxe_softc *sc)
18484{
18485    if (CHIP_IS_E2(sc))
18486        return PAGE_READ_REGS_E2;
18487    else if (CHIP_IS_E3(sc))
18488        return PAGE_READ_REGS_E3;
18489    else
18490        return 0;
18491}
18492
18493static bool
18494bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18495{
18496    if (CHIP_IS_E1(sc))
18497        return IS_E1_REG(reg_info->chips);
18498    else if (CHIP_IS_E1H(sc))
18499        return IS_E1H_REG(reg_info->chips);
18500    else if (CHIP_IS_E2(sc))
18501        return IS_E2_REG(reg_info->chips);
18502    else if (CHIP_IS_E3A0(sc))
18503        return IS_E3A0_REG(reg_info->chips);
18504    else if (CHIP_IS_E3B0(sc))
18505        return IS_E3B0_REG(reg_info->chips);
18506    else
18507        return 0;
18508}
18509
18510static bool
18511bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18512{
18513    if (CHIP_IS_E1(sc))
18514        return IS_E1_REG(wreg_info->chips);
18515    else if (CHIP_IS_E1H(sc))
18516        return IS_E1H_REG(wreg_info->chips);
18517    else if (CHIP_IS_E2(sc))
18518        return IS_E2_REG(wreg_info->chips);
18519    else if (CHIP_IS_E3A0(sc))
18520        return IS_E3A0_REG(wreg_info->chips);
18521    else if (CHIP_IS_E3B0(sc))
18522        return IS_E3B0_REG(wreg_info->chips);
18523    else
18524        return 0;
18525}
18526
18527/**
18528 * bxe_read_pages_regs - read "paged" registers
18529 *
18530 * @sc          device handle
18531 * @p           output buffer
18532 *
18533 * Reads "paged" memories: memories that may only be read by first writing to a
18534 * specific address ("write address") and then reading from a specific address
18535 * ("read address"). There may be more than one write address per "page" and
18536 * more than one read address per write address.
18537 */
18538static void
18539bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18540{
18541    uint32_t i, j, k, n;
18542
18543    /* addresses of the paged registers */
18544    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18545    /* number of paged registers */
18546    int num_pages = __bxe_get_page_reg_num(sc);
18547    /* write addresses */
18548    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18549    /* number of write addresses */
18550    int write_num = __bxe_get_page_write_num(sc);
18551    /* read addresses info */
18552    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18553    /* number of read addresses */
18554    int read_num = __bxe_get_page_read_num(sc);
18555    uint32_t addr, size;
18556
18557    for (i = 0; i < num_pages; i++) {
18558        for (j = 0; j < write_num; j++) {
18559            REG_WR(sc, write_addr[j], page_addr[i]);
18560
18561            for (k = 0; k < read_num; k++) {
18562                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18563                    size = read_addr[k].size;
18564                    for (n = 0; n < size; n++) {
18565                        addr = read_addr[k].addr + n*4;
18566                        *p++ = REG_RD(sc, addr);
18567                    }
18568                }
18569            }
18570        }
18571    }
18572    return;
18573}
18574
18575
18576static int
18577bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18578{
18579    uint32_t i, j, addr;
18580    const struct wreg_addr *wreg_addr_p = NULL;
18581
18582    if (CHIP_IS_E1(sc))
18583        wreg_addr_p = &wreg_addr_e1;
18584    else if (CHIP_IS_E1H(sc))
18585        wreg_addr_p = &wreg_addr_e1h;
18586    else if (CHIP_IS_E2(sc))
18587        wreg_addr_p = &wreg_addr_e2;
18588    else if (CHIP_IS_E3A0(sc))
18589        wreg_addr_p = &wreg_addr_e3;
18590    else if (CHIP_IS_E3B0(sc))
18591        wreg_addr_p = &wreg_addr_e3b0;
18592    else
18593        return (-1);
18594
18595    /* Read the idle_chk registers */
18596    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18597        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18598            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18599            for (j = 0; j < idle_reg_addrs[i].size; j++)
18600                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18601        }
18602    }
18603
18604    /* Read the regular registers */
18605    for (i = 0; i < REGS_COUNT; i++) {
18606        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18607            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18608            for (j = 0; j < reg_addrs[i].size; j++)
18609                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18610        }
18611    }
18612
18613    /* Read the CAM registers */
18614    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18615        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18616        for (i = 0; i < wreg_addr_p->size; i++) {
18617            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18618
18619            /* For a wreg_addr register, also read the additional
18620             * registers listed in its read_regs array.
18621             */
18622            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18623                addr = *(wreg_addr_p->read_regs);
18624                *p++ = REG_RD(sc, addr + j*4);
18625            }
18626        }
18627    }
18628
18629    /* Paged registers are supported in E2 & E3 only */
18630    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18631        /* Read "paged" registers */
18632        bxe_read_pages_regs(sc, p, preset);
18633    }
18634
18635    return 0;
18636}
18637
18638int
18639bxe_grc_dump(struct bxe_softc *sc)
18640{
18641    int rval = 0;
18642    uint32_t preset_idx;
18643    uint8_t *buf;
18644    uint32_t size;
18645    struct  dump_header *d_hdr;
18646    uint32_t i;
18647    uint32_t reg_val;
18648    uint32_t reg_addr;
18649    uint32_t cmd_offset;
18650    struct ecore_ilt *ilt = SC_ILT(sc);
18651    struct bxe_fastpath *fp;
18652    struct ilt_client_info *ilt_cli;
18653    int grc_dump_size;
18654
18655
18656    if (sc->grcdump_done || sc->grcdump_started)
18657        return (rval);
18658
18659    sc->grcdump_started = 1;
18660    BLOGI(sc, "Started collecting grcdump\n");
18661
18662    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18663                sizeof(struct  dump_header);
18664
18665    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18666
18667    if (sc->grc_dump == NULL) {
18668        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18669        return(ENOMEM);
18670    }
18671
18672
18673
18674    /* Disable parity attentions while dumping, since reading registers
18675     * that were never written may cause false parity alarms. We
18676     * re-enable parity attentions right after the dump.
18677     */
18678
18679    /* Disable parity on path 0 */
18680    bxe_pretend_func(sc, 0);
18681
18682    ecore_disable_blocks_parity(sc);
18683
18684    /* Disable parity on path 1 */
18685    bxe_pretend_func(sc, 1);
18686    ecore_disable_blocks_parity(sc);
18687
18688    /* Return to current function */
18689    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18690
18691    buf = sc->grc_dump;
18692    d_hdr = sc->grc_dump;
18693
18694    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18695    d_hdr->version = BNX2X_DUMP_VERSION;
18696    d_hdr->preset = DUMP_ALL_PRESETS;
18697
18698    if (CHIP_IS_E1(sc)) {
18699        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18700    } else if (CHIP_IS_E1H(sc)) {
18701        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18702    } else if (CHIP_IS_E2(sc)) {
18703        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18704                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18705    } else if (CHIP_IS_E3A0(sc)) {
18706        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18707                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18708    } else if (CHIP_IS_E3B0(sc)) {
18709        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18710                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18711    }
18712
18713    buf += sizeof(struct  dump_header);
18714
18715    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18716
18717        /* Skip presets with IOR */
18718        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18719            (preset_idx == 11))
18720            continue;
18721
18722        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18723
18724        if (rval)
18725            break;
18726
18727        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18728
18729        buf += size;
18730    }
18731
18732    bxe_pretend_func(sc, 0);
18733    ecore_clear_blocks_parity(sc);
18734    ecore_enable_blocks_parity(sc);
18735
18736    bxe_pretend_func(sc, 1);
18737    ecore_clear_blocks_parity(sc);
18738    ecore_enable_blocks_parity(sc);
18739
18740    /* Return to current function */
18741    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18742
18743
18744
18745    if (sc->state == BXE_STATE_OPEN) {
18746        if (sc->fw_stats_req != NULL) {
18747            BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18748                  (uintmax_t)sc->fw_stats_req_mapping,
18749                  (uintmax_t)sc->fw_stats_data_mapping,
18750                  sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18751        }
18752        if (sc->def_sb != NULL) {
18753            BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18754                  (void *)sc->def_sb_dma.paddr, sc->def_sb,
18755                  sizeof(struct host_sp_status_block));
18756        }
18757        if (sc->eq_dma.vaddr != NULL) {
18758            BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18759                  (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18760        }
18761        if (sc->sp_dma.vaddr != NULL) {
18762            BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18763                  (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18764                  sizeof(struct bxe_slowpath));
18765        }
18766        if (sc->spq_dma.vaddr != NULL) {
18767            BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18768                  (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18769        }
18770        if (sc->gz_buf_dma.vaddr != NULL) {
18771            BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18772                  (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18773                  FW_BUF_SIZE);
18774        }
18775        for (i = 0; i < sc->num_queues; i++) {
18776            fp = &sc->fp[i];
18777            if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
18778                fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
18779                fp->rx_sge_dma.vaddr != NULL) {
18780
18781                BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18782                      (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18783                      sizeof(union bxe_host_hc_status_block));
18784                BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18785                      (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18786                      (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18787                BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18788                      (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18789                      (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18790                BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18791                      (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18792                      (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18793                BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18794                      (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18795                      (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18796            }
18797        }
18798        if (ilt != NULL) {
18799            ilt_cli = &ilt->clients[1];
18800            if (ilt->lines != NULL) {
18801                for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18802                    BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18803                          (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18804                          ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18805                }
18806            }
18807        }
18808
18809
18810        cmd_offset = DMAE_REG_CMD_MEM;
18811        for (i = 0; i < 224; i++) {
18812            reg_addr = (cmd_offset + (i * 4));
18813            reg_val = REG_RD(sc, reg_addr);
18814            BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
18815                  reg_addr, reg_val);
18816        }
18817    }
18818
18819    BLOGI(sc, "Collection of grcdump done\n");
18820    sc->grcdump_done = 1;
18821    return(rval);
18822}
18823
18824static int
18825bxe_add_cdev(struct bxe_softc *sc)
18826{
18827    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18828
18829    if (sc->eeprom == NULL) {
18830        BLOGW(sc, "Unable to allocate eeprom buffer\n");
18831        return (-1);
18832    }
18833
18834    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18835                            sc->ifp->if_dunit,
18836                            UID_ROOT,
18837                            GID_WHEEL,
18838                            0600,
18839                            "%s",
18840                            if_name(sc->ifp));
18841
18842    if (sc->ioctl_dev == NULL) {
18843        free(sc->eeprom, M_DEVBUF);
18844        sc->eeprom = NULL;
18845        return (-1);
18846    }
18847
18848    sc->ioctl_dev->si_drv1 = sc;
18849
18850    return (0);
18851}
18852
18853static void
18854bxe_del_cdev(struct bxe_softc *sc)
18855{
18856    if (sc->ioctl_dev != NULL)
18857        destroy_dev(sc->ioctl_dev);
18858
18859    if (sc->eeprom != NULL) {
18860        free(sc->eeprom, M_DEVBUF);
18861        sc->eeprom = NULL;
18862    }
18863    sc->ioctl_dev = NULL;
18864
18865    return;
18866}
18867
18868static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18869{
18870
18871    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18872        return FALSE;
18873
18874    return TRUE;
18875}
18876
18877
18878static int
18879bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18880{
18881    int rval = 0;
18882
18883    if(!bxe_is_nvram_accessible(sc)) {
18884        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18885        return (-EAGAIN);
18886    }
18887    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18888
18889
18890   return (rval);
18891    return (rval);
18892
18893static int
18894bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18895{
18896    int rval = 0;
18897
18898    if(!bxe_is_nvram_accessible(sc)) {
18899        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18900        return (-EAGAIN);
18901    }
18902    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18903
18904   return (rval);
18905    return (rval);
18906
18907static int
18908bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18909{
18910    int rval = 0;
18911
18912    switch (eeprom->eeprom_cmd) {
18913
18914    case BXE_EEPROM_CMD_SET_EEPROM:
18915
18916        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18917                       eeprom->eeprom_data_len);
18918
18919        if (rval)
18920            break;
18921
18922        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18923                       eeprom->eeprom_data_len);
18924        break;
18925
18926    case BXE_EEPROM_CMD_GET_EEPROM:
18927
18928        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18929                       eeprom->eeprom_data_len);
18930
18931        if (rval) {
18932            break;
18933        }
18934
18935        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18936                       eeprom->eeprom_data_len);
18937        break;
18938
18939    default:
18940        rval = EINVAL;
18941        break;
18942    }
18943
18944    if (rval) {
18945        BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
18946    }
18947
18948    return (rval);
18949}
18950
18951static int
18952bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18953{
18954    uint32_t ext_phy_config;
18955    int port = SC_PORT(sc);
18956    int cfg_idx = bxe_get_link_cfg_idx(sc);
18957
18958    dev_p->supported = sc->port.supported[cfg_idx] |
18959            (sc->port.supported[cfg_idx ^ 1] &
18960            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18961    dev_p->advertising = sc->port.advertising[cfg_idx];
18962    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18963        ELINK_ETH_PHY_SFP_1G_FIBER) {
18964        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
18965        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18966    }
18967    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18968        !(sc->flags & BXE_MF_FUNC_DIS)) {
18969        dev_p->duplex = sc->link_vars.duplex;
18970        if (IS_MF(sc) && !BXE_NOMCP(sc))
18971            dev_p->speed = bxe_get_mf_speed(sc);
18972        else
18973            dev_p->speed = sc->link_vars.line_speed;
18974    } else {
18975        dev_p->duplex = DUPLEX_UNKNOWN;
18976        dev_p->speed = SPEED_UNKNOWN;
18977    }
18978
18979    dev_p->port = bxe_media_detect(sc);
18980
18981    ext_phy_config = SHMEM_RD(sc,
18982                         dev_info.port_hw_config[port].external_phy_config);
    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
             ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;
18993
    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return (0);
19001}
19002
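/*
 * Character-device ioctl entry point (diagnostics interface). Supports
 * GRC register dumps, driver/firmware version queries, link settings,
 * raw register and PCI config space access, permanent MAC address
 * retrieval and EEPROM read/write.
 */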
19003static int
19004bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19005        struct thread *td)
19006{
    struct bxe_softc    *sc;
    int                  rval = 0;
    device_t             pci_dev;
    bxe_grcdump_t       *dump = NULL;
    int                  grc_dump_size;
    bxe_drvinfo_t       *drv_infop = NULL;
    bxe_dev_setting_t   *dev_p;
    bxe_dev_setting_t    dev_set;
    bxe_get_regs_t      *reg_p;
    bxe_reg_rdw_t       *reg_rdw_p;
    bxe_pcicfg_rdw_t    *cfg_rdw_p;
    bxe_perm_mac_addr_t *mac_addr_p;
19019
19020
19021    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19022        return ENXIO;
19023
    pci_dev = sc->dev;
19025
19026    dump = (bxe_grcdump_t *)data;
19027
19028    switch(cmd) {
19029
19030        case BXE_GRC_DUMP_SIZE:
19031            dump->pci_func = sc->pcie_func;
19032            dump->grcdump_size =
19033                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                sizeof(struct dump_header);
19035            break;
19036
19037        case BXE_GRC_DUMP:
19038
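            /*
             * Validate the caller's buffer against the full dump size,
             * take the dump if one has not already been captured, then
             * copy it out and release the driver-side buffer.
             */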
19039            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                                sizeof(struct dump_header);
19041            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19042                (dump->grcdump_size < grc_dump_size)) {
19043                rval = EINVAL;
19044                break;
19045            }
19046
            if ((sc->trigger_grcdump) && (!sc->grcdump_done) &&
                (!sc->grcdump_started)) {
                rval = bxe_grc_dump(sc);
            }

            if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
                (sc->grc_dump != NULL)) {
19054                dump->grcdump_dwords = grc_dump_size >> 2;
19055                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19056                free(sc->grc_dump, M_DEVBUF);
19057                sc->grc_dump = NULL;
19058                sc->grcdump_started = 0;
19059                sc->grcdump_done = 0;
19060            }
19061
19062            break;
19063
19064        case BXE_DRV_INFO:
19065            drv_infop = (bxe_drvinfo_t *)data;
19066            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19067            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19068                BXE_DRIVER_VERSION);
19069            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19070                sc->devinfo.bc_ver_str);
19071            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19072                "%s", sc->fw_ver_str);
19073            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19074            drv_infop->reg_dump_len =
19075                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
                    + sizeof(struct dump_header);
19077            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19078                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19079            break;
19080
19081        case BXE_DEV_SETTING:
19082            dev_p = (bxe_dev_setting_t *)data;
19083            bxe_get_settings(sc, &dev_set);
19084            dev_p->supported = dev_set.supported;
19085            dev_p->advertising = dev_set.advertising;
19086            dev_p->speed = dev_set.speed;
19087            dev_p->duplex = dev_set.duplex;
19088            dev_p->port = dev_set.port;
19089            dev_p->phy_address = dev_set.phy_address;
19090            dev_p->autoneg = dev_set.autoneg;
19091
19092            break;
19093
19094        case BXE_GET_REGS:
19095
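            /*
             * Same GRC dump machinery as BXE_GRC_DUMP, but the copyout
             * length comes from the caller-supplied reg_buf_len.
             */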
19096            reg_p = (bxe_get_regs_t *)data;
19097            grc_dump_size = reg_p->reg_buf_len;
19098
            if ((!sc->grcdump_done) && (!sc->grcdump_started)) {
                bxe_grc_dump(sc);
            }
            if ((sc->grcdump_done) && (sc->grcdump_started) &&
                (sc->grc_dump != NULL)) {
19104                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19105                free(sc->grc_dump, M_DEVBUF);
19106                sc->grc_dump = NULL;
19107                sc->grcdump_started = 0;
19108                sc->grcdump_done = 0;
19109            }
19110
19111            break;
19112
19113        case BXE_RDW_REG:
19114            reg_rdw_p = (bxe_reg_rdw_t *)data;
            if ((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);

            if ((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19121                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19122
19123            break;
19124
19125        case BXE_RDW_PCICFG:
19126            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
            if (cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
                                                     cfg_rdw_p->cfg_width);
            } else if (cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
                                 cfg_rdw_p->cfg_width);
19135            } else {
19136                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19137            }
19138            break;
19139
19140        case BXE_MAC_ADDR:
19141            mac_addr_p = (bxe_perm_mac_addr_t *)data;
            snprintf(mac_addr_p->mac_addr_str, sizeof(mac_addr_p->mac_addr_str),
                "%s", sc->mac_addr_str);
19144            break;
19145
19146        case BXE_EEPROM:
19147            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19148            break;
19149
19150
19151        default:
19152            break;
19153    }
19154
19155    return (rval);
19156}
19157
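/*
 * Illustrative userland sketch of the GRC dump ioctl flow handled above.
 * This is not part of the driver: the "/dev/bxe0" node name is an
 * assumption, and the BXE_GRC_DUMP_SIZE/BXE_GRC_DUMP commands plus the
 * bxe_grcdump_t layout are expected to come from the driver's ioctl
 * header. The dump must also have been armed via the driver's
 * trigger_grcdump control, otherwise BXE_GRC_DUMP returns EINVAL.
 *
 *     #include <sys/ioctl.h>
 *     #include <fcntl.h>
 *     #include <stdlib.h>
 *
 *     bxe_grcdump_t dump = { 0 };
 *     int fd = open("/dev/bxe0", O_RDWR);            // hypothetical node
 *
 *     if (fd >= 0 && ioctl(fd, BXE_GRC_DUMP_SIZE, &dump) == 0) {
 *         dump.grcdump = malloc(dump.grcdump_size);  // size from driver
 *         if (dump.grcdump != NULL && ioctl(fd, BXE_GRC_DUMP, &dump) == 0) {
 *             // dump.grcdump_dwords 32-bit words of GRC data are valid here
 *         }
 *     }
 */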